[antlr3] 01/07: Imported Upstream version 3.0.1+dfsg

Tony Mancill tmancill at moszumanska.debian.org
Sat Jul 11 18:38:39 UTC 2015


This is an automated email from the git hooks/post-receive script.

tmancill pushed a commit to branch master
in repository antlr3.

commit 1c779208a7e5af066f9f0086fda4143a63ce0470
Author: tony mancill <tmancill at debian.org>
Date:   Sat Jul 11 10:52:13 2015 -0700

    Imported Upstream version 3.0.1+dfsg
---
 LICENSE.txt                                        |   26 +
 README.txt                                         | 2017 ++++++++++
 build.properties                                   |    8 +
 build.xml                                          |  227 ++
 runtime/Java/doxyfile                              |  264 ++
 .../src/org/antlr/runtime/ANTLRFileStream.java     |   78 +
 .../src/org/antlr/runtime/ANTLRInputStream.java    |   43 +
 .../src/org/antlr/runtime/ANTLRReaderStream.java   |   68 +
 .../src/org/antlr/runtime/ANTLRStringStream.java   |  221 ++
 .../Java/src/org/antlr/runtime/BaseRecognizer.java |  831 ++++
 runtime/Java/src/org/antlr/runtime/BitSet.java     |  324 ++
 runtime/Java/src/org/antlr/runtime/CharStream.java |   57 +
 .../src/org/antlr/runtime/CharStreamState.java     |   45 +
 .../Java/src/org/antlr/runtime/ClassicToken.java   |  107 +
 .../Java/src/org/antlr/runtime/CommonToken.java    |  172 +
 .../src/org/antlr/runtime/CommonTokenStream.java   |  370 ++
 runtime/Java/src/org/antlr/runtime/DFA.java        |  187 +
 .../src/org/antlr/runtime/EarlyExitException.java  |   41 +
 .../antlr/runtime/FailedPredicateException.java    |   54 +
 runtime/Java/src/org/antlr/runtime/IntStream.java  |  116 +
 runtime/Java/src/org/antlr/runtime/Lexer.java      |  345 ++
 .../antlr/runtime/MismatchedNotSetException.java   |   41 +
 .../antlr/runtime/MismatchedRangeException.java    |   42 +
 .../org/antlr/runtime/MismatchedSetException.java  |   44 +
 .../antlr/runtime/MismatchedTokenException.java    |   44 +
 .../antlr/runtime/MismatchedTreeNodeException.java |   22 +
 .../org/antlr/runtime/NoViableAltException.java    |   52 +
 runtime/Java/src/org/antlr/runtime/Parser.java     |   65 +
 .../org/antlr/runtime/ParserRuleReturnScope.java   |   49 +
 .../org/antlr/runtime/RecognitionException.java    |  180 +
 .../src/org/antlr/runtime/RuleReturnScope.java     |   15 +
 runtime/Java/src/org/antlr/runtime/Token.java      |   84 +
 .../src/org/antlr/runtime/TokenRewriteStream.java  |  512 +++
 .../Java/src/org/antlr/runtime/TokenSource.java    |   49 +
 .../Java/src/org/antlr/runtime/TokenStream.java    |   68 +
 .../runtime/debug/BlankDebugEventListener.java     |   77 +
 .../src/org/antlr/runtime/debug/DebugEventHub.java |  258 ++
 .../antlr/runtime/debug/DebugEventListener.java    |  312 ++
 .../antlr/runtime/debug/DebugEventRepeater.java    |   60 +
 .../antlr/runtime/debug/DebugEventSocketProxy.java |  338 ++
 .../src/org/antlr/runtime/debug/DebugParser.java   |  113 +
 .../org/antlr/runtime/debug/DebugTokenStream.java  |  146 +
 .../org/antlr/runtime/debug/DebugTreeAdaptor.java  |  164 +
 .../antlr/runtime/debug/DebugTreeNodeStream.java   |  145 +
 .../org/antlr/runtime/debug/DebugTreeParser.java   |  115 +
 .../org/antlr/runtime/debug/ParseTreeBuilder.java  |   80 +
 .../Java/src/org/antlr/runtime/debug/Profiler.java |  506 +++
 .../debug/RemoteDebugEventSocketListener.java      |  511 +++
 .../runtime/debug/TraceDebugEventListener.java     |   69 +
 .../Java/src/org/antlr/runtime/debug/Tracer.java   |   65 +
 runtime/Java/src/org/antlr/runtime/misc/Stats.java |  117 +
 .../Java/src/org/antlr/runtime/tree/BaseTree.java  |  193 +
 .../org/antlr/runtime/tree/BaseTreeAdaptor.java    |  190 +
 .../src/org/antlr/runtime/tree/CommonTree.java     |  127 +
 .../org/antlr/runtime/tree/CommonTreeAdaptor.java  |  137 +
 .../antlr/runtime/tree/CommonTreeNodeStream.java   |  560 +++
 .../org/antlr/runtime/tree/DOTTreeGenerator.java   |  205 +
 .../src/org/antlr/runtime/tree/DoubleLinkTree.java |   54 +
 .../Java/src/org/antlr/runtime/tree/ParseTree.java |   79 +
 .../runtime/tree/RewriteCardinalityException.java  |   47 +
 .../runtime/tree/RewriteEarlyExitException.java    |   39 +
 .../runtime/tree/RewriteEmptyStreamException.java  |   35 +
 .../runtime/tree/RewriteRuleElementStream.java     |  211 +
 .../runtime/tree/RewriteRuleSubtreeStream.java     |   83 +
 .../antlr/runtime/tree/RewriteRuleTokenStream.java |   67 +
 runtime/Java/src/org/antlr/runtime/tree/Tree.java  |   64 +
 .../src/org/antlr/runtime/tree/TreeAdaptor.java    |  212 +
 .../src/org/antlr/runtime/tree/TreeNodeStream.java |   87 +
 .../src/org/antlr/runtime/tree/TreeParser.java     |  135 +
 .../org/antlr/runtime/tree/TreePatternLexer.java   |  135 +
 .../org/antlr/runtime/tree/TreePatternParser.java  |  156 +
 .../antlr/runtime/tree/TreeRuleReturnScope.java    |   40 +
 .../src/org/antlr/runtime/tree/TreeWizard.java     |  409 ++
 .../runtime/tree/UnBufferedTreeNodeStream.java     |  561 +++
 src/org/antlr/Tool.java                            |  551 +++
 src/org/antlr/analysis/DFA.java                    | 1106 ++++++
 src/org/antlr/analysis/DFAOptimizer.java           |  263 ++
 src/org/antlr/analysis/DFAState.java               |  811 ++++
 src/org/antlr/analysis/DecisionProbe.java          |  941 +++++
 src/org/antlr/analysis/Label.java                  |  374 ++
 src/org/antlr/analysis/LookaheadSet.java           |   92 +
 src/org/antlr/analysis/NFA.java                    |   77 +
 src/org/antlr/analysis/NFAConfiguration.java       |  147 +
 src/org/antlr/analysis/NFAContext.java             |  285 ++
 src/org/antlr/analysis/NFAConversionThread.java    |   38 +
 src/org/antlr/analysis/NFAState.java               |  252 ++
 src/org/antlr/analysis/NFAToDFAConverter.java      | 1742 ++++++++
 src/org/antlr/analysis/RuleClosureTransition.java  |   60 +
 src/org/antlr/analysis/SemanticContext.java        |  482 +++
 src/org/antlr/analysis/State.java                  |   54 +
 src/org/antlr/analysis/StateCluster.java           |   41 +
 src/org/antlr/analysis/Transition.java             |   80 +
 src/org/antlr/codegen/ACyclicDFACodeGenerator.java |  186 +
 src/org/antlr/codegen/ANTLRTokenTypes.txt          |   95 +
 src/org/antlr/codegen/ActionTranslator.g           |  818 ++++
 src/org/antlr/codegen/ActionTranslator.tokens      |   35 +
 src/org/antlr/codegen/ActionTranslatorLexer.java   | 3640 +++++++++++++++++
 src/org/antlr/codegen/CPPTarget.java               |  140 +
 src/org/antlr/codegen/CSharpTarget.java            |   46 +
 src/org/antlr/codegen/CTarget.java                 |  238 ++
 src/org/antlr/codegen/CodeGenTreeWalker.java       | 3132 +++++++++++++++
 src/org/antlr/codegen/CodeGenTreeWalker.smap       | 2419 ++++++++++++
 .../antlr/codegen/CodeGenTreeWalkerTokenTypes.java |  135 +
 .../antlr/codegen/CodeGenTreeWalkerTokenTypes.txt  |   95 +
 src/org/antlr/codegen/CodeGenerator.java           | 1186 ++++++
 src/org/antlr/codegen/JavaTarget.java              |   44 +
 src/org/antlr/codegen/ObjCTarget.java              |  109 +
 src/org/antlr/codegen/PythonTarget.java            |  217 +
 src/org/antlr/codegen/RubyTarget.java              |   73 +
 src/org/antlr/codegen/Target.java                  |  294 ++
 src/org/antlr/codegen/codegen.g                    | 1300 ++++++
 src/org/antlr/codegen/templates/ANTLRCore.sti      |  374 ++
 src/org/antlr/codegen/templates/C/AST.stg          |  616 +++
 src/org/antlr/codegen/templates/C/ASTDbg.stg       |   45 +
 src/org/antlr/codegen/templates/C/C.stg            | 2860 ++++++++++++++
 src/org/antlr/codegen/templates/C/Dbg.stg          |  184 +
 src/org/antlr/codegen/templates/C/ST.stg           |  163 +
 src/org/antlr/codegen/templates/CSharp/AST.stg     |  465 +++
 src/org/antlr/codegen/templates/CSharp/ASTDbg.stg  |   44 +
 src/org/antlr/codegen/templates/CSharp/CSharp.stg  | 1368 +++++++
 src/org/antlr/codegen/templates/CSharp/Dbg.stg     |  192 +
 src/org/antlr/codegen/templates/CSharp/ST.stg      |  169 +
 src/org/antlr/codegen/templates/Java/AST.stg       |  460 +++
 src/org/antlr/codegen/templates/Java/ASTDbg.stg    |   65 +
 src/org/antlr/codegen/templates/Java/Dbg.stg       |  210 +
 src/org/antlr/codegen/templates/Java/Java.stg      | 1251 ++++++
 src/org/antlr/codegen/templates/Java/ST.stg        |  163 +
 src/org/antlr/codegen/templates/ObjC/AST.stg       |  615 +++
 src/org/antlr/codegen/templates/ObjC/ASTDbg.stg    |   46 +
 src/org/antlr/codegen/templates/ObjC/Dbg.stg       |  178 +
 src/org/antlr/codegen/templates/ObjC/ObjC.stg      | 1458 +++++++
 src/org/antlr/codegen/templates/Python/AST.stg     |  478 +++
 src/org/antlr/codegen/templates/Python/Python.stg  | 1281 ++++++
 src/org/antlr/codegen/templates/Ruby/Ruby.stg      | 1352 +++++++
 src/org/antlr/codegen/templates/cpp/CPP.stg        | 1351 +++++++
 src/org/antlr/misc/Barrier.java                    |   35 +
 src/org/antlr/misc/BitSet.java                     |  562 +++
 src/org/antlr/misc/IntArrayList.java               |  153 +
 src/org/antlr/misc/IntSet.java                     |   84 +
 src/org/antlr/misc/Interval.java                   |  137 +
 src/org/antlr/misc/IntervalSet.java                |  640 +++
 src/org/antlr/misc/MutableInteger.java             |   15 +
 src/org/antlr/misc/OrderedHashSet.java             |  101 +
 src/org/antlr/misc/Utils.java                      |   73 +
 src/org/antlr/test/BaseTest.java                   |  542 +++
 src/org/antlr/test/DebugTestAutoAST.java           |   32 +
 src/org/antlr/test/DebugTestRewriteAST.java        |    6 +
 src/org/antlr/test/ErrorQueue.java                 |   41 +
 src/org/antlr/test/TestASTConstruction.java        |  361 ++
 src/org/antlr/test/TestAttributes.java             | 3140 +++++++++++++++
 src/org/antlr/test/TestAutoAST.java                |  541 +++
 src/org/antlr/test/TestCharDFAConversion.java      |  553 +++
 src/org/antlr/test/TestCommonTreeNodeStream.java   |  203 +
 src/org/antlr/test/TestDFAConversion.java          | 1275 ++++++
 src/org/antlr/test/TestDFAMatching.java            |  101 +
 src/org/antlr/test/TestInterpretedLexing.java      |  175 +
 src/org/antlr/test/TestInterpretedParsing.java     |  181 +
 src/org/antlr/test/TestIntervalSet.java            |  389 ++
 src/org/antlr/test/TestJavaCodeGeneration.java     |  124 +
 src/org/antlr/test/TestLexer.java                  |  199 +
 src/org/antlr/test/TestMessages.java               |   46 +
 src/org/antlr/test/TestNFAConstruction.java        | 1192 ++++++
 src/org/antlr/test/TestRewriteAST.java             | 1273 ++++++
 src/org/antlr/test/TestRewriteTemplates.java       |  319 ++
 .../test/TestSemanticPredicateEvaluation.java      |  237 ++
 src/org/antlr/test/TestSemanticPredicates.java     |  707 ++++
 src/org/antlr/test/TestSets.java                   |  260 ++
 src/org/antlr/test/TestSymbolDefinitions.java      |  892 +++++
 .../test/TestSyntacticPredicateEvaluation.java     |  414 ++
 src/org/antlr/test/TestTemplates.java              |  343 ++
 src/org/antlr/test/TestTokenRewriteStream.java     |  462 +++
 src/org/antlr/test/TestTreeNodeStream.java         |  339 ++
 src/org/antlr/test/TestTreeParsing.java            |  245 ++
 src/org/antlr/test/TestTreeWizard.java             |  388 ++
 .../antlr/test/TestUnBufferedTreeNodeStream.java   |  111 +
 src/org/antlr/tool/ANTLRErrorListener.java         |   42 +
 src/org/antlr/tool/ANTLRLexer.java                 | 1794 +++++++++
 src/org/antlr/tool/ANTLRLexer.smap                 | 1203 ++++++
 src/org/antlr/tool/ANTLRParser.java                | 4172 ++++++++++++++++++++
 src/org/antlr/tool/ANTLRParser.smap                | 2758 +++++++++++++
 src/org/antlr/tool/ANTLRTokenTypes.java            |  133 +
 src/org/antlr/tool/ANTLRTokenTypes.txt             |   95 +
 src/org/antlr/tool/ANTLRTreePrinter.java           | 2295 +++++++++++
 src/org/antlr/tool/ANTLRTreePrinter.smap           | 1670 ++++++++
 src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java |  129 +
 src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt  |   95 +
 src/org/antlr/tool/ANTLRv3.g                       |  745 ++++
 src/org/antlr/tool/ActionAnalysis.g                |  129 +
 src/org/antlr/tool/ActionAnalysis.tokens           |    5 +
 src/org/antlr/tool/ActionAnalysisLexer.java        |  400 ++
 src/org/antlr/tool/AssignTokenTypesWalker.java     | 1949 +++++++++
 src/org/antlr/tool/AssignTokenTypesWalker.smap     | 1403 +++++++
 .../tool/AssignTokenTypesWalkerTokenTypes.java     |  133 +
 .../tool/AssignTokenTypesWalkerTokenTypes.txt      |   96 +
 src/org/antlr/tool/Attribute.java                  |  133 +
 src/org/antlr/tool/AttributeScope.java             |  179 +
 src/org/antlr/tool/BuildDependencyGenerator.java   |  193 +
 src/org/antlr/tool/DOTGenerator.java               |  383 ++
 src/org/antlr/tool/DefineGrammarItemsWalker.java   | 2995 ++++++++++++++
 src/org/antlr/tool/DefineGrammarItemsWalker.smap   | 2248 +++++++++++
 .../tool/DefineGrammarItemsWalkerTokenTypes.java   |  130 +
 .../tool/DefineGrammarItemsWalkerTokenTypes.txt    |   95 +
 src/org/antlr/tool/ErrorManager.java               |  922 +++++
 src/org/antlr/tool/FASerializer.java               |  211 +
 src/org/antlr/tool/Grammar.java                    | 2341 +++++++++++
 src/org/antlr/tool/GrammarAST.java                 |  496 +++
 .../antlr/tool/GrammarAnalysisAbortedMessage.java  |   67 +
 .../antlr/tool/GrammarDanglingStateMessage.java    |   70 +
 .../tool/GrammarInsufficientPredicatesMessage.java |   67 +
 .../antlr/tool/GrammarNonDeterminismMessage.java   |  128 +
 src/org/antlr/tool/GrammarReport.java              |  383 ++
 src/org/antlr/tool/GrammarSanity.java              |  290 ++
 src/org/antlr/tool/GrammarSemanticsMessage.java    |   88 +
 src/org/antlr/tool/GrammarSyntaxMessage.java       |   80 +
 .../antlr/tool/GrammarUnreachableAltsMessage.java  |   93 +
 src/org/antlr/tool/Interp.java                     |   87 +
 src/org/antlr/tool/Interpreter.java                |  425 ++
 src/org/antlr/tool/LeftRecursionCyclesMessage.java |   53 +
 src/org/antlr/tool/Message.java                    |  128 +
 src/org/antlr/tool/NFAFactory.java                 |  692 ++++
 src/org/antlr/tool/NameSpaceChecker.java           |  236 ++
 src/org/antlr/tool/NonRegularDecisionMessage.java  |   66 +
 src/org/antlr/tool/RandomPhrase.java               |  180 +
 src/org/antlr/tool/RecursionOverflowMessage.java   |   82 +
 src/org/antlr/tool/Rule.java                       |  562 +++
 src/org/antlr/tool/RuleLabelScope.java             |   99 +
 src/org/antlr/tool/ToolMessage.java                |   75 +
 src/org/antlr/tool/TreeToNFAConverter.java         | 2852 +++++++++++++
 src/org/antlr/tool/TreeToNFAConverter.smap         | 2084 ++++++++++
 .../antlr/tool/TreeToNFAConverterTokenTypes.java   |  131 +
 .../antlr/tool/TreeToNFAConverterTokenTypes.txt    |   95 +
 src/org/antlr/tool/antlr.g                         | 1222 ++++++
 src/org/antlr/tool/antlr.print.g                   |  362 ++
 src/org/antlr/tool/assign.types.g                  |  472 +++
 src/org/antlr/tool/buildnfa.g                      |  732 ++++
 src/org/antlr/tool/define.g                        |  615 +++
 src/org/antlr/tool/templates/depend.stg            |   12 +
 src/org/antlr/tool/templates/dot/decision-rank.st  |    1 +
 src/org/antlr/tool/templates/dot/dfa.st            |    7 +
 src/org/antlr/tool/templates/dot/edge.st           |    1 +
 src/org/antlr/tool/templates/dot/epsilon-edge.st   |    1 +
 src/org/antlr/tool/templates/dot/nfa.st            |    6 +
 src/org/antlr/tool/templates/dot/state.st          |    1 +
 src/org/antlr/tool/templates/dot/stopstate.st      |    1 +
 .../tool/templates/messages/formats/antlr.stg      |   42 +
 .../antlr/tool/templates/messages/formats/gnu.stg  |   42 +
 .../tool/templates/messages/formats/vs2005.stg     |   42 +
 .../antlr/tool/templates/messages/languages/en.stg |  278 ++
 248 files changed, 110169 insertions(+)

diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..1d1d5d6
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,26 @@
+[The "BSD licence"]
+Copyright (c) 2003-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000..a88c0f9
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,2017 @@
+Early Access ANTLR v3
+ANTLR 3.0.1
+August 13, 2007
+
+Terence Parr, parrt at cs usfca edu
+ANTLR project lead and supreme dictator for life
+University of San Francisco
+
+INTRODUCTION 
+
+[Java, C, Python, C# targets are available; others coming soon]
+
+Welcome to ANTLR v3!  I've been working on this for nearly 4 years and it's
+finally ready!  I have lots of features to add later, but this will be
+the first set.
+
+You should use v3 in conjunction with ANTLRWorks:
+
+    http://www.antlr.org/works/index.html 
+
+The book will also help you a great deal (printed May 15, 2007); you
+can also buy the PDF:
+
+http://www.pragmaticprogrammer.com/titles/tpantlr/index.html
+
+See the getting started document:
+
+http://www.antlr.org/wiki/display/ANTLR3/FAQ+-+Getting+Started
+
+You also have the examples plus the source to guide you.
+
+See the new wiki FAQ:
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+
+and general doc root:
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+3+Wiki+Home
+
+Please help add/update FAQ entries.
+
+If all else fails, you can buy support or ask the antlr-interest list:
+
+    http://www.antlr.org/support.html
+
+I have made very little effort at this point to deal well with
+erroneous input (e.g., bad syntax might make ANTLR crash).  I will clean
+this up after I've rewritten v3 in v3.  v3 is written in v2 at the moment.
+
+Per the license in LICENSE.txt, this software is not guaranteed to
+work and might even destroy all life on this planet:
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+EXAMPLES
+
+ANTLR v3 sample grammars (currently for C, C#, Java targets):
+
+    http://www.antlr.org/download/examples-v3.tar.gz
+
+contains the following examples: LL-star, cminus, dynamic-scope,
+fuzzy, hoistedPredicates, island-grammar, java, python, scopes,
+simplecTreeParser, treeparser, tweak, xmlLexer.
+
+Also check out Mantra Programming Language for a prototype (work in
+progress) using v3:
+
+    http://www.linguamantra.org/
+
+----------------------------------------------------------------------
+
+What is ANTLR?
+
+ANTLR stands for (AN)other (T)ool for (L)anguage (R)ecognition and was
+originally known as PCCTS.  ANTLR is a language tool that provides a
+framework for constructing recognizers, compilers, and translators
+from grammatical descriptions containing actions.  Target language list:
+
+http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
+
+----------------------------------------------------------------------
+
+How is ANTLR v3 different from ANTLR v2?
+
+See "What is the difference between ANTLR v2 and v3?"
+
+    http://www.antlr.org/wiki/pages/viewpage.action?pageId=719
+
+See migration guide:
+
+    http://www.antlr.org/wiki/display/ANTLR3/Migrating+from+ANTLR+2+to+ANTLR+3
+
+----------------------------------------------------------------------
+
+How do I install this damn thing?
+
+Just untar and you'll get:
+
+antlr-3.0.1/README.txt (this file)
+antlr-3.0.1/LICENSE.txt
+antlr-3.0.1/src/org/antlr/...
+antlr-3.0.1/lib/stringtemplate-3.0.jar (3.0.1 needs 3.0)
+antlr-3.0.1/lib/antlr-2.7.7.jar
+antlr-3.0.1/lib/antlr-3.0.1.jar
+
+Then you need to add all the jars in lib to your CLASSPATH.
+
+Please see the FAQ
+
+http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+
+----------------------------------------------------------------------
+
+CHANGES
+
+INCOMPATIBILITY WARNING -- templates have changed; must regen output from
+                           grammars.  Runtime libraries have also changed.
+                           Debug event listener interface has changed also.
+
+3.0.1 - August 13, 2007
+
+[See target pages on the wiki for more information on the non-Java targets]
+
+August 7, 2007
+
+* added escaping of double quotes in DOTTreeGenerator
+
+July 22, 2007
+
+* fixed dynamic scope implementation in lexers. They were not creating new scope
+  entries on the stack.  Unsupported feature!
+
+July 19, 2007
+
+* implemented new unique ID; GC was causing non unique hash codes.  Debugging
+  tree grammars was messing up.
+
+July 17, 2007
+
+* Added line/charposition to node socket events and event dump so
+  we have more info during tree parsing.  Only works if your
+  tree adaptor returns a value Token object from getToken(treenode)
+  with line/col set.  Refactored consumeNode/LN to use deserializeNode().
+
+* Fixed mismatched tree node exceptions; for imaginary nodes, it said
+  "missing null".  Now prints the token type we found.
+
+* Cleaned up exception stuff. MismatchedTreeNodeException was setting
+  line/col, but only RecognitionException should do that.
+
+* If imaginary token gets a mismatch, there is no line info.  Search
+  backwards in stream if input node stream supports to find last
+  node with good line/col info. E.g.,
+
+ANTLRv3Tree.g: node from after line 156:72 mismatched tree node: EOA expecting <UP>
+
+  which used to be:
+
+ANTLRv3Tree.g: node from line 0:0 mismatched tree node: null expecting <UP>
+
+* mismatched tree node exceptions were not sent to the debug event stream.
+  Due to a type being slightly different on recoverFromMismatchedToken()
+  in DebugTreeParser.  Was calling BaseRecognizer version not subclass.
+  Now we get:
+
+  9459:   Recognition exception MismatchedTreeNodeException(0!=0)
+
+* List labels were not allowed as root nodes in tree rewrites like
+  ^($listlabel ...).  Had to add a template to AST.stg:
+
+  /** Gen ^($label ...) where label+=... */
+  rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+
+July 16, 2007
+
+* fixed nextNode in RewriteRuleSubtreeStream was dup'ing too much,
+  screwing up debug event stream.  Also there was a bug in how
+  the rewrite tree stream stuff decided to dup nodes.
+
+* fixed bug in LT for tree parsing; text was not transmitted properly;
+  only single words worked.
+
+* made decision for rule put line/col on colon not first token of first alt.
+
+* remote ProxyToken now emits token index for easier debugging when looking
+  at AW's event stream.  For example, the @5 here is the token index:
+
+  31	Consume hidden [ /<64>,channel=99,30:7, @5]
+
+* same is true for consume nodes now:
+
+  25586	Consume node [')'/, <44>, 4712040, at 1749]	25
+
+  When debugging tree parsers, it helps to track errors when you know
+  what corresponding input symbol created this tree node.
+
+* Changed debug events associated with trees quite a bit.  Passes nodes around
+  now rather than text, type, unique IDs etc...  Mostly affects internal stuff.
+  Target developers will have some work in their runtime to do to match
+  this change. :(  BUT, there is only a slight tweak in the Dbg.stg
+  and ASTDbg.stg templates.
+  Interface just didn't make sense as is.  If you turn on debugging, and
+  want to track a node creation, you want the node pointer not its ID,
+  text, etc...
+  Added ProxyTree for passing across socket.  Has line/charpos and tokenIndex
+
+July 15, 2007
+
+* added null ptr protection in CommonTreeAdaptor.
+
+July 14, 2007
+
+* null child in TreeAdaptor does nothing now.  Changed interface and
+  implementation.  Changed DebugTreeAdaptor to not fire events on null add
+  as well.
+
+July 12, 2007
+
+* added get method for the line/col to DFA map in Grammar.java
+
+July 7, 2007
+
+* fixed wrong order of test for exceptions in Lexer.getErrorMessage()
+
+June 28, 2007
+
+* Added ability to set the port number in the constructor for the debug parser.
+
+June 5, 2007
+
+* Changed (hidden) option -verbose to -Xnfastates; this just prints out the NFA states along each nondeterministic path for nondeterminism warnings.
+
+May 18, 2007
+
+* there were some dependencies with org.antlr.* that I removed from
+  org.antlr.runtime.*
+
+3.0 final - May 17, 2007
+
+May 14, 2007
+
+* Auto backtracking didn't work with ! and ^ suffixes on first element
+  of an alt.
+
+* Auto backtracking didn't work with an action as first element.
+
+May 10, 2007
+
+* turn off the warning about no locale messages:
+ no such locale file org/antlr/tool/templates/messages/languages/ru.stg retrying with English locale
+
+May 5, 2007
+
+* moving org.antlr.runtime to runtime/Java/src/org/... Other target
+  source / libs are under runtime/targetname.
+
+May 4, 2007
+
+* You could not use arguments on a token reference that was a root in a
+  tree rewrite rule like -> ^(ID[args] ...).
+
+May 3, 2007
+
+* Fixed ANTLR-82.  Actions after the root were considered part of
+  an optional child.  They were not always executed.  Required a change
+  to the ANTLRCore.sti interface for tree() template.
+
+May 2, 2007
+
+* Fixed ANTLR-117. Wasn't building decisions properly for subrules in
+  syntactic predicates.
+
+April 22, 2007
+
+* Made build.xml ref all jars in antlr lib.  Thanks to Miguel Ping.
+
+* Fixed ANTLR-11
+
+* Now labels on ranges and such in lexer work properly.
+
+* ActionAnalysisLexer was in wrong package.
+
+April 21, 2007
+
+* Pushing a huge update that fixes:
+	http://www.antlr.org:8888/browse/ANTLR-112
+	http://www.antlr.org:8888/browse/ANTLR-110
+	http://www.antlr.org:8888/browse/ANTLR-109
+	http://www.antlr.org:8888/browse/ANTLR-103
+	http://www.antlr.org:8888/browse/ANTLR-97
+	http://www.antlr.org:8888/browse/ANTLR-113
+	http://www.antlr.org:8888/browse/ANTLR-66
+	http://www.antlr.org:8888/browse/ANTLR-98
+	http://www.antlr.org:8888/browse/ANTLR-24
+	http://www.antlr.org:8888/browse/ANTLR-114
+	http://www.antlr.org:8888/browse/ANTLR-5
+	http://www.antlr.org:8888/browse/ANTLR-6
+
+  Basically, I gutted the way AST rewrites work.  MUCH better.
+
+* Fixed lots of little label issues in the lexer.  Couldn't do x+=ID
+  in lexer, for example.  Fixed ANTLR-114, ANTLR-112
+
+* Isolated EOT transition in lexer generated dangling else clause.
+  Fixed ANTLR-113.
+
+April 17, 2007
+
+* Fixed a major problem with gated semantic predicates.  Added more
+  unit tests.
+
+* Fixed bug in cyclic DFA with syntactic predicates.  Wasn't rewinding
+  properly.  Further, mark() in token stream did not fill buffer so
+  when you rewound back to last marker index was -1 not 0.  At same time
+  I fixed ANTLR-103.  Syn preds evaluated only once now.
+
+* Altered code gen file writing so it writes directly to a file
+  instead of building a big string and then writing that out.  Should
+  be faster and much less memory intensive.
+
+* Fixed so antlr writes files to correct location again.  See:
+
+http://www.antlr.org/wiki/pages/viewpage.action?pageId=1862
+
+3.0b7 - April 12, 2007
+
+April 10, 2007
+
+* Allows -> {...} actions now when building ASTs.  Fixed ANTLR-14.
+
+* Allows ! on sets and wildcard now during output=AST option. Fixed ANTLR-17.
+
+* Fixed ANTLR-92 bug.  Couldn't use sets with -> tree construction.
+
+* No lexer rule for a token type is now a warning.
+
+* Fixed set labels in lexer; ANTLR-60 bug
+
+* Fixed problem with duplicate state variable definitions in switch-case
+
+April 9, 2007
+
+* Gated predicates didn't work properly in cyclic DFA.
+
+April 7, 2007
+
+* Couldn't have more than one set per rule it seems.  Fixed.
+
+April 3, 2007
+
+* Fix a problem in my unused label optimization.  Added new
+  pass over actions to examine them.
+
+* RuleReturnScope has method back:
+  /** Has a value potentially if output=template; Don't use StringTemplate
+   *  type as it then causes a dependency with ST lib.
+   */
+  public Object getTemplate() { return null; }
+
+March 30, 2007
+
+* Fixed ANTLR-8.  Labels to rules w/o return values caused compile errors.
+
+* Fixed ANTLR-89; semantic predicates in lexer sometimes
+  caused exception in code gen.
+
+* Fixed ANTLR-36; remove runtime dependency with ST
+
+March 29, 2007
+
+* Over last few days, I've gutted how ANTLR handles sets of chars or
+  tokens.  I cleaned up a lot of stuff in the grammars and added lots
+  of unit tests.
+
+March 26, 2007
+
+* CommonTreeNodeStream didn't push correctly; couldn't handle very
+  deeply nested trees.
+
+* Fixed bug that E : 'a' 'b' ; made E be seen as an alias of 'a'.
+
+March 22, 2007
+
+* Working with Egor Ushakov from Sun Optimization / NetBeans team I
+  made all the Java lexer transition tables static w/o screwing up
+  ability to reference semantic predicates etc...  Only changed Java.stg
+
+* cached text string in CommonToken.getText(); saves on repeated calls;
+  Java mode.
+
+* made all generated methods final; saves a few percent speed according to
+  Egor Ushakov (Java only).
+
+* removed most assignments from each lexer rule and even the Lexer.emit()
+  call!  All done in nextToken now.  Saves on code gen size and a wee bit of
+  execution speed probably.  Variables became fields: type, channel, line,
+  etc... Now emit() needs no args even.  Again, Egor helped on this.
+
+March 17, 2007
+
+* Jonathan DeKlotz updated C# templates to be 3.0b6 current
+
+March 14, 2007
+
+* Manually-specified (...)=> force backtracking eval of that predicate.
+  backtracking=true mode does not however.  Added unit test.
+
+March 14, 2007
+
+* Fixed bug in lexer where ~T didn't compute the set from rule T.
+
+* Added -Xnoinlinedfa make all DFA with tables; no inline prediction with IFs
+
+* Fixed http://www.antlr.org:8888/browse/ANTLR-80.
+  Sem pred states didn't define lookahead vars.
+
+* Fixed http://www.antlr.org:8888/browse/ANTLR-91.  
+  When forcing some acyclic DFA to be state tables, they broke.
+  Forcing all DFA to be state tables should give same results.
+
+March 12, 2007
+
+* setTokenSource in CommonTokenStream didn't clear tokens list.
+  setCharStream calls reset in Lexer.
+
+* Altered -depend.  No longer printing grammar files for multiple input
+  files with -depend.  Doesn't show T__.g temp file anymore. Added
+  TLexer.tokens.  Added .h files if defined.
+
+February 11, 2007
+
+* Added -depend command-line option that, instead of processing files,
+  it shows you what files the input grammar(s) depend on and what files
+  they generate. For combined grammar T.g:
+
+  $ java org.antlr.Tool -depend T.g
+
+  You get:
+
+  TParser.java : T.g
+  T.tokens : T.g
+  T__.g : T.g
+
+  Now, assuming U.g is a tree grammar ref'd T's tokens:
+
+  $ java org.antlr.Tool -depend T.g U.g
+
+  TParser.java : T.g
+  T.tokens : T.g
+  T__.g : T.g
+  U.g: T.tokens
+  U.java : U.g
+  U.tokens : U.g
+
+  Handles spaces by escaping them.  Pays attention to -o, -fo and -lib.
+  Dir 'x y' is a valid dir in current dir.
+
+  $ java org.antlr.Tool -depend -lib /usr/local/lib -o 'x y' T.g U.g
+  x\ y/TParser.java : T.g
+  x\ y/T.tokens : T.g
+  x\ y/T__.g : T.g
+  U.g: /usr/local/lib/T.tokens
+  x\ y/U.java : U.g
+  x\ y/U.tokens : U.g
+
+  You have API access via org.antlr.tool.BuildDependencyGenerator class:
+  getGeneratedFileList(), getDependenciesFileList().  You can also access
+  the output template: getDependencies().  The file
+  org/antlr/tool/templates/depend.stg contains the template.  You can
+  modify as you want.  File objects go in so you can play with path etc...
+
+February 10, 2007
+
+* no more .gl files generated.  All .g all the time.
+
+* changed @finally to be @after and added a finally clause to the
+  exception stuff.  I also removed the superfluous "exception"
+  keyword.  Here's what the new syntax looks like:
+
+  a
+  @after { System.out.println("ick"); }
+    : 'a'
+    ;        
+    catch[RecognitionException e] { System.out.println("foo"); }
+    catch[IOException e] { System.out.println("io"); }
+    finally { System.out.println("foobar"); }
+
+  @after executes after bookkeeping to set $rule.stop, $rule.tree but
+  before scopes pop and any memoization happens.  Dynamic scopes and
+  memoization are still in generated finally block because they must
+  exec even if error in rule.  The @after action and tree setting
+  stuff can technically be skipped upon syntax error in rule.  [Later
+  we might add something to finally to stick an ERROR token in the
+  tree and set the return value.]  Sequence goes: set $stop, $tree (if
+  any), @after (if any), pop scopes (if any), memoize (if needed),
+  grammar finally clause.  Last 3 are in generated code's finally
+  clause.
+
+3.0b6 - January 31, 2007
+
+January 30, 2007
+
+* Fixed bug in IntervalSet.and: it returned the same empty set all the time
+  rather than new empty set.  Code altered the same empty set.
+
+* Made analysis terminate faster upon a decision that takes too long;
+  it seemed to keep doing work for a while.  Refactored some names
+  and updated comments.  Also made it terminate when it realizes it's
+  non-LL(*) due to recursion.  just added terminate conditions to loop
+  in convert().
+
+* Sometimes fatal non-LL(*) messages didn't appear; instead you got
+  "antlr couldn't analyze", which is actually untrue.  I had the
+  order of some prints wrong in the DecisionProbe.
+
+* The code generator incorrectly detected when it could use a fixed,
+  acyclic inline DFA (i.e., using an IF).  Upon non-LL(*) decisions
+  with predicates, analysis made cyclic DFA.  But this stops
+  the computation detecting whether they are cyclic.  I just added
+  a protection in front of the acyclic DFA generator to avoid if
+  non-LL(*).  Updated comments.
+
+January 23, 2007
+
+* Made tree node streams use adaptor to create navigation nodes.
+  Thanks to Emond Papegaaij.
+
+January 22, 2007
+
+* Added lexer rule properties: start, stop
+
+January 1, 2007
+
+* analysis failsafe is back on; if a decision takes too long, it bails out
+  and uses k=1
+
+January 1, 2007
+
+* += labels for rules only work for output option; previously elements
+  of list were the return value structs, but are now either the tree or
+  StringTemplate return value.  You can label different rules now
+  x+=a x+=b.
+
+December 30, 2006
+
+* Allow \" to work correctly in "..." template.
+
+December 28, 2006
+
+* errors that are now warnings: missing AST label type in trees.
+  Also "no start rule detected" is warning.
+
+* tree grammars also can do rewrite=true for output=template.
+  Only works for alts with single node or tree as alt elements.
+  If you are going to use $text in a tree grammar or do rewrite=true
+  for templates, you must use in your main:
+
+  nodes.setTokenStream(tokens);
+
+* You get a warning for tree grammars that do rewrite=true and
+  output=template and have -> for alts that are not simple nodes
+  or simple trees.  new unit tests in TestRewriteTemplates at end.
+
+December 27, 2006
+
+* Error message appears when you use -> in tree grammar with
+  output=template and rewrite=true for alt that is not simple
+  node or tree ref.
+
+* no more $stop attribute for tree parsers; meaningless/useless.
+  Removed from TreeRuleReturnScope also.
+
+* rule text attribute in tree parser must pull from token buffer.
+  Makes no sense otherwise.  added getTokenStream to TreeNodeStream
+  so rule $text attr works.  CommonTreeNodeStream etc... now let
+  you set the token stream so you can access later from tree parser.
+  $text is not well-defined for rules like
+
+     slist : stat+ ;
+
+  because stat is not a single node nor rooted with a single node.
+  $slist.text will get only first stat.  I need to add a warning about
+  this...
+
+* Fixed http://www.antlr.org:8888/browse/ANTLR-76 for Java.
+  Enhanced TokenRewriteStream so it accepts any object; converts
+  to string at last second.  Allows you to rewrite with StringTemplate
+  templates now :)
+
+* added rewrite option that makes -> template rewrites do replace ops for
+  TokenRewriteStream input stream.  In output=template and rewrite=true mode
+  same as before 'cept that the parser does
+
+    ((TokenRewriteStream)input).replace(
+	      ((Token)retval.start).getTokenIndex(),
+	      input.LT(-1).getTokenIndex(),
+	      retval.st);
+
+  after each rewrite so that the input stream is altered.  Later refs to
+  $text will have rewrites.  Here's a sample test program for grammar Rew.
+
+        FileReader groupFileR = new FileReader("Rew.stg");
+        StringTemplateGroup templates = new StringTemplateGroup(groupFileR);
+        ANTLRInputStream input = new ANTLRInputStream(System.in);
+        RewLexer lexer = new RewLexer(input);
+        TokenRewriteStream tokens = new TokenRewriteStream(lexer);
+        RewParser parser = new RewParser(tokens);
+        parser.setTemplateLib(templates);
+        parser.program();
+        System.out.println(tokens.toString());
+        groupFileR.close();
+
+December 26, 2006
+
+* BaseTree.dupTree didn't dup recursively.
+
+December 24, 2006
+
+* Cleaned up some comments and removed field treeNode
+  from MismatchedTreeNodeException class.  It is "node" in
+  RecognitionException.
+
+* Changed type from Object to BitSet for expecting fields in
+  MismatchedSetException and MismatchedNotSetException
+
+* Cleaned up error printing in lexers and the messages that it creates.
+
+* Added this to TreeAdaptor:
+	/** Return the token object from which this node was created.
+	 *  Currently used only for printing an error message.
+	 *  The error display routine in BaseRecognizer needs to
+	 *  display where in the input the error occurred. If your
+	 *  tree implementation does not store information that can
+	 *  lead you to the token, you can create a token filled with
+	 *  the appropriate information and pass that back.  See
+	 *  BaseRecognizer.getErrorMessage().
+	 */
+	public Token getToken(Object t);
+
+December 23, 2006
+
+* made BaseRecognizer.displayRecognitionError nonstatic so people can
+  override it. Not sure why it was static before.
+
+* Removed state/decision message that comes out of no 
+  viable alternative exceptions, as that was too much.
+  removed the decision number from the early exit exception
+  also.  During development, you can simply override
+  displayRecognitionError from BaseRecognizer to add the stuff
+  back in if you want.
+
+* made output go to an output method you can override: emitErrorMessage()
+
+* general cleanup of the error emitting code in BaseRecognizer.  Lots
+  more stuff you can override: getErrorHeader, getTokenErrorDisplay,
+  emitErrorMessage, getErrorMessage.
+
+December 22, 2006
+
+* Altered Tree.Parser.matchAny() so that it skips entire trees if
+  node has children otherwise skips one node.  Now this works to
+  skip entire body of function if single-rooted subtree:
+  ^(FUNC name=ID arg=ID .)
+
+* Added "reverse index" from node to stream index.  Override
+  fillReverseIndex() in CommonTreeNodeStream if you want to change.
+  Use getNodeIndex(node) to find stream index for a specific tree node.
+  See getNodeIndex(), reverseIndex(Set tokenTypes),
+  reverseIndex(int tokenType), fillReverseIndex().  The indexing
+  costs time and memory to fill, but pulling stuff out will be lots
+  faster as it can jump from a node ptr straight to a stream index.
+
+* Added TreeNodeStream.get(index) to make it easier for interpreters to
+  jump around in tree node stream.
+
+* New CommonTreeNodeStream buffers all nodes in stream for fast jumping
+  around.  It now has push/pop methods to invoke other locations in
+  the stream for building interpreters.
+
+* Moved CommonTreeNodeStream to UnBufferedTreeNodeStream and removed
+  Iterator implementation.  moved toNodesOnlyString() to TestTreeNodeStream
+
+* [BREAKS ANY TREE IMPLEMENTATION]
+  made CommonTreeNodeStream work with any tree node type.  TreeAdaptor
+  now implements isNil so must add; trivial, but does break back
+  compatibility.
+
+December 17, 2006
+
+* Added traceIn/Out methods to recognizers so that you can override them;
+  previously they were in-line print statements. The message has also
+  been slightly improved.
+
+* Factored BuildParseTree into debug package; cleaned stuff up. Fixed
+  unit tests.
+
+December 15, 2006
+
+* [BREAKS ANY TREE IMPLEMENTATION]
+  org.antlr.runtime.tree.Tree; needed to add get/set for token start/stop
+  index so CommonTreeAdaptor can assume Tree interface not CommonTree
+  implementation.  Otherwise, no way to create your own nodes that satisfy
+  Tree because CommonTreeAdaptor was doing 
+
+	public int getTokenStartIndex(Object t) {
+		return ((CommonTree)t).startIndex;
+	}
+
+  Added to Tree:
+
+	/**  What is the smallest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStartIndex();
+
+	void setTokenStartIndex(int index);
+
+	/**  What is the largest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStopIndex();	
+
+	void setTokenStopIndex(int index);
+
+December 13, 2006
+ 
+* Added org.antlr.runtime.tree.DOTTreeGenerator so you can generate DOT
+  diagrams easily from trees.
+
+	CharStream input = new ANTLRInputStream(System.in);
+	TLexer lex = new TLexer(input);
+	CommonTokenStream tokens = new CommonTokenStream(lex);
+	TParser parser = new TParser(tokens);
+	TParser.e_return r = parser.e();
+	Tree t = (Tree)r.tree;
+	System.out.println(t.toStringTree());
+	DOTTreeGenerator gen = new DOTTreeGenerator();
+	StringTemplate st = gen.toDOT(t);
+	System.out.println(st);
+
+* Changed the way mark()/rewind() work in CommonTreeNode stream to mirror
+  more flexible solution in ANTLRStringStream.  Forgot to set lastMarker
+  anyway.  Now you can rewind to non-most-recent marker.
+
+December 12, 2006
+
+* Temp lexers now end in .gl (T__.gl, for example)
+
+* TreeParser suffix no longer generated for tree grammars
+
+* Defined reset for lexer, parser, tree parser; rewinds the input stream also
+
+December 10, 2006
+
+* Made Grammar.abortNFAToDFAConversion() abort in middle of a DFA.
+
+December 9, 2006
+
+* fixed bug in OrderedHashSet.add().  It didn't track elements correctly.
+
+December 6, 2006
+
+* updated build.xml for future Ant compatibility, thanks to Matt Benson.
+
+* various tests in TestRewriteTemplate and TestSyntacticPredicateEvaluation
+  were using the old 'channel' vs. new '$channel' notation.
+  TestInterpretedParsing didn't pick up an earlier change to CommonToken.
+  Reported by Matt Benson.
+
+* fixed platform dependent test failures in TestTemplates, supplied by Matt
+  Benson.
+
+November 29, 2006
+
+*  optimized semantic predicate evaluation so that p||!p yields true.
+
+November 22, 2006
+
+* fixed bug that prevented var = $rule.some_retval from working in anything
+  but the first alternative of a rule or subrule.
+
+* attribute names containing digits were not allowed, this is now fixed,
+  allowing attributes like 'name1' but not '1name1'.
+
+November 19, 2006
+
+* Removed LeftRecursionMessage and apparatus because it seems that I check
+  for left recursion upfront before analysis and everything gets specified as
+  recursion cycles at this point.
+
+November 16, 2006
+
+* TokenRewriteStream.replace was not passing programName to next method.
+
+November 15, 2006
+
+* updated DOT files for DFA generation to make smaller circles.
+
+* made epsilon edges italics in the NFA diagrams.
+
+3.0b5 - November 15, 2006
+
+The biggest thing is that your grammar file names must match the grammar name
+inside (your generated class names will also be different) and we use
+$channel=HIDDEN now instead of channel=99 inside lexer actions.
+Should be compatible other than that.   Please look at complete list of
+changes.
+
+November 14, 2006
+
+* Force token index to be -1 for CommonIndex in case not set.
+
+November 11, 2006
+
+* getUniqueID for TreeAdaptor now uses identityHashCode instead of hashCode.
+
+November 10, 2006
+
+* No grammar nondeterminism warning now when wildcard '.' is final alt.
+  Examples:
+
+	a : A | B | . ;
+
+	A : 'a'
+	  | .
+	  ;
+
+	SL_COMMENT
+	    : '//' (options {greedy=false;} : .)* '\r'? '\n'
+	    ;
+
+	SL_COMMENT2
+	    : '//' (options {greedy=false;} : 'x'|.)* '\r'? '\n'
+	    ;
+
+
+November 8, 2006
+
+* Syntactic predicates did not get hoisted properly upon non-LL(*) decisions.  Other hoisting issues fixed.  Cleaned up code.
+
+* Removed failsafe that checked to see if I'm spending too much time on a single DFA; I don't think we need it anymore.
+
+November 3, 2006
+
+* $text, $line, etc... were not working in assignments. Fixed and added
+  test case.
+
+* $label.text translated to label.getText in lexer even if label was on a char
+
+November 2, 2006
+
+* Added error if you don't specify what the AST type is; actions in tree
+  grammar won't work without it.
+
+  $ cat x.g
+  tree grammar x;
+  a : ID {String s = $ID.text;} ;
+
+  ANTLR Parser Generator   Early Access Version 3.0b5 (??, 2006)  1989-2006
+  error: x.g:0:0: (152) tree grammar x has no ASTLabelType option
+
+November 1, 2006
+
+* $text, $line, etc... were not working properly within lexer rule.
+
+October 31, 2006
+
+* Finally actions now execute before dynamic scopes are popped in the
+  rule. Previously it was not possible to access the rule's scoped
+  variables in a finally action.
+
+October 29, 2006
+
+* Altered ActionTranslator to emit errors on setting read-only attributes
+  such as $start, $stop, $text in a rule. Also forbid setting any attributes
+  in rules/tokens referenced by a label or name.
+  Setting dynamic scopes's attributes and your own parameter attributes
+  is legal.
+
+October 27, 2006
+
+* Altered how ANTLR figures out what decision is associated with which
+  block of grammar.  Makes ANTLRWorks correctly find DFA for a block.
+
+October 26, 2006
+
+* Fixed bug where EOT transitions led to no NFA configs in a DFA state,
+  yielding an error in DFA table generation.
+
+* renamed action.g to ActionTranslator.g
+  the ActionTranslator class is now called ActionTranslatorLexer, as ANTLR
+  generates this classname now. Fixed rest of codebase accordingly.
+
+* added rules recognizing setting of scopes' attributes to ActionTranslator.g
+  the Objective C target needed access to the right-hand side of the assignment
+  in order to generate correct code
+
+* changed ANTLRCore.sti to reflect the new mandatory templates to support the above
+  namely: scopeSetAttributeRef, returnSetAttributeRef and the ruleSetPropertyRef_*
+  templates, with the exception of ruleSetPropertyRef_text. we cannot set this attribute
+
+October 19, 2006
+
+* Fixed 2 bugs in DFA conversion that caused exceptions.
+  altered functionality of getMinElement so it ignores elements<0.
+
+October 18, 2006
+
+* moved resetStateNumbersToBeContiguous() to after issuing of warnings;
+  an internal error in that routine should make more sense as issues
+  with decision will appear first.
+
+* fixed cut/paste bug I introduced when fixed EOF in min/max
+  bug. Prevented C grammar from working briefly.
+
+October 17, 2006
+
+* Removed a failsafe that seems to be unnecessary; it ensured the DFA
+  didn't get too big.  It was resulting in some failures in code generation that
+  led me on quite a strange debugging trip.
+
+October 16, 2006
+
+* Use channel=HIDDEN not channel=99 to put tokens on hidden channel.
+
+October 12, 2006
+
+* ANTLR now has a customizable message format for errors and warnings,
+  to make it easier to fulfill requirements by IDEs and such.
+  The format to be used can be specified via the '-message-format name'
+  command line switch. The default for name is 'antlr', also available
+  at the moment is 'gnu'. This is done via StringTemplate, for details
+  on the requirements look in org/antlr/tool/templates/messages/formats/
+
+* line numbers for lexers in combined grammars are now reported correctly.
+
+September 29, 2006
+
+* ANTLRReaderStream improperly checked for end of input.
+
+September 28, 2006
+
+* For ANTLRStringStream, LA(-1) was off by one...gave you LA(-2).
+
+3.0b4 - August 24, 2006
+
+* error when no rules in grammar.  doesn't crash now.
+
+* Token is now an interface.
+
+* remove dependence on non runtime classes in runtime package.
+
+* filename and grammar name must be same Foo in Foo.g.  Generates FooParser,
+  FooLexer, ...  Combined grammar Foo generates Foo$Lexer.g which generates
+  FooLexer.java.  tree grammars generate FooTreeParser.java
+
+August 24, 2006
+
+* added C# target to lib, codegen, templates
+
+August 11, 2006
+
+* added tree arg to navigation methods in treeadaptor
+
+August 07, 2006
+
+* fixed bug related to (a|)+ on end of lexer rules.  crashed instead
+  of warning.
+
+* added warning that interpreter doesn't do synpreds yet
+
+* allow different source of classloader:
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+if ( cl==null ) {
+    cl = this.getClass().getClassLoader();
+}
+
+
+July 26, 2006
+
+* compressed DFA edge tables significantly.  All edge tables are
+  unique. The transition table can reuse arrays.  Look like this now:
+
+     public static readonly DFA30_transition0 =
+     	new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...};
+         public static readonly DFA30_transition1 =
+     	new short[] { 21 };
+      public static readonly short[][] DFA30_transition = {
+     	  DFA30_transition0,
+     	  DFA30_transition0,
+     	  DFA30_transition1,
+     	  ...
+      };
+
+* If you defined both a label like EQ and '=', sometimes the '=' was
+  used instead of the EQ label.
+
+* made headerFile template have same arg list as outputFile for consistency
+
+* outputFile, lexer, genericParser, parser, treeParser templates
+  reference cyclicDFAs attribute which was no longer used after I
+  started the new table-based DFA.  I made cyclicDFADescriptors
+  argument to outputFile and headerFile (only).  I think this is
+  correct as only OO languages will want the DFA in the recognizer.
+  At the top level, C and friends can use it.  Changed name to use
+  cyclicDFAs again as it's a better name probably.  Removed parameter
+  from the lexer, ...  For example, my parser template says this now:
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+* made all token ref token types go thru code gen's
+  getTokenTypeAsTargetLabel()
+
+* no more computing DFA transition tables for acyclic DFA.
+
+July 25, 2006
+
+* fixed a place where I was adding syn predicates into rewrite stuff.
+
+* turned off invalid token index warning in AW support; had a problem.
+
+* bad location event generated with -debug for synpreds in autobacktrack mode.
+
+July 24, 2006
+
+* changed runtime.DFA so that it treats all chars and token types as
+  char (unsigned 16 bit int).  -1 becomes '\uFFFF' then or 65535.
+
+* changed MAX_STATE_TRANSITIONS_FOR_TABLE to be 65534 by default
+  now. This means that all states can use a table to do transitions.
+
+* was not making synpreds on (C)* type loops with backtrack=true
+
+* was copying tree stuff and actions into synpreds with backtrack=true
+
+* was making synpreds on even single alt rules / blocks with backtrack=true
+
+3.0b3 - July 21, 2006
+
+* ANTLR fails to analyze complex decisions much less frequently.  It
+  turns out that the set of decisions for which ANTLR fails (times
+  out) is the same set (so far) of non-LL(*) decisions.  Morever, I'm
+  able to detect this situation quickly and report rather than timing
+  out. Errors look like:
+
+  java.g:468:23: [fatal] rule concreteDimensions has non-LL(*)
+    decision due to recursive rule invocations in alts 1,2.  Resolve
+    by left-factoring or using syntactic predicates with fixed k
+    lookahead or use backtrack=true option.
+
+  This message only appears when k=*.
+
+* Shortened no viable alt messages to not include decision
+  description:
+
+[compilationUnit, declaration]: line 8:8 decision=<<67:1: declaration
+: ( ( fieldDeclaration )=> fieldDeclaration | ( methodDeclaration )=>
+methodDeclaration | ( constructorDeclaration )=>
+constructorDeclaration | ( classDeclaration )=> classDeclaration | (
+interfaceDeclaration )=> interfaceDeclaration | ( blockDeclaration )=>
+blockDeclaration | emptyDeclaration );>> state 3 (decision=14) no
+viable alt; token=[@1,184:187='java',<122>,8:8]
+
+  too long and hard to read.
+
+July 19, 2006
+
+* Code gen bug: states with no emanating edges were ignored by ST.
+  Now an empty list is used.
+
+* Added grammar parameter to recognizer templates so they can access
+  properties like getName(), ...
+
+July 10, 2006
+
+* Fixed the gated pred merged state bug.  Added unit test.
+
+* added new method to Target: getTokenTypeAsTargetLabel()
+
+July 7, 2006
+
+* I was doing an AND instead of OR in the gated predicate stuff.
+  Thanks to Stephen Kou!
+
+* Reduce op for combining predicates was insanely slow sometimes and
+  didn't actually work well.  Now it's fast and works.
+
+* There is a bug in merging of DFA stop states related to gated
+  preds...turned it off for now.
+
+3.0b2 - July 5, 2006
+
+July 5, 2006
+
+* token emission not properly protected in lexer filter mode.
+
+* EOT, EOT DFA state transition tables should be init'd to -1 (only
+  was doing this for compressed tables).  Fixed.
+
+* in trace mode, exit method not shown for memoized rules
+
+* added -Xmaxdfaedges to allow you to increase number of edges allowed
+  for a single DFA state before it becomes "special" and can't fit in
+  a simple table.
+
+* Bug in tables.  Shorts are signed so min/max tables for DFA are now
+  char[].  Bizarre.
+
+July 3, 2006
+
+* Added a method to reset the tool error state for current thread.
+  See ErrorManager.java
+
+* [Got this working properly today] backtrack mode that lets you type
+  in any old crap and ANTLR will backtrack if it can't figure out what
+  you meant.  No errors are reported by antlr during analysis.  It
+  implicitly adds a syn pred in front of every production, using them
+  only if static grammar LL(*) analysis fails.  Syn pred code is not
+  generated if the pred is not used in a decision.
+
+  This is essentially a rapid prototyping mode.
+
+* Added backtracking report to the -report option
+
+* Added NFA->DFA conversion early termination report to the -report option
+
+* Added grammar level k and backtrack options to -report
+
+* Added a dozen unit tests to test autobacktrack NFA construction.
+
+* If you are using filter mode, you must manually use option
+  memoize=true now.
+
+July 2, 2006
+
+* Added k=* option so you can set k=2, for example, on whole grammar,
+  but an individual decision can be LL(*).
+
+* memoize option for grammars, rules, blocks.  Remove -nomemo cmd-line option
+
+* bug in DOT generator for DFA; fixed.
+
+* runtime.DFA reported errors even when backtracking
+
+July 1, 2006
+
+* Added -X option list to help
+
+* Syn preds were being hoisted into other rules, causing lots of extra
+  backtracking.
+
+June 29, 2006
+
+* unnecessary files removed during build.
+
+* Matt Benson updated build.xml
+
+* Detecting use of synpreds in analysis now instead of codegen.  In
+  this way, I can avoid analyzing decisions in synpreds for synpreds
+  not used in a DFA for a real rule.  This is used to optimize things
+  for backtrack option.
+
+* Code gen must add _fragment or whatever to end of pred name in
+  template synpredRule to avoid having ANTLR know anything about
+  method names.
+
+* Added -IdbgST option to emit ST delimiters at start/stop of all
+  templates spit out.
+
+June 28, 2006
+
+* Tweaked message when ANTLR cannot handle analysis.
+
+3.0b1 - June 27, 2006
+
+June 24, 2006
+
+* syn preds no longer generate little static classes; they also don't
+  generate a whole bunch of extra crap in the rules built to test syn
+  preds.  Removed GrammarFragmentPointer class from runtime.
+
+June 23-24, 2006
+
+* added output option to -report output.
+
+* added profiling info:
+  Number of rule invocations in "guessing" mode
+  number of rule memoization cache hits
+  number of rule memoization cache misses
+
+* made DFA DOT diagrams go left to right not top to bottom
+
+* I try to handle recursion overflow states now by resolving these states
+  with semantic/syntactic predicates if they exist.  The DFA is then
+  deterministic rather than simply resolving by choosing first
+  nondeterministic alt.  I used to generated errors:
+
+~/tmp $ java org.antlr.Tool -dfa t.g
+ANTLR Parser Generator   Early Access Version 3.0b2 (July 5, 2006)  1989-2006
+t.g:2:5: Alternative 1: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
+t.g:2:5: Alternative 2: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
+
+  Now, I use predicates if available and emit no warnings.
+
+* made sem preds share accept states.  Previously, multiple preds in a
+decision forked new accepts each time for each nondet state.
+
+June 19, 2006
+
+* Need parens around the prediction expressions in templates.
+
+* Referencing $ID.text in an action forced bad code gen in lexer rule ID.
+
+* Fixed a bug in how predicates are collected.  The definition of
+  "last predicated alternative" was incorrect in the analysis.  Further,
+  gated predicates incorrectly missed a case where an edge should become
+  true (a tautology).
+
+* Removed an unnecessary input.consume() reference in the runtime/DFA class.
+
+June 14, 2006
+
+* -> ($rulelabel)? didn't generate proper code for ASTs.
+
+* bug in code gen (did not compile)
+a : ID -> ID
+  | ID -> ID
+  ;
+Problem is repeated ref to ID from left side.  Juergen pointed this out.
+
+* use of tokenVocab with missing file yielded exception
+
+* (A|B)=> foo yielded an exception as (A|B) is a set not a block. Fixed.
+
+* Didn't set ID1= and INT1= for this alt:
+  | ^(ID INT+ {System.out.print(\"^(\"+$ID+\" \"+$INT+\")\");})
+
+* Fixed so repeated dangling state errors only occur once like:
+t.g:4:17: the decision cannot distinguish between alternative(s) 2,1 for at least one input sequence
+
+* tracking of rule elements was on (making list defs at start of
+  method) with templates instead of just with ASTs.  Turned off.
+
+* Doesn't crash when you give it a missing file now.
+
+* -report: add output info: how many LL(1) decisions.
+
+June 13, 2006
+
+* ^(ROOT ID?) Didn't work; nor did any other nullable child list such as
+  ^(ROOT ID* INT?).  Now, I check to see if child list is nullable using
+  Grammar.LOOK() and, if so, I generate an "IF lookahead is DOWN" gate
+  around the child list so the whole thing is optional.
+
+* Fixed a bug in LOOK that made it not look through nullable rules.
+
+* Using AST suffixes or -> rewrite syntax now gives an error w/o a grammar
+  output option.  Used to crash ;)
+
+* References to EOF ended up with improper -1 refs instead of EOF in output.
+
+* didn't warn of ambig ref to $expr in rewrite; fixed.
+list
+     :	'[' expr 'for' type ID 'in' expr ']'
+	-> comprehension(expr={$expr.st},type={},list={},i={})
+	;
+
+June 12, 2006
+
+* EOF works in the parser as a token name.
+
+* Rule b:(A B?)*; didn't display properly in AW due to the way ANTLR
+  generated NFA.
+
+* "scope x;" in a rule for unknown x gives no error.  Fixed.  Added unit test.
+
+* Label type for refs to start/stop in tree parser and other parsers were
+  not used.  Lots of casting.  Ick. Fixed.
+
+* couldn't refer to $tokenlabel in isolation; but need so we can test if
+  something was matched.  Fixed.
+
+* Lots of little bugs fixed in $x.y, %... translation due to new
+  action translator.
+
+* Improperly tracking block nesting level; result was that you couldn't
+  see $ID in action of rule "a : A+ | ID {Token t = $ID;} | C ;"
+
+* a : ID ID {$ID.text;} ; did not get a warning about ambiguous $ID ref.
+
+* No error was found on $COMMENT.text:
+
+COMMENT
+    :   '/*' (options {greedy=false;} : . )* '*/'
+        {System.out.println("found method "+$COMMENT.text);}
+    ;
+
+  $enclosinglexerrule scope does not exist.  Use text or setText() here.
+
+June 11, 2006
+
+* Single return values are initialized now to default or to your spec.
+
+* cleaned up input stream stuff.  Added ANTLRReaderStream, ANTLRInputStream
+  and refactored.  You can specify encodings now on ANTLRFileStream (and
+  ANTLRInputStream) now.
+
+* You can set text local var now in a lexer rule and token gets that text.
+  start/stop indexes are still set for the token.
+
+* Changed lexer slightly.  Calling a nonfragment rule from a
+  nonfragment rule does not set the overall token.
+
+June 10, 2006
+
+* Fixed bug where unnecessary escapes yield char==0 like '\{'.
+
+* Fixed analysis bug.  This grammar didn't report a recursion warning:
+x   : y X
+    | y Y
+    ;
+y   : L y R
+    | B
+    ;
+  The DFAState.equals() method was messed up.
+
+* Added @synpredgate {...} action so you can tell ANTLR how to gate actions
+  in/out during syntactic predicate evaluation.
+
+* Fuzzy parsing should be more efficient.  It should backtrack over a rule
+  and then rewind and do it again "with feeling" to exec actions.  It was
+  actually doing it 3x not 2x.
+
+June 9, 2006
+
+* Gutted and rebuilt the action translator for $x.y, $x::y, ...
+  Uses ANTLR v3 now for the first time inside v3 source. :)
+  ActionTranslator.java
+
+* Fixed a bug where referencing a return value on a rule didn't work
+  because later a ref to that rule's predefined properties didn't
+  properly force a return value struct to be built.  Added unit test.
+
+June 6, 2006
+
+* New DFA mechanisms.  Cyclic DFA are implemented as state tables,
+  encoded via strings as java cannot handle large static arrays :(
+  States with edges emanating that have predicates are specially
+  treated.  A method is generated to do these states.  The DFA
+  simulation routine uses the "special" array to figure out if the
+  state is special.  See March 25, 2006 entry for description:
+  http://www.antlr.org/blog/antlr3/codegen.tml.  analysis.DFA now has
+  all the state tables generated for code gen.  CyclicCodeGenerator.java
+  disappeared as it's unneeded code. :)
+
+* Internal general clean up of the DFA.states vs uniqueStates thing.
+  Fixed lookahead decisions no longer fill uniqueStates.  Waste of
+  time.  Also noted that when adding sem pred edges, I didn't check
+  for state reuse.  Fixed.
+
+June 4, 2006
+
+* When resolving ambig DFA states predicates, I did not add the new states
+  to the list of unique DFA states.  No observable effect on output except
+  that DFA state numbers were not always contiguous for predicated decisions.
+  I needed this fix for new DFA tables.
+
+3.0ea10 - June 2, 2006
+
+June 2, 2006
+
+* Improved grammar stats and added syntactic pred tracking.
+
+June 1, 2006
+
+* Due to a type mismatch, the DebugParser.recoverFromMismatchedToken()
+  method was not called.  Debug events for mismatched token error
+  notification were not sent to ANTLRWorks properly
+
+* Added getBacktrackingLevel() for any recognizer; needed for profiler.
+
+* Only writes profiling data for antlr grammar analysis with -profile set
+
+* Major update and bug fix to (runtime) Profiler.
+
+May 27, 2006
+
+* Added Lexer.skip() to force lexer to ignore current token and look for
+  another; no token is created for current rule and is not passed on to
+  parser (or other consumer of the lexer).
+
+* Parsers are much faster now.  I removed use of java.util.Stack for pushing
+  follow sets and use a hardcoded array stack instead.  Dropped from
+  5900ms to 3900ms for parse+lex time parsing entire java 1.4.2 source.  Lex
+  time alone was about 1500ms.  Just looking at parse time, we get about 2x
+  speed improvement. :)
+
+May 26, 2006
+
+* Fixed NFA construction so it generates NFA for (A*)* such that ANTLRWorks
+  can display it properly.
+
+May 25, 2006
+
+* added abort method to Grammar so AW can terminate the conversion if it's
+  taking too long.
+
+May 24, 2006
+
+* added method to get left recursive rules from grammar without doing full
+  grammar analysis.
+
+* analysis, code gen not attempted if serious error (like
+  left-recursion or missing rule definition) occurred while reading
+  the grammar in and defining symbols.
+
+* added amazing optimization; reduces analysis time by 90% for java
+  grammar; simple IF statement addition!
+
+3.0ea9 - May 20, 2006
+
+* added global k value for grammar to limit lookahead for all decisions unless
+overridden in a particular decision.
+
+* added failsafe so that any decision taking longer than 2 seconds to create
+the DFA will fall back on k=1.  Use -ImaxtimeforDFA n (in ms) to set the time.
+
+* added an option (turned off for now) to use multiple threads to
+perform grammar analysis.  Not much help on a 2-CPU computer as
+garbage collection seems to peg the 2nd CPU already. :( Gotta wait for
+a 4 CPU box ;)
+
+* switched from #src to // $ANTLR src directive.
+
+* CommonTokenStream.getTokens() looked past end of buffer sometimes. fixed.
+
+* unicode literals didn't really work in DOT output and generated code. fixed.
+
+* fixed the unit test rig so it compiles nicely with Java 1.5
+
+* Added ant build.xml file (reads build.properties file)
+
+* predicates sometimes failed to compile/eval properly due to missing (...)
+  in IF expressions.  Forced (..)
+
+* (...)? with only one alt were not optimized.  Was:
+
+        // t.g:4:7: ( B )?
+        int alt1=2;
+        int LA1_0 = input.LA(1);
+        if ( LA1_0==B ) {
+            alt1=1;
+        }
+        else if ( LA1_0==-1 ) {
+            alt1=2;
+        }
+        else {
+            NoViableAltException nvae =
+                new NoViableAltException("4:7: ( B )?", 1, 0, input);
+            throw nvae;
+        }
+
+is now:
+
+        // t.g:4:7: ( B )?
+        int alt1=2;
+        int LA1_0 = input.LA(1);
+        if ( LA1_0==B ) {
+            alt1=1;
+        }
+
+  Smaller, faster and more readable.
+
+* Allow manual init of return values now:
+  functionHeader returns [int x=3*4, char (*f)()=null] : ... ;
+
+* Added optimization for DFAs that fixed a codegen bug with rules in lexer:
+   EQ			 : '=' ;
+   ASSIGNOP		 : '=' | '+=' ;
+  EQ is a subset of the other rule.  It did not give an error, which is
+  correct, but generated bad code.
+
+* ANTLR was sending column not char position to ANTLRWorks.
+
+* Bug fix: location 0, 0 emitted for synpreds and empty alts.
+
+* debugging event handshake now sends grammar file name.  Added getGrammarFileName() to recognizers.  Java.stg generates it:
+
+    public String getGrammarFileName() { return "<fileName>"; }
+
+* tree parsers can do arbitrary lookahead now including backtracking.  I
+  updated CommonTreeNodeStream.
+
+* added events for debugging tree parsers:
+
+	/** Input for a tree parser is an AST, but we know nothing for sure
+	 *  about a node except its type and text (obtained from the adaptor).
+	 *  This is the analog of the consumeToken method.  Again, the ID is
+	 *  the hashCode usually of the node so it only works if hashCode is
+	 *  not implemented.
+	 */
+	public void consumeNode(int ID, String text, int type);
+
+	/** The tree parser looked ahead */
+	public void LT(int i, int ID, String text, int type);
+
+	/** The tree parser has popped back up from the child list to the
+	 *  root node.
+	 */
+	public void goUp();
+
+	/** The tree parser has descended to the first child of the current
+	 *  root node.
+	 */
+	public void goDown();
+
+* Added DebugTreeNodeStream and DebugTreeParser classes
+
+* Added ctor because the debug tree node stream will need to ask questions about nodes and since nodes are just Object, it needs an adaptor to decode the nodes and get text/type info for the debugger.
+
+public CommonTreeNodeStream(TreeAdaptor adaptor, Tree tree);
+
+* added getter to TreeNodeStream:
+	public TreeAdaptor getTreeAdaptor();
+
+* Implemented getText/getType in CommonTreeAdaptor.
+
+* Added TraceDebugEventListener that can dump all events to stdout.
+
+* I broke down and made Tree implement getText
+
+* tree rewrites now gen location debug events.
+
+* added AST debug events to listener; added blank listener for convenience
+
+* updated debug events to send begin/end backtrack events for debugging
+
+* with a : (b->b) ('+' b -> ^(PLUS $a b))* ; you get b[0] each time as
+  there is no loop in rewrite rule itself.  Need to know context that
+  the -> is inside the rule and hence b means last value of b not all
+  values.
+
+* Bug in TokenRewriteStream; ops at indexes < start index blocked proper op.
+
+* Actions in ST rewrites "-> ({$op})()" were not translated
+
+* Added new action name:
+
+@rulecatch {
+catch (RecognitionException re) {
+    reportError(re);
+    recover(input,re);
+}
+catch (Throwable t) {
+    System.err.println(t);
+}
+}
+Overrides rule catch stuff.
+
+* Isolated $ refs caused exception
+
+3.0ea8 - March 11, 2006
+
+* added @finally {...} action like @init for rules.  Executes in
+  finally block (java target) after all other stuff like rule memoization.
+  No code changes needs; ST just refs a new action:
+      <ruleDescriptor.actions.finally>
+
+* hideous bug fixed: PLUS='+' didn't result in '+' rule in lexer
+
+* TokenRewriteStream didn't do toString() right when no rewrites had been done.
+
+* lexer errors in interpreter were not printed properly
+
+* bitsets are dumped in hex not decimal now for FOLLOW sets
+
+* /* epsilon */ is not printed now when printing out grammars with empty alts
+
+* Fixed another bug in tree rewrite stuff where it was checking that elements
+  had at least one element.  Strange...commented out for now to see if I can remember what's up.
+
+* Tree rewrites had problems when you didn't have x+=FOO variables.  Rules
+  like this work now:
+
+  a : (x=ID)? y=ID -> ($x $y)?;
+
+* filter=true for lexers turns on k=1 and backtracking for every token
+  alternative.  Put the rules in priority order.
+
+* added getLine() etc... to Tree to support better error reporting for
+  trees.  Added MismatchedTreeNodeException.
+
+* $templates::foo() is gone.  added % as special template symbol.
+  %foo(a={},b={},...) ctor (even shorter than $templates::foo(...))
+  %({name-expr})(a={},...) indirect template ctor reference
+
+  The above are parsed by antlr.g and translated by codegen.g
+  The following are parsed manually here:
+
+  %{string-expr} anonymous template from string expr
+  %{expr}.y = z; template attribute y of StringTemplate-typed expr to z
+  %x.y = z; set template attribute y of x (always set never get attr)
+            to z [languages like python without ';' must still use the
+            ';' which the code generator is free to remove during code gen]
+
+* -> ({expr})(a={},...) notation for indirect template rewrite.
+  expr is the name of the template.
+
+* $x[i]::y and $x[-i]::y notation for accessing absolute scope stack
+  indexes and relative negative scopes.  $x[-1]::y is the y attribute
+  of the previous scope (stack top - 1).
+
+* filter=true mode for lexers; can do this now...upon mismatch, just
+  consumes a char and tries again:
+lexer grammar FuzzyJava;
+options {filter=true;}
+
+FIELD
+    :   TYPE WS? name=ID WS? (';'|'=')
+        {System.out.println("found var "+$name.text);}
+    ;
+
+* refactored char streams so ANTLRFileStream is now a subclass of
+  ANTLRStringStream.
+
+* char streams for lexer now allowed nested backtracking in lexer.
+
+* added TokenLabelType for lexer/parser for all token labels
+
+* line numbers for error messages were not updated properly in antlr.g
+  for strings, char literals and <<...>>
+
+* init action in lexer rules was before the type,start,line,... decls.
+
+* Tree grammars can now specify output; I've only tested output=template
+  though.
+
+* You can reference EOF now in the parser and lexer.  It's just token type
+  or char value -1.
+
+* Bug fix: $ID refs in the *lexer* were all messed up.  Cleaned up the
+  set of properties available...
+
+* Bug fix: .st not found in rule ref when rule has scope:
+field
+scope {
+	StringTemplate funcDef;
+}
+    :   ...
+	{$field::funcDef = $field.st;}
+    ;
+it gets field_stack.st instead
+
+* return in backtracking must return retval or null if return value.
+
+* $property within a rule now works like $text, $st, ...
+
+* AST/Template Rewrites were not gated by backtracking==0 so they
+  executed even when guessing.  Auto AST construction is now gated also.
+
+* CommonTokenStream was somehow returning tokens not text in toString()
+
+* added useful methods to runtime.BitSet and also to CommonToken so you can
+  update the text.  Added nice Token stream method:
+
+  /** Given a start and stop index, return a List of all tokens in
+   *  the token type BitSet.  Return null if no tokens were found.  This
+   *  method looks at both on and off channel tokens.
+   */
+  public List getTokens(int start, int stop, BitSet types);
+
+* literals are now passed in the .tokens files so you can ref them in
+  tree parses, for example.
+
+* added basic exception handling; no labels, just general catches:
+
+a : {;}A | B ;
+        exception
+                catch[RecognitionException re] {
+                        System.out.println("recog error");
+                }
+                catch[Exception e] {
+                        System.out.println("error");
+                }
+
+* Added method to TokenStream:
+  public String toString(Token start, Token stop);
+
+* antlr generates #src lines in lexer grammars generated from combined grammars
+  so error messages refer to original file.
+
+* lexers generated from combined grammars now use original formatting.
+
+* predicates have $x.y stuff translated now.  Warning: predicates might be
+  hoisted out of context.
+
+* return values in return val structs are now public.
+
+* output=template with return values on rules was broken.  I assume return values with ASTs was broken too.  Fixed.
+
+3.0ea7 - December 14, 2005
+
+* Added -print option to print out grammar w/o actions
+
+* Renamed BaseParser to be BaseRecognizer and even made Lexer derive from
+  this; nice as it now shares backtracking support code.
+
+* Added syntactic predicates (...)=>.  See December 4, 2005 entry:
+
+  http://www.antlr.org/blog/antlr3/lookahead.tml
+
+  Note that we have a new option for turning off rule memoization during
+  backtracking:
+
+  -nomemo        when backtracking don't generate memoization code
+
+* Predicates are now tested in order that you specify the alts.  If you
+  leave the last alt "naked" (w/o pred), it will assume a true pred rather
+  than union of other preds.
+
+* Added gated predicates "{p}?=>" that literally turn off a production whereas
+disambiguating predicates are only hoisted into the predictor when syntax alone
+is not sufficient to uniquely predict alternatives.
+
+A : {p}?  => "a" ;
+B : {!p}? => ("a"|"b")+ ;
+
+* bug fixed related to predicates in predictor
+lexer grammar w;
+A : {p}? "a" ;
+B : {!p}? ("a"|"b")+ ;
+DFA is correct.  A state splits for input "a" on the pred.
+Generated code though was hosed.  No pred tests in prediction code!
+I added testLexerPreds() and others in TestSemanticPredicateEvaluation.java
+
+* added execAction template in case we want to do something in front of
+  each action execution or something.
+
+* left-recursive cycles from rules w/o decisions were not detected.
+
+* undefined lexer rules were not announced! fixed.
+
+* unreachable messages for Tokens rule now indicate rule name not alt. E.g.,
+
+  Ruby.lexer.g:24:1: The following token definitions are unreachable: IVAR
+
+* nondeterminism warnings improved for Tokens rule:
+
+Ruby.lexer.g:10:1: Multiple token rules can match input such as ""0".."9"": INT, FLOAT
+As a result, token(s) FLOAT were disabled for that input
+
+
+* DOT diagrams didn't show escaped char properly.
+
+* Char/string literals are now all 'abc' not "abc".
+
+* action syntax changed "@scope::actionname {action}" where scope defaults
+  to "parser" if parser grammar or combined grammar, "lexer" if lexer grammar,
+  and "treeparser" if tree grammar.  The code generation targets decide
+  what scopes are available.  Each "scope" yields a hashtable for use in
+  the output templates.  The scopes full of actions are sent to all output
+  file templates (currently headerFile and outputFile) as attribute actions.
+  Then you can reference <actions.scope> to get the map of actions associated
+  with scope and <actions.parser.header> to get the parser's header action
+  for example.  This should be very flexible.  The target should only have
+  to define which scopes are valid, but the action names should be variable
+  so we don't have to recompile ANTLR to add actions to code gen templates.
+
+  grammar T;
+  options {language=Java;}
+  @header { package foo; }
+  @parser::stuff { int i; } // names within scope not checked; target dependent
+  @members { int i; }
+  @lexer::header {head}
+  @lexer::members { int j; }
+  @headerfile::blort {...} // error: this target doesn't have headerfile
+  @treeparser::members {...} // error: this is not a tree parser
+  a
+  @init {int i;}
+    : ID
+    ;
+  ID : 'a'..'z';
+
+  For now, the Java target uses members and header as a valid name.  Within a
+  rule, the init action name is valid.
+
+* changed $dynamicscope.value to $dynamicscope::value even if value is defined
+  in same rule such as $function::name where rule function defines name.
+
+* $dynamicscope gets you the stack
+
+* rule scopes go like this now:
+
+  rule
+  scope {...}
+  scope slist,Symbols;
+  	: ...
+	;
+
+* Created RuleReturnScope as a generic rule return value.  Makes it easier
+  to do this:
+    RuleReturnScope r = parser.program();
+    System.out.println(r.getTemplate().toString());
+
+* $template, $tree, $start, etc...
+
+* $r.x in current rule.  $r is ignored as fully-qualified name. $r.start works too
+
+* added warning about $r referring to both return value of rule and dynamic scope of rule
+
+* integrated StringTemplate in a very simple manner
+
+Syntax:
+-> template(arglist) "..."
+-> template(arglist) <<...>>
+-> namedTemplate(arglist)
+-> {free expression}
+-> // empty
+
+Predicate syntax:
+a : A B -> {p1}? foo(a={$A.text})
+        -> {p2}? foo(a={$B.text})
+        -> // return nothing
+
+An arg list is just a list of template attribute assignments to actions in curlies.
+
+There is a setTemplateLib() method for you to use with named template rewrites.
+
+Use a new option:
+
+grammar t;
+options {output=template;}
+...
+
+This all should work for tree grammars too, but I'm still testing.
+
+* fixed bugs where strings were improperly escaped in exceptions, comments, etc..  For example, newlines came out as newlines not the escaped version
+
+3.0ea6 - November 13, 2005
+
+* turned off -debug/-profile, which was on by default
+
+* completely refactored the output templates; added some missing templates.
+
+* dramatically improved infinite recursion error messages (actually
+  left-recursion never even was printed out before).
+
+* wasn't printing dangling state messages when it reanalyzes with k=1.
+
+* fixed a nasty bug in the analysis engine dealing with infinite recursion.
+  Spent all day thinking about it and cleaned up the code dramatically.
+  Bug fixed and software is more powerful and I understand it better! :)
+
+* improved verbose DFA nodes; organized by alt
+
+* got much better random phrase generation.  For example:
+
+ $ java org.antlr.tool.RandomPhrase simple.g program
+ int Ktcdn ';' method wh '(' ')' '{' return 5 ';' '}'
+
+* empty rules like "a : ;" generated code that didn't compile due to
+  try/catch for RecognitionException.  Generated code couldn't possibly
+  throw that exception.
+
+* when printing out a grammar, such as in comments in generated code,
+  ANTLR didn't print ast suffix stuff back out for literals.
+
+* This never exited loop:
+  DATA : (options {greedy=false;}: .* '\n' )* '\n' '.' ;
+  and now it works due to new default nongreedy .*  Also this works:
+  DATA : (options {greedy=false;}: .* '\n' )* '.' ;
+
+* Dot star ".*" syntax didn't work; in lexer it is nongreedy by
+  default.  In parser it is on greedy but also k=1 by default.  Added
+  unit tests.  Added blog entry to describe.
+
+* ~T where T is the only token yielded an empty set but no error
+
+* Used to generate unreachable message here:
+
+  parser grammar t;
+  a : ID a
+    | ID
+    ;
+
+  z.g:3:11: The following alternatives are unreachable: 2
+
+  In fact it should really be an error; now it generates:
+
+  no start rule in grammar t (no rule can obviously be followed by EOF)
+
+  Per next change item, ANTLR cannot know that EOF follows rule 'a'.
+
+* added error message indicating that ANTLR can't figure out what your
+  start rule is.  Required to properly generate code in some cases.
+
+* validating semantic predicates now work (if they are false, they
+  throw a new FailedPredicateException)
+
+* two hideous bug fixes in the IntervalSet, which made analysis go wrong
+  in a few cases.  Thanks to Oliver Zeigermann for finding lots of bugs
+  and making suggested fixes (including the next two items)!
+
+* cyclic DFAs are now nonstatic and hence can access instance variables
+
+* labels are now allowed on lexical elements (in the lexer)
+
+* added some internal debugging options
+
+* ~'a'* and ~('a')* were not working properly; refactored antlr.g grammar
+
+3.0ea5 - July 5, 2005
+
+* Using '\n' in a parser grammar resulted in a nonescaped version of '\n' in the token names table making compilation fail.  I fixed this by reorganizing/cleaning up portion of ANTLR that deals with literals.  See comment org.antlr.codegen.Target.
+
+* Target.getMaxCharValue() did not use the appropriate max value constant.
+
+* ALLCHAR was a constant when it should use the Target max value def.  set complement for wildcard also didn't use the Target def.  Generally cleaned up the max char value stuff.
+
+* Code gen didn't deal with ASTLabelType properly...I think even the 3.0ea7 example tree parser was broken! :(
+
+* Added a few more unit tests dealing with escaped literals
+
+3.0ea4 - June 29, 2005
+
+* tree parsers work; added CommonTreeNodeStream.  See simplecTreeParser
+  example in examples-v3 tarball.
+
+* added superClass and ASTLabelType options
+
+* refactored Parser to have a BaseParser and added TreeParser
+
+* bug fix: actions being dumped in description strings; compile errors
+  resulted
+
+3.0ea3 - June 23, 2005
+
+Enhancements
+
+* Automatic tree construction operators are in: ! ^ ^^
+
+* Tree construction rewrite rules are in
+	-> {pred1}? rewrite1
+	-> {pred2}? rewrite2
+	...
+	-> rewriteN
+
+  The rewrite rules may be elements like ID, expr, $label, {node expr}
+  and trees ^( <root> <children> ).  You have have (...)?, (...)*, (...)+
+  subrules as well.
+
+  You may have rewrites in subrules not just at outer level of rule, but
+  any -> rewrite forces auto AST construction off for that alternative
+  of that rule.
+
+  To avoid cycles, copy semantics are used:
+
+  r : INT -> INT INT ;
+
+  means make two new nodes from the same INT token.
+
+  Repeated references to a rule element implies a copy for at least one
+  tree:
+
+  a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom tree)
+
+* $ruleLabel.tree refers to tree created by matching the labeled element.
+
+* A description of the blocks/alts is generated as a comment in output code
+
+* A timestamp / signature is put at top of each generated code file
+
+3.0ea2 - June 12, 2005
+
+Bug fixes
+
+* Some error messages were missing the stackTrace parameter
+
+* Removed the file locking mechanism as it's not cross platform
+
+* Some absolute vs relative path name problems with writing output
+  files.  Rules are now more concrete.  -o option takes precedence
+  // -o /tmp /var/lib/t.g => /tmp/T.java
+  // -o subdir/output /usr/lib/t.g => subdir/output/T.java
+  // -o . /usr/lib/t.g => ./T.java
+  // -o /tmp subdir/t.g => /tmp/subdir/t.g
+  // If they didn't specify a -o dir so just write to location
+  // where grammar is, absolute or relative
+
+* does error checking on unknown option names now
+
+* Using just language code not locale name for error message file.  I.e.,
+  the default (and for any English speaking locale) is en.stg not en_US.stg
+  anymore.
+
+* The error manager now asks the Tool to panic rather than simply doing
+  a System.exit().
+
+* Lots of refactoring concerning grammar, rule, subrule options.  Now
+  detects invalid options.
+
+3.0ea1 - June 1, 2005
+
+Initial early access release
diff --git a/build.properties b/build.properties
new file mode 100644
index 0000000..6db78e1
--- /dev/null
+++ b/build.properties
@@ -0,0 +1,8 @@
+compile.debug=true
+compile.debuglevel=lines
+compile.deprecation=false
+compile.optimize=false
+
+version=3.0ea9
+
+stringtemplate.jar=/usr/local/lib/stringtemplate-2.3b6.jar
diff --git a/build.xml b/build.xml
new file mode 100644
index 0000000..19b5001
--- /dev/null
+++ b/build.xml
@@ -0,0 +1,227 @@
+<!-- Contributed by Oliver Zeigermann
+     Modified by Jean Bovet
+     Modified by Matt Benson
+     Modified by Miguel Ping
+     Library dependency: install library http://jakarta.apache.org/bcel/ in $ANT_HOME/lib, or other supported 3rd-party lib option
+-->
+
+<project name="antlr3" default="build">
+    <property file="build.properties" />
+
+    <property name="build.dir" location="build" />
+    <property name="lib.dir" location="lib" />
+
+    <property name="build.classes" location="${build.dir}/classes" />
+    <property name="build.rtclasses" location="${build.dir}/rtclasses" />
+    <property name="build.tests" location="${build.dir}/tests" />
+    <property name="build.tests.xml" location="${build.tests}/xml" />
+    <property name="build.tests.reports" location="${build.tests}/reports" />
+    <property name="temp.dir" location="${java.io.tmpdir}/antlr3" />
+    <property name="includetests" value="org/antlr/test/Test*.java" />
+
+    <property name="src.dir" location="src" />
+    <property name="src.rt" location="runtime/Java/src" />
+    <property name="codegen.dir" location="codegen" />
+
+    <property name="tool.class"
+              location="${build.classes}/org/antlr/Tool.class" />
+
+    <property name="compile.debug" value="true" />
+    <property name="compile.debuglevel" value="lines,vars,source" />
+    <property name="compile.deprecation" value="false" />
+    <property name="compile.optimize" value="false" />
+
+    <path id="src.path">
+        <pathelement location="${src.dir}" />
+        <pathelement location="${codegen.dir}" />
+    </path>
+
+    <path id="rt.classpath">
+        <fileset dir="${lib.dir}" includes="**/*.jar" />
+    </path>
+
+    <path id="classpath">
+        <path refid="rt.classpath" />
+        <pathelement location="${build.rtclasses}" />
+    </path>
+
+    <condition property="bcel.available">
+        <available classname="org.apache.bcel.Constants" />
+    </condition>
+
+    <macrodef name="generate">
+      <attribute name="grammar" />
+      <sequential>
+          <antlr target="${codegen.dir}/@{grammar}" />
+      </sequential>
+    </macrodef>
+
+    <presetdef name="myjavac">
+        <javac debug="${compile.debug}"
+               debuglevel="${compile.debuglevel}"
+               deprecation="${compile.deprecation}"
+               optimize="${compile.optimize}"
+               source="1.5" target="jsr14" />
+    </presetdef>
+
+    <target name="clean" description="Deletes all generated files">
+        <delete dir="${build.dir}" />
+        <delete dir="${codegen.dir}" />
+    </target>
+
+    <target name="generator-prepare">
+        <mkdir dir="${codegen.dir}" />
+        <copy todir="${codegen.dir}" preservelastmodified="true">
+            <fileset dir="${src.dir}">
+                <include name="org/antlr/tool/antlr.g" />
+                <include name="org/antlr/tool/antlr.print.g" />
+                <include name="org/antlr/tool/assign.types.g" />
+                <include name="org/antlr/tool/buildnfa.g" />
+                <include name="org/antlr/tool/define.g" />
+                <include name="org/antlr/codegen/codegen.g" />
+            </fileset>
+            <flattenmapper />
+        </copy>
+
+        <!--
+            make sure we rebuild anybody who uses ANTLRTokenTypes
+            by deleting all target files younger than antlr.g:
+          -->
+        <delete>
+            <fileset dir="${codegen.dir}" excludes="*.g">
+                <not>
+                    <depend targetdir="${codegen.dir}">
+                        <mapper type="merge" to="antlr.g" />
+                    </depend>
+                </not>
+            </fileset>
+        </delete>
+    </target>
+
+    <target name="generator" depends="generator-prepare">
+        <generate grammar="antlr.g" />
+        <generate grammar="antlr.print.g" />
+        <generate grammar="assign.types.g" />
+        <generate grammar="buildnfa.g" />
+        <generate grammar="define.g" />
+        <generate grammar="codegen.g" />
+    </target>
+
+    <target name="compile-rt">
+        <mkdir dir="${build.rtclasses}" />
+        <myjavac srcdir="${src.rt}" destdir="${build.rtclasses}"
+                 classpathref="rt.classpath" />
+    </target>
+
+    <target name="compile" depends="generator,compile-rt">
+        <mkdir dir="${build.classes}" />
+        <myjavac destdir="${build.classes}" classpathref="classpath">
+            <src refid="src.path" />
+        </myjavac>
+    </target>
+
+    <target name="templates">
+        <copy todir="${build.classes}">
+            <fileset dir="${src.dir}" includes="**/*.stg,**/*.st,**/*.sti" />
+        </copy>
+    </target>
+
+    <target name="jarnames" depends="version">
+        <property name="antlr3.jar"
+                  location="${build.dir}/antlr${jar.version}.jar" />
+        <property name="antlr3.rt.jar"
+                  location="${build.dir}/antlr${jar.version}-runtime.jar" />
+    </target>
+
+    <target name="build-rt" depends="compile-rt,jarnames"
+            description="Creates the ANTLR3 runtime jar">
+        <jar jarfile="${antlr3.rt.jar}" index="true" filesonly="true">
+            <fileset file="LICENSE.txt" />
+            <fileset dir="${build.rtclasses}" />
+            <manifest>
+                <attribute name="Version" value="${version}" />
+            </manifest>
+        </jar>
+    </target>
+
+    <target name="build" depends="compile,templates,jarnames"
+            description="Creates the ANTLR3 fullversion jar">
+        <jar jarfile="${antlr3.jar}" index="true" filesonly="true">
+            <fileset file="LICENSE.txt" />
+            <fileset dir="${build.classes}" excludes="org/antlr/test/**" />
+            <fileset dir="${build.rtclasses}" />
+            <manifest>
+                <attribute name="Version" value="${version}" />
+            </manifest>
+        </jar>
+    </target>
+
+    <target name="build-all" depends="build,build-rt"
+            description="Creates the ANTLR3 fullversion and runtime jars" />
+
+    <target name="version" depends="version-bcel" unless="bcel.available">
+        <echo>Install bcel in the classpath to have automatic version in jar name</echo>
+        <property name="version" value="unknown 3.x build" />
+        <property name="jar.version" value="" />
+    </target>
+
+    <target name="version-bcel" if="bcel.available">
+
+        <condition property="enc" value="ISO-8859-1" else="${file.encoding}">
+            <!-- ironically, the AntVersion condition would tell us what
+                 we need to know, so we use its absence to indicate
+                 an Ant version prior to 1.7.0: -->
+            <available classname="org.apache.tools.ant.taskdefs.condition.AntVersion" />
+        </condition>
+
+        <loadproperties srcfile="${tool.class}" encoding="${enc}">
+            <filterchain>
+                <classconstants/>
+                <prefixlines prefix="Tool." />
+            </filterchain>
+        </loadproperties>
+
+        <property name="version" value="${Tool.VERSION}" />
+        <property name="jar.version" value="-${version}" />
+    </target>
+
+    <target name="rebuild" depends="clean,build" />
+    <target name="rebuild-rt" depends="clean,build-rt" />
+    <target name="rebuild-all" depends="clean,build-all" />
+
+    <target name="run-tests" depends="compile">
+        <mkdir dir="${build.tests.xml}" />
+        <mkdir dir="${temp.dir}" />
+        <delete>
+            <fileset dir="${build.tests.xml}" />
+        </delete>
+        <junit printsummary="withOutAndErr" showoutput="true"
+               fork="true" forkmode="once" failureproperty="testfailure"
+               tempdir="${temp.dir}">
+            <formatter type="xml" />
+            <formatter type="plain" />
+            <classpath>
+                <pathelement path="${build.classes}" />
+                <path refid="classpath" />
+                <pathelement path="${antlr2.jar}" />
+            </classpath>
+            <test if="testcase" name="${testcase}" todir="${build.tests.xml}" />
+            <batchtest todir="${build.tests.xml}" unless="testcase">
+                <fileset dir="${src.dir}" includes="${includetests}" />
+            </batchtest>
+        </junit>
+    </target>
+
+    <target name="run-reports">
+        <mkdir dir="${build.tests.reports}" />
+        <junitreport todir="${build.tests.reports}">
+            <fileset dir="${build.tests.xml}" includes="TEST-*.xml" />
+            <report format="frames" todir="${build.tests.reports}" />
+        </junitreport>
+    </target>
+
+    <target name="test" description="Run tests" depends="run-tests,run-reports">
+        <fail if="testfailure">Tests failed</fail>
+    </target>
+
+</project>
diff --git a/runtime/Java/doxyfile b/runtime/Java/doxyfile
new file mode 100644
index 0000000..7237472
--- /dev/null
+++ b/runtime/Java/doxyfile
@@ -0,0 +1,264 @@
+# Doxyfile 1.5.2
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING      = UTF-8
+PROJECT_NAME           = "ANTLR API"
+PROJECT_NUMBER         = 3.0
+OUTPUT_DIRECTORY       = api
+CREATE_SUBDIRS         = NO
+OUTPUT_LANGUAGE        = English
+BRIEF_MEMBER_DESC      = YES
+REPEAT_BRIEF           = YES
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+ALWAYS_DETAILED_SEC    = YES
+INLINE_INHERITED_MEMB  = NO
+FULL_PATH_NAMES        = YES
+STRIP_FROM_PATH        = /Applications/
+STRIP_FROM_INC_PATH    = 
+SHORT_NAMES            = NO
+JAVADOC_AUTOBRIEF      = NO
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP         = NO
+INHERIT_DOCS           = YES
+SEPARATE_MEMBER_PAGES  = NO
+TAB_SIZE               = 8
+ALIASES                = 
+OPTIMIZE_OUTPUT_FOR_C  = NO
+OPTIMIZE_OUTPUT_JAVA   = YES
+BUILTIN_STL_SUPPORT    = NO
+CPP_CLI_SUPPORT        = NO
+DISTRIBUTE_GROUP_DOC   = NO
+SUBGROUPING            = YES
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL            = YES
+EXTRACT_PRIVATE        = YES
+EXTRACT_STATIC         = YES
+EXTRACT_LOCAL_CLASSES  = YES
+EXTRACT_LOCAL_METHODS  = NO
+HIDE_UNDOC_MEMBERS     = NO
+HIDE_UNDOC_CLASSES     = NO
+HIDE_FRIEND_COMPOUNDS  = NO
+HIDE_IN_BODY_DOCS      = NO
+INTERNAL_DOCS          = NO
+CASE_SENSE_NAMES       = NO
+HIDE_SCOPE_NAMES       = NO
+SHOW_INCLUDE_FILES     = YES
+INLINE_INFO            = YES
+SORT_MEMBER_DOCS       = YES
+SORT_BRIEF_DOCS        = NO
+SORT_BY_SCOPE_NAME     = NO
+GENERATE_TODOLIST      = YES
+GENERATE_TESTLIST      = NO
+GENERATE_BUGLIST       = NO
+GENERATE_DEPRECATEDLIST= NO
+ENABLED_SECTIONS       = 
+MAX_INITIALIZER_LINES  = 30
+SHOW_USED_FILES        = YES
+SHOW_DIRECTORIES       = NO
+FILE_VERSION_FILTER    = 
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET                  = NO
+WARNINGS               = YES
+WARN_IF_UNDOCUMENTED   = YES
+WARN_IF_DOC_ERROR      = YES
+WARN_NO_PARAMDOC       = NO
+WARN_FORMAT            = "$file:$line: $text"
+WARN_LOGFILE           = 
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT                  = /Users/parrt/antlr/code/antlr/main/runtime/Java/src
+INPUT_ENCODING         = UTF-8
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.d \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.idl \
+                         *.odl \
+                         *.cs \
+                         *.php \
+                         *.php3 \
+                         *.inc \
+                         *.m \
+                         *.mm \
+                         *.dox \
+                         *.py
+RECURSIVE              = YES
+EXCLUDE                = 
+EXCLUDE_SYMLINKS       = NO
+EXCLUDE_PATTERNS       = 
+EXCLUDE_SYMBOLS        = java::util \
+                         java::io
+EXAMPLE_PATH           = 
+EXAMPLE_PATTERNS       = *
+EXAMPLE_RECURSIVE      = NO
+IMAGE_PATH             = 
+INPUT_FILTER           = 
+FILTER_PATTERNS        = 
+FILTER_SOURCE_FILES    = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER         = YES
+INLINE_SOURCES         = NO
+STRIP_CODE_COMMENTS    = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION    = NO
+REFERENCES_LINK_SOURCE = YES
+USE_HTAGS              = NO
+VERBATIM_HEADERS       = YES
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX     = NO
+COLS_IN_ALPHA_INDEX    = 5
+IGNORE_PREFIX          = 
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML          = YES
+HTML_OUTPUT            = .
+HTML_FILE_EXTENSION    = .html
+HTML_HEADER            = 
+HTML_FOOTER            = 
+HTML_STYLESHEET        = 
+HTML_ALIGN_MEMBERS     = YES
+GENERATE_HTMLHELP      = NO
+CHM_FILE               = 
+HHC_LOCATION           = 
+GENERATE_CHI           = NO
+BINARY_TOC             = NO
+TOC_EXPAND             = NO
+DISABLE_INDEX          = NO
+ENUM_VALUES_PER_LINE   = 4
+GENERATE_TREEVIEW      = NO
+TREEVIEW_WIDTH         = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX         = NO
+LATEX_OUTPUT           = latex
+LATEX_CMD_NAME         = latex
+MAKEINDEX_CMD_NAME     = makeindex
+COMPACT_LATEX          = NO
+PAPER_TYPE             = a4wide
+EXTRA_PACKAGES         = 
+LATEX_HEADER           = 
+PDF_HYPERLINKS         = NO
+USE_PDFLATEX           = YES
+LATEX_BATCHMODE        = NO
+LATEX_HIDE_INDICES     = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF           = NO
+RTF_OUTPUT             = rtf
+COMPACT_RTF            = NO
+RTF_HYPERLINKS         = NO
+RTF_STYLESHEET_FILE    = 
+RTF_EXTENSIONS_FILE    = 
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN           = NO
+MAN_OUTPUT             = man
+MAN_EXTENSION          = .3
+MAN_LINKS              = NO
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML           = NO
+XML_OUTPUT             = xml
+XML_SCHEMA             = 
+XML_DTD                = 
+XML_PROGRAMLISTING     = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF   = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD       = NO
+PERLMOD_LATEX          = NO
+PERLMOD_PRETTY         = YES
+PERLMOD_MAKEVAR_PREFIX = 
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING   = YES
+MACRO_EXPANSION        = NO
+EXPAND_ONLY_PREDEF     = NO
+SEARCH_INCLUDES        = YES
+INCLUDE_PATH           = 
+INCLUDE_FILE_PATTERNS  = 
+PREDEFINED             = 
+EXPAND_AS_DEFINED      = 
+SKIP_FUNCTION_MACROS   = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+TAGFILES               = 
+GENERATE_TAGFILE       = 
+ALLEXTERNALS           = NO
+EXTERNAL_GROUPS        = YES
+PERL_PATH              = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS         = NO
+MSCGEN_PATH            = /Applications/Doxygen.app/Contents/Resources/
+HIDE_UNDOC_RELATIONS   = YES
+HAVE_DOT               = YES
+CLASS_GRAPH            = YES
+COLLABORATION_GRAPH    = YES
+GROUP_GRAPHS           = YES
+UML_LOOK               = NO
+TEMPLATE_RELATIONS     = NO
+INCLUDE_GRAPH          = YES
+INCLUDED_BY_GRAPH      = YES
+CALL_GRAPH             = NO
+CALLER_GRAPH           = NO
+GRAPHICAL_HIERARCHY    = YES
+DIRECTORY_GRAPH        = YES
+DOT_IMAGE_FORMAT       = png
+DOT_PATH               = /Applications/Doxygen.app/Contents/Resources/
+DOTFILE_DIRS           = 
+DOT_GRAPH_MAX_NODES    = 50
+DOT_TRANSPARENT        = NO
+DOT_MULTI_TARGETS      = NO
+GENERATE_LEGEND        = YES
+DOT_CLEANUP            = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+SEARCHENGINE           = NO
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java b/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java
new file mode 100644
index 0000000..c06c511
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java
@@ -0,0 +1,78 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import java.io.*;
+
+/** This is a char buffer stream that is loaded from a file
+ *  all at once when you construct the object.  This looks very
+ *  much like an ANTLReader or ANTLRInputStream, but it's a special case
+ *  since we know the exact size of the object to load.  We can avoid lots
+ *  of data copying. 
+ */
+public class ANTLRFileStream extends ANTLRStringStream {
+	/** Path of the backing file; doubles as the stream's source name. */
+	protected String fileName;
+
+	/** Load fileName using the platform default encoding. */
+	public ANTLRFileStream(String fileName) throws IOException {
+		this(fileName, null);
+	}
+
+	/** Load fileName, decoding with the named charset (null = platform default). */
+	public ANTLRFileStream(String fileName, String encoding) throws IOException {
+		this.fileName = fileName;
+		load(fileName, encoding);
+	}
+
+	/** Read the entire file into the inherited data buffer and set n to the
+	 *  number of chars actually decoded.  A no-op when fileName is null.
+	 *
+	 *  @throws IOException if the file cannot be opened or read, or the
+	 *          encoding is unsupported.
+	 */
+	public void load(String fileName, String encoding)
+		throws IOException
+	{
+		if ( fileName==null ) {
+			return;
+		}
+		File f = new File(fileName);
+		// length() is in bytes; decoded char count is <= byte count for the
+		// supported encodings, so a byte-sized buffer is always big enough.
+		int size = (int)f.length();
+		InputStreamReader isr;
+		FileInputStream fis = new FileInputStream(fileName);
+		if ( encoding!=null ) {
+			isr = new InputStreamReader(fis, encoding);
+		}
+		else {
+			isr = new InputStreamReader(fis);
+		}
+		try {
+			data = new char[size];
+			// BUG FIX: a single read() call is not guaranteed to fill the
+			// buffer (Reader.read may return early, especially for multi-byte
+			// encodings), and on an empty file it returns -1, which used to
+			// leave n == -1.  Loop until EOF or the buffer is full.
+			int numRead = 0;
+			int p = 0;
+			do {
+				numRead = isr.read(data, p, data.length-p);
+				if ( numRead>0 ) {
+					p += numRead;
+				}
+			} while ( numRead!=-1 && p<data.length );
+			super.n = p;
+		}
+		finally {
+			isr.close();
+		}
+	}
+
+	public String getSourceName() {
+		return fileName;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRInputStream.java b/runtime/Java/src/org/antlr/runtime/ANTLRInputStream.java
new file mode 100644
index 0000000..957f6a4
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/ANTLRInputStream.java
@@ -0,0 +1,43 @@
+package org.antlr.runtime;
+
+import java.io.*;
+
+/** A kind of ReaderStream that pulls from an InputStream.
+ *  Useful for reading from stdin and specifying file encodings etc...
+  */
+public class ANTLRInputStream extends ANTLRReaderStream {
+	public ANTLRInputStream() {
+	}
+
+	/** Read input with the platform default encoding and default buffer sizes. */
+	public ANTLRInputStream(InputStream input) throws IOException {
+		this(input, null);
+	}
+
+	/** Read input with an initial buffer of size chars, default encoding. */
+	public ANTLRInputStream(InputStream input, int size) throws IOException {
+		this(input, size, null);
+	}
+
+	/** Read input decoded with the named charset (null = platform default). */
+	public ANTLRInputStream(InputStream input, String encoding) throws IOException {
+		this(input, INITIAL_BUFFER_SIZE, encoding);
+	}
+
+	public ANTLRInputStream(InputStream input, int size, String encoding) throws IOException {
+		this(input, size, READ_BUFFER_SIZE, encoding);
+	}
+
+	/** Master constructor: wrap the byte stream in a reader honoring the
+	 *  requested encoding, then let the superclass vacuum it into memory.
+	 */
+	public ANTLRInputStream(InputStream input,
+							int size,
+							int readBufferSize,
+							String encoding)
+		throws IOException
+	{
+		InputStreamReader isr = (encoding != null)
+			? new InputStreamReader(input, encoding)
+			: new InputStreamReader(input);
+		load(isr, size, readBufferSize);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRReaderStream.java b/runtime/Java/src/org/antlr/runtime/ANTLRReaderStream.java
new file mode 100644
index 0000000..d9aa85b
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/ANTLRReaderStream.java
@@ -0,0 +1,68 @@
+package org.antlr.runtime;
+
+import java.io.*;
+
+/** Vacuum all input from a Reader and then treat it like a StringStream.
+ *  Manage the buffer manually to avoid unnecessary data copying.
+ *
+ *  If you need encoding, use ANTLRInputStream.
+ */
+public class ANTLRReaderStream extends ANTLRStringStream {
+	/** Default number of chars requested per Reader.read() call. */
+	public static final int READ_BUFFER_SIZE = 1024;
+	/** Default initial size of the data buffer (doubles on overflow). */
+	public static final int INITIAL_BUFFER_SIZE = 1024;
+
+	public ANTLRReaderStream() {
+	}
+
+	public ANTLRReaderStream(Reader r) throws IOException {
+		this(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE);
+	}
+
+	public ANTLRReaderStream(Reader r, int size) throws IOException {
+		this(r, size, READ_BUFFER_SIZE);
+	}
+
+	public ANTLRReaderStream(Reader r, int size, int readChunkSize) throws IOException {
+		load(r, size, readChunkSize);
+	}
+
+	/** Drain all chars from r into the inherited data buffer, growing it 2x
+	 *  whenever the next chunk might not fit, and set n to the char count.
+	 *  The reader is always closed, even on error.  No-op when r is null.
+	 *
+	 *  @param size          initial buffer size; defaulted if <= 0
+	 *  @param readChunkSize chars requested per read; defaulted if <= 0
+	 */
+	public void load(Reader r, int size, int readChunkSize)
+		throws IOException
+	{
+		if ( r==null ) {
+			return;
+		}
+		if ( size<=0 ) {
+			size = INITIAL_BUFFER_SIZE;
+		}
+		if ( readChunkSize<=0 ) {
+			// BUG FIX: this branch previously assigned "size = READ_BUFFER_SIZE",
+			// leaving readChunkSize non-positive (so every read() requested <= 0
+			// chars) and clobbering a caller-supplied size.
+			readChunkSize = READ_BUFFER_SIZE;
+		}
+		// System.out.println("load "+size+" in chunks of "+readChunkSize);
+		try {
+			// alloc initial buffer size.
+			data = new char[size];
+			// read all the data in chunks of readChunkSize
+			int numRead=0;
+			int p = 0;
+			do {
+				if ( p+readChunkSize > data.length ) { // overflow? resize 2x
+					char[] newdata = new char[data.length*2];
+					System.arraycopy(data, 0, newdata, 0, data.length);
+					data = newdata;
+				}
+				numRead = r.read(data, p, readChunkSize);
+				// only advance on a real read; -1 is EOF, 0 is a spurious
+				// empty read (legal per the Reader contract)
+				if ( numRead>0 ) {
+					p += numRead;
+				}
+			} while (numRead!=-1); // while not EOF
+			// p is exactly the number of chars read (equivalent to the old
+			// "p += -1 at EOF then n = p+1" accounting, but explicit)
+			super.n = p;
+			//System.out.println("n="+n);
+		}
+		finally {
+			r.close();
+		}
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRStringStream.java b/runtime/Java/src/org/antlr/runtime/ANTLRStringStream.java
new file mode 100644
index 0000000..2d9fd62
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/ANTLRStringStream.java
@@ -0,0 +1,221 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A pretty quick CharStream that pulls all data from an array
+ *  directly.  Every method call counts in the lexer.  Java's
+ *  strings aren't very good so I'm avoiding.
+ */
+public class ANTLRStringStream implements CharStream {
+	/** The data being scanned */
+	protected char[] data;
+
+	/** How many characters are actually in the buffer */
+	protected int n;
+
+	/** 0..n-1 index into string of next char */
+	protected int p=0;
+
+	/** line number 1..n within the input */
+	protected int line = 1;
+
+	/** The index of the character relative to the beginning of the line 0..n-1 */
+	protected int charPositionInLine = 0;
+
+	/** tracks how deep mark() calls are nested */
+	protected int markDepth = 0;
+
+	/** A list of CharStreamState objects that tracks the stream state
+	 *  values line, charPositionInLine, and p that can change as you
+	 *  move through the input stream.  Indexed from 1..markDepth.
+     *  A null is kept @ index 0.  Create upon first call to mark().
+	 */
+	protected List markers;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	public ANTLRStringStream() {
+	}
+
+	/** Copy data in string to a local char array */
+	public ANTLRStringStream(String input) {
+		this();
+		this.data = input.toCharArray();
+		this.n = input.length();
+	}
+
+	/** This is the preferred constructor as no data is copied */
+	public ANTLRStringStream(char[] data, int numberOfActualCharsInArray) {
+		this();
+		this.data = data;
+		this.n = numberOfActualCharsInArray;
+	}
+
+	/** Reset the stream so that it's in the same state it was
+	 *  when the object was created *except* the data array is not
+	 *  touched.
+	 */
+	public void reset() {
+		p = 0;
+		line = 1;
+		charPositionInLine = 0;
+		markDepth = 0;
+	}
+
+	// Advance past the current char, maintaining line/charPositionInLine
+	// bookkeeping.  Silently does nothing at EOF (p==n).
+    public void consume() {
+		//System.out.println("prev p="+p+", c="+(char)data[p]);
+        if ( p < n ) {
+			charPositionInLine++;
+			if ( data[p]=='\n' ) {
+				/*
+				System.out.println("newline char found on line: "+line+
+								   "@ pos="+charPositionInLine);
+				*/
+				line++;
+				charPositionInLine=0;
+			}
+            p++;
+			//System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
+        }
+    }
+
+	// Lookahead: LA(1) is the current char, LA(2) the next, LA(-1) the
+	// previous.  LA(0) is undefined by convention and returns 0; out-of-range
+	// indices in either direction return EOF.
+    public int LA(int i) {
+		if ( i==0 ) {
+			return 0; // undefined
+		}
+		if ( i<0 ) {
+			i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+			if ( (p+i-1) < 0 ) {
+				return CharStream.EOF; // invalid; no char before first char
+			}
+		}
+
+		if ( (p+i-1) >= n ) {
+            //System.out.println("char LA("+i+")=EOF; p="+p);
+            return CharStream.EOF;
+        }
+        //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
+		//System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
+		return data[p+i-1];
+    }
+
+	public int LT(int i) {
+		return LA(i);
+	}
+
+	/** Return the current input symbol index 0..n where n indicates the
+     *  last symbol has been read.  The index is the index of char to
+	 *  be returned from LA(1).
+     */
+    public int index() {
+        return p;
+    }
+
+	public int size() {
+		return n;
+	}
+
+	// Snapshot (p, line, charPositionInLine) so rewind() can restore it.
+	// CharStreamState objects are reused across mark/release cycles at the
+	// same depth, so deep nesting allocates each state at most once.
+	public int mark() {
+        if ( markers==null ) {
+            markers = new ArrayList();
+            markers.add(null); // depth 0 means no backtracking, leave blank
+        }
+        markDepth++;
+		CharStreamState state = null;
+		if ( markDepth>=markers.size() ) {
+			state = new CharStreamState();
+			markers.add(state);
+		}
+		else {
+			state = (CharStreamState)markers.get(markDepth);
+		}
+		state.p = p;
+		state.line = line;
+		state.charPositionInLine = charPositionInLine;
+		lastMarker = markDepth;
+		return markDepth;
+    }
+
+	// Restore the state captured by mark() call m, then release m (and,
+	// implicitly, any markers nested more deeply than m).
+    public void rewind(int m) {
+		CharStreamState state = (CharStreamState)markers.get(m);
+		// restore stream state
+		seek(state.p);
+		line = state.line;
+		charPositionInLine = state.charPositionInLine;
+		release(m);
+	}
+
+	public void rewind() {
+		rewind(lastMarker);
+	}
+
+	public void release(int marker) {
+		// unwind any other markers made after m and release m
+		markDepth = marker;
+		// release this marker
+		markDepth--;
+	}
+
+	/** consume() ahead until p==index; can't just set p=index as we must
+	 *  update line and charPositionInLine.
+	 */
+	public void seek(int index) {
+		if ( index<=p ) {
+			p = index; // just jump; don't update stream state (line, ...)
+			return;
+		}
+		// seek forward, consume until p hits index
+		while ( p<index ) {
+			consume();
+		}
+	}
+
+	// Both bounds are inclusive: substring(0, 2) returns the first 3 chars.
+	public String substring(int start, int stop) {
+		return new String(data,start,stop-start+1);
+	}
+
+	public int getLine() {
+		return line;
+	}
+
+	public int getCharPositionInLine() {
+		return charPositionInLine;
+	}
+
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	public void setCharPositionInLine(int pos) {
+		this.charPositionInLine = pos;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/BaseRecognizer.java b/runtime/Java/src/org/antlr/runtime/BaseRecognizer.java
new file mode 100644
index 0000000..3d98b03
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/BaseRecognizer.java
@@ -0,0 +1,831 @@
+package org.antlr.runtime;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** A generic recognizer that can handle recognizers generated from
+ *  lexer, parser, and tree grammars.  This is all the parsing
+ *  support code essentially; most of it is error recovery stuff and
+ *  backtracking.
+ */
+public abstract class BaseRecognizer {
+	public static final int MEMO_RULE_FAILED = -2;
+	public static final int MEMO_RULE_UNKNOWN = -1;
+	public static final int INITIAL_FOLLOW_STACK_SIZE = 100;
+
+	public static final Integer MEMO_RULE_FAILED_I = new Integer(MEMO_RULE_FAILED);
+
+	// copies from Token object for convenience in actions
+	public static final int DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
+	public static final int HIDDEN = Token.HIDDEN_CHANNEL;
+
+	public static final String NEXT_TOKEN_RULE_NAME = "nextToken";
+
+	/** Track the set of token types that can follow any rule invocation.
+	 *  Stack grows upwards.  When it hits the max, it grows 2x in size
+	 *  and keeps going.
+	 */
+	protected BitSet[] following = new BitSet[INITIAL_FOLLOW_STACK_SIZE];
+	protected int _fsp = -1;
+
+	/** This is true when we see an error and before having successfully
+	 *  matched a token.  Prevents generation of more than one error message
+	 *  per error.
+	 */
+	protected boolean errorRecovery = false;
+
+	/** The index into the input stream where the last error occurred.
+	 * 	This is used to prevent infinite loops where an error is found
+	 *  but no token is consumed during recovery...another error is found,
+	 *  ad nauseam.  This is a failsafe mechanism to guarantee that at least
+	 *  one token/tree node is consumed for two errors.
+	 */
+	protected int lastErrorIndex = -1;
+
+	/** In lieu of a return value, this indicates that a rule or token
+	 *  has failed to match.  Reset to false upon valid token match.
+	 */
+	protected boolean failed = false;
+
+	/** If 0, no backtracking is going on.  Safe to exec actions etc...
+	 *  If >0 then it's the level of backtracking.
+	 */
+	protected int backtracking = 0;
+
+	/** An array[size num rules] of Map<Integer,Integer> that tracks
+	 *  the stop token index for each rule.  ruleMemo[ruleIndex] is
+	 *  the memoization table for ruleIndex.  For key ruleStartIndex, you
+	 *  get back the stop token for associated rule or MEMO_RULE_FAILED.
+	 *
+	 *  This is only used if rule memoization is on (which it is by default).
+	 */
+	protected Map[] ruleMemo;
+
+	/** reset the parser's state; subclasses must rewind the input stream */
+	public void reset() {
+		// wack everything related to error recovery
+		_fsp = -1;
+		errorRecovery = false;
+		lastErrorIndex = -1;
+		failed = false;
+		// wack everything related to backtracking and memoization
+		backtracking = 0;
+		for (int i = 0; ruleMemo!=null && i < ruleMemo.length; i++) { // wipe cache
+			ruleMemo[i] = null;
+		}
+	}
+
+	/** Match current input symbol against ttype.  Upon error, do one token
+	 *  insertion or deletion if possible.  You can override to not recover
+	 *  here and bail out of the current production to the normal error
+	 *  exception catch (at the end of the method) by just throwing
+	 *  MismatchedTokenException upon input.LA(1)!=ttype.
+	 */
+	public void match(IntStream input, int ttype, BitSet follow)
+		throws RecognitionException
+	{
+		if ( input.LA(1)==ttype ) {
+			input.consume();
+			errorRecovery = false;
+			failed = false;
+			return;
+		}
+		if ( backtracking>0 ) {
+			failed = true;
+			return;
+		}
+		mismatch(input, ttype, follow);
+		return;
+	}
+
+	public void matchAny(IntStream input) {
+		errorRecovery = false;
+		failed = false;
+		input.consume();
+	}
+
+	/** factor out what to do upon token mismatch so tree parsers can behave
+	 *  differently.  Override this method in your parser to do things
+	 *  like bailing out after the first error; just throw the mte object
+	 *  instead of calling the recovery method.
+	 */
+	protected void mismatch(IntStream input, int ttype, BitSet follow)
+		throws RecognitionException
+	{
+		MismatchedTokenException mte =
+			new MismatchedTokenException(ttype, input);
+		recoverFromMismatchedToken(input, mte, ttype, follow);
+	}
+
+	/** Report a recognition problem.
+	 *
+	 *  This method sets errorRecovery to indicate the parser is recovering
+	 *  not parsing.  Once in recovery mode, no errors are generated.
+	 *  To get out of recovery mode, the parser must successfully match
+	 *  a token (after a resync).  So it will go:
+	 *
+	 * 		1. error occurs
+	 * 		2. enter recovery mode, report error
+	 * 		3. consume until token found in resynch set
+	 * 		4. try to resume parsing
+	 * 		5. next match() will reset errorRecovery mode
+	 */
+	public void reportError(RecognitionException e) {
+		// if we've already reported an error and have not matched a token
+		// yet successfully, don't report any errors.
+		if ( errorRecovery ) {
+			//System.err.print("[SPURIOUS] ");
+			return;
+		}
+		errorRecovery = true;
+
+		displayRecognitionError(this.getTokenNames(), e);
+	}
+
+	public void displayRecognitionError(String[] tokenNames,
+										RecognitionException e)
+	{
+		String hdr = getErrorHeader(e);
+		String msg = getErrorMessage(e, tokenNames);
+		emitErrorMessage(hdr+" "+msg);
+	}
+
+	/** What error message should be generated for the various
+	 *  exception types?
+	 *
+	 *  Not very object-oriented code, but I like having all error message
+	 *  generation within one method rather than spread among all of the
+	 *  exception classes. This also makes it much easier for the exception
+	 *  handling because the exception classes do not have to have pointers back
+	 *  to this object to access utility routines and so on. Also, changing
+	 *  the message for an exception type would be difficult because you
+	 *  would have to subclass the exception, but then somehow get ANTLR
+	 *  to make those kinds of exception objects instead of the default.
+	 *  This looks weird, but trust me--it makes the most sense in terms
+	 *  of flexibility.
+	 *
+	 *  For grammar debugging, you will want to override this to add
+	 *  more information such as the stack frame with
+	 *  getRuleInvocationStack(e, this.getClass().getName()) and,
+	 *  for no viable alts, the decision description and state etc...
+	 *
+	 *  Override this to change the message generated for one or more
+	 *  exception types.
+	 */
	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
		String msg = null;
		// dispatch on the concrete exception type; each branch builds a
		// human-readable message from the offending token and expectation
		if ( e instanceof MismatchedTokenException ) {
			MismatchedTokenException mte = (MismatchedTokenException)e;
			String tokenName="<unknown>";
			if ( mte.expecting== Token.EOF ) {
				tokenName = "EOF";
			}
			else {
				// look up the display name of the expected token type
				tokenName = tokenNames[mte.expecting];
			}
			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
				" expecting "+tokenName;
		}
		else if ( e instanceof MismatchedTreeNodeException ) {
			MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
			String tokenName="<unknown>";
			if ( mtne.expecting==Token.EOF ) {
				tokenName = "EOF";
			}
			else {
				tokenName = tokenNames[mtne.expecting];
			}
			msg = "mismatched tree node: "+mtne.node+
				" expecting "+tokenName;
		}
		else if ( e instanceof NoViableAltException ) {
			NoViableAltException nvae = (NoViableAltException)e;
			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
			// and "(decision="+nvae.decisionNumber+") and
			// "state "+nvae.stateNumber
			msg = "no viable alternative at input "+getTokenErrorDisplay(e.token);
		}
		else if ( e instanceof EarlyExitException ) {
			EarlyExitException eee = (EarlyExitException)e;
			// for development, can add "(decision="+eee.decisionNumber+")"
			msg = "required (...)+ loop did not match anything at input "+
				getTokenErrorDisplay(e.token);
		}
		else if ( e instanceof MismatchedSetException ) {
			MismatchedSetException mse = (MismatchedSetException)e;
			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
				" expecting set "+mse.expecting;
		}
		else if ( e instanceof MismatchedNotSetException ) {
			// note: same message text as the plain set case; the type
			// distinction matters only to code that catches the exception
			MismatchedNotSetException mse = (MismatchedNotSetException)e;
			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
				" expecting set "+mse.expecting;
		}
		else if ( e instanceof FailedPredicateException ) {
			FailedPredicateException fpe = (FailedPredicateException)e;
			msg = "rule "+fpe.ruleName+" failed predicate: {"+
				fpe.predicateText+"}?";
		}
		// returns null for exception types not handled above
		return msg;
	}
+
+	/** What is the error header, normally line/character position information? */
+	public String getErrorHeader(RecognitionException e) {
+		return "line "+e.line+":"+e.charPositionInLine;
+	}
+
	/** How should a token be displayed in an error message? The default
	 *  is to display just the text, but during development you might
	 *  want to have a lot of information spit out.  Override in that case
	 *  to use t.toString() (which, for CommonToken, dumps everything about
	 *  the token). This is better than forcing you to override a method in
	 *  your token objects because you don't have to go modify your lexer
	 *  so that it creates a new Java type.
	 */
	public String getTokenErrorDisplay(Token t) {
		String s = t.getText();
		if ( s==null ) {
			// no text available (e.g., imaginary tokens); show the type
			if ( t.getType()==Token.EOF ) {
				s = "<EOF>";
			}
			else {
				s = "<"+t.getType()+">";
			}
		}
		// escape whitespace so the token always prints on a single line;
		// replacement "\\\\n" yields a literal backslash followed by 'n'
		// (replaceAll treats backslash specially in the replacement)
		s = s.replaceAll("\n","\\\\n");
		s = s.replaceAll("\r","\\\\r");
		s = s.replaceAll("\t","\\\\t");
		return "'"+s+"'";
	}
+
	/** Override this method to change where error messages go.
	 *  By default, messages are written to System.err.
	 */
	public void emitErrorMessage(String msg) {
		System.err.println(msg);
	}
+
	/** Recover from an error found on the input stream.  Mostly this is
	 *  NoViableAlt exceptions, but could be a mismatched token that
	 *  the match() routine could not recover from.
	 */
	public void recover(IntStream input, RecognitionException re) {
		if ( lastErrorIndex==input.index() ) {
			// uh oh, another error at same token index; must be a case
			// where LT(1) is in the recovery token set so nothing is
			// consumed; consume a single token so at least to prevent
			// an infinite loop; this is a failsafe.
			input.consume();
		}
		lastErrorIndex = input.index();
		// resync: throw away tokens until we see something that could
		// follow this rule or any rule on the invocation stack
		BitSet followSet = computeErrorRecoverySet();
		beginResync();
		consumeUntil(input, followSet);
		endResync();
	}
+
	/** A hook to listen in on the token consumption during error recovery.
	 *  The DebugParser subclasses this to fire events to the listener.
	 */
	public void beginResync() {
		// default: no-op; called just before recovery starts consuming
	}

	public void endResync() {
		// default: no-op; called once recovery has finished consuming
	}
+
+	/*  Compute the error recovery set for the current rule.  During
+	 *  rule invocation, the parser pushes the set of tokens that can
+	 *  follow that rule reference on the stack; this amounts to
+	 *  computing FIRST of what follows the rule reference in the
+	 *  enclosing rule. This local follow set only includes tokens
+	 *  from within the rule; i.e., the FIRST computation done by
+	 *  ANTLR stops at the end of a rule.
+	 *
+	 *  EXAMPLE
+	 *
+	 *  When you find a "no viable alt exception", the input is not
+	 *  consistent with any of the alternatives for rule r.  The best
+	 *  thing to do is to consume tokens until you see something that
+	 *  can legally follow a call to r *or* any rule that called r.
+	 *  You don't want the exact set of viable next tokens because the
+	 *  input might just be missing a token--you might consume the
+	 *  rest of the input looking for one of the missing tokens.
+	 *
+	 *  Consider grammar:
+	 *
+	 *  a : '[' b ']'
+	 *    | '(' b ')'
+	 *    ;
+	 *  b : c '^' INT ;
+	 *  c : ID
+	 *    | INT
+	 *    ;
+	 *
+	 *  At each rule invocation, the set of tokens that could follow
+	 *  that rule is pushed on a stack.  Here are the various "local"
+	 *  follow sets:
+	 *
+	 *  FOLLOW(b1_in_a) = FIRST(']') = ']'
+	 *  FOLLOW(b2_in_a) = FIRST(')') = ')'
+	 *  FOLLOW(c_in_b) = FIRST('^') = '^'
+	 *
+	 *  Upon erroneous input "[]", the call chain is
+	 *
+	 *  a -> b -> c
+	 *
+	 *  and, hence, the follow context stack is:
+	 *
+	 *  depth  local follow set     after call to rule
+	 *    0         <EOF>                    a (from main())
+	 *    1          ']'                     b
+	 *    2          '^'                     c
+	 *
+	 *  Notice that ')' is not included, because b would have to have
+	 *  been called from a different context in rule a for ')' to be
+	 *  included.
+	 *
+	 *  For error recovery, we cannot consider FOLLOW(c)
+	 *  (context-sensitive or otherwise).  We need the combined set of
+	 *  all context-sensitive FOLLOW sets--the set of all tokens that
+	 *  could follow any reference in the call chain.  We need to
+	 *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+	 *  we resync'd to that token, we'd consume until EOF.  We need to
+	 *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+	 *  In this case, for input "[]", LA(1) is in this set so we would
+	 *  not consume anything and after printing an error rule c would
+	 *  return normally.  It would not find the required '^' though.
+	 *  At this point, it gets a mismatched token error and throws an
+	 *  exception (since LA(1) is not in the viable following token
+	 *  set).  The rule exception handler tries to recover, but finds
+	 *  the same recovery set and doesn't consume anything.  Rule b
+	 *  exits normally returning to rule a.  Now it finds the ']' (and
+	 *  with the successful match exits errorRecovery mode).
+	 *
+	 *  So, you can see that the parser walks up the call chain looking
+	 *  for the token that was a member of the recovery set.
+	 *
+	 *  Errors are not generated in errorRecovery mode.
+	 *
+	 *  ANTLR's error recovery mechanism is based upon original ideas:
+	 *
+	 *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
+	 *
+	 *  and
+	 *
+	 *  "A note on error recovery in recursive descent parsers":
+	 *  http://portal.acm.org/citation.cfm?id=947902.947905
+	 *
+	 *  Later, Josef Grosch had some good ideas:
+	 *
+	 *  "Efficient and Comfortable Error Recovery in Recursive Descent
+	 *  Parsers":
+	 *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+	 *
+	 *  Like Grosch I implemented local FOLLOW sets that are combined
+	 *  at run-time upon error to avoid overhead during parsing.
+	 */
	protected BitSet computeErrorRecoverySet() {
		// exact=false: union the local follow sets of every rule on the
		// invocation stack (see the long comment above for the rationale)
		return combineFollows(false);
	}
+
+	/** Compute the context-sensitive FOLLOW set for current rule.
+	 *  This is set of token types that can follow a specific rule
+	 *  reference given a specific call chain.  You get the set of
+	 *  viable tokens that can possibly come next (lookahead depth 1)
+	 *  given the current call chain.  Contrast this with the
+	 *  definition of plain FOLLOW for rule r:
+	 *
+	 *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+	 *
+	 *  where x in T* and alpha, beta in V*; T is set of terminals and
+	 *  V is the set of terminals and nonterminals.  In other words,
+	 *  FOLLOW(r) is the set of all tokens that can possibly follow
+	 *  references to r in *any* sentential form (context).  At
+	 *  runtime, however, we know precisely which context applies as
+	 *  we have the call chain.  We may compute the exact (rather
+	 *  than covering superset) set of following tokens.
+	 *
+	 *  For example, consider grammar:
+	 *
+	 *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+	 *       | "return" expr '.'
+	 *       ;
+	 *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+	 *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+	 *       | '(' expr ')'
+	 *       ;
+	 *
+	 *  The FOLLOW sets are all inclusive whereas context-sensitive
+	 *  FOLLOW sets are precisely what could follow a rule reference.
+	 *  For input "i=(3);", here is the derivation:
+	 *
+	 *  stat => ID '=' expr ';'
+	 *       => ID '=' atom ('+' atom)* ';'
+	 *       => ID '=' '(' expr ')' ('+' atom)* ';'
+	 *       => ID '=' '(' atom ')' ('+' atom)* ';'
+	 *       => ID '=' '(' INT ')' ('+' atom)* ';'
+	 *       => ID '=' '(' INT ')' ';'
+	 *
+	 *  At the "3" token, you'd have a call chain of
+	 *
+	 *    stat -> expr -> atom -> expr -> atom
+	 *
+	 *  What can follow that specific nested ref to atom?  Exactly ')'
+	 *  as you can see by looking at the derivation of this specific
+	 *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+	 *
+	 *  You want the exact viable token set when recovering from a
+	 *  token mismatch.  Upon token mismatch, if LA(1) is member of
+	 *  the viable next token set, then you know there is most likely
+	 *  a missing token in the input stream.  "Insert" one by just not
+	 *  throwing an exception.
+	 */
	protected BitSet computeContextSensitiveRuleFOLLOW() {
		// exact=true: stop combining after the first stack frame whose
		// local follow set lacks EOR (control cannot exit through it)
		return combineFollows(true);
	}
+
	/** Union the local follow sets on the invocation stack, walking from
	 *  the top (innermost rule) down.  With exact=true, stop after the
	 *  first set that does not contain EOR: control cannot "fall out" of
	 *  that rule, so outer contexts are not viable.  EOR itself is
	 *  stripped from the result in either case.
	 */
	protected BitSet combineFollows(boolean exact) {
		int top = _fsp;
		BitSet followSet = new BitSet();
		for (int i=top; i>=0; i--) {
			BitSet localFollowSet = (BitSet) following[i];
			/*
			System.out.println("local follow depth "+i+"="+
							   localFollowSet.toString(getTokenNames())+")");
			*/
			followSet.orInPlace(localFollowSet);
			if ( exact && !localFollowSet.member(Token.EOR_TOKEN_TYPE) ) {
				break;
			}
		}
		followSet.remove(Token.EOR_TOKEN_TYPE);
		return followSet;
	}
+
+	/** Attempt to recover from a single missing or extra token.
+	 *
+	 *  EXTRA TOKEN
+	 *
+	 *  LA(1) is not what we are looking for.  If LA(2) has the right token,
+	 *  however, then assume LA(1) is some extra spurious token.  Delete it
+	 *  and LA(2) as if we were doing a normal match(), which advances the
+	 *  input.
+	 *
+	 *  MISSING TOKEN
+	 *
+	 *  If current token is consistent with what could come after
+	 *  ttype then it is ok to "insert" the missing token, else throw
+	 *  exception For example, Input "i=(3;" is clearly missing the
+	 *  ')'.  When the parser returns from the nested call to expr, it
+	 *  will have call chain:
+	 *
+	 *    stat -> expr -> atom
+	 *
+	 *  and it will be trying to match the ')' at this point in the
+	 *  derivation:
+	 *
+	 *       => ID '=' '(' INT ')' ('+' atom)* ';'
+	 *                          ^
+	 *  match() will see that ';' doesn't match ')' and report a
+	 *  mismatched token error.  To recover, it sees that LA(1)==';'
+	 *  is in the set of tokens that can follow the ')' token
+	 *  reference in rule atom.  It can assume that you forgot the ')'.
+	 */
+	public void recoverFromMismatchedToken(IntStream input,
+										   RecognitionException e,
+										   int ttype,
+										   BitSet follow)
+		throws RecognitionException
+	{
+		System.err.println("BR.recoverFromMismatchedToken");		
+		// if next token is what we are looking for then "delete" this token
+		if ( input.LA(2)==ttype ) {
+			reportError(e);
+			/*
+			System.err.println("recoverFromMismatchedToken deleting "+input.LT(1)+
+							   " since "+input.LT(2)+" is what we want");
+			*/
+			beginResync();
+			input.consume(); // simply delete extra token
+			endResync();
+			input.consume(); // move past ttype token as if all were ok
+			return;
+		}
+		if ( !recoverFromMismatchedElement(input,e,follow) ) {
+			throw e;
+		}
+	}
+
	public void recoverFromMismatchedSet(IntStream input,
										 RecognitionException e,
										 BitSet follow)
		throws RecognitionException
	{
		// TODO do single token deletion like above for Token mismatch
		// attempt single-token insertion; if the current token cannot be
		// treated as a missing-element case, propagate the exception
		if ( !recoverFromMismatchedElement(input,e,follow) ) {
			throw e;
		}
	}
+
+	/** This code is factored out from mismatched token and mismatched set
+	 *  recovery.  It handles "single token insertion" error recovery for
+	 *  both.  No tokens are consumed to recover from insertions.  Return
+	 *  true if recovery was possible else return false.
+	 */
	protected boolean recoverFromMismatchedElement(IntStream input,
												   RecognitionException e,
												   BitSet follow)
	{
		if ( follow==null ) {
			// we have no information about the follow; we can only consume
			// a single token and hope for the best
			return false;
		}
		//System.out.println("recoverFromMismatchedElement");
		// compute what can follow this grammar element reference
		if ( follow.member(Token.EOR_TOKEN_TYPE) ) {
			// EOR means "end of rule" is viable here, so what can follow
			// depends on our callers; fold in the context-sensitive FOLLOW
			BitSet viableTokensFollowingThisRule =
				computeContextSensitiveRuleFOLLOW();
			follow = follow.or(viableTokensFollowingThisRule);
			follow.remove(Token.EOR_TOKEN_TYPE);
		}
		// if current token is consistent with what could come after set
		// then it is ok to "insert" the missing token, else throw exception
		//System.out.println("viable tokens="+follow.toString(getTokenNames())+")");
		if ( follow.member(input.LA(1)) ) {
			//System.out.println("LT(1)=="+input.LT(1)+" is consistent with what follows; inserting...");
			reportError(e);
			return true;
		}
		//System.err.println("nothing to do; throw exception");
		return false;
	}
+
+	public void consumeUntil(IntStream input, int tokenType) {
+		//System.out.println("consumeUntil "+tokenType);
+		int ttype = input.LA(1);
+		while (ttype != Token.EOF && ttype != tokenType) {
+			input.consume();
+			ttype = input.LA(1);
+		}
+	}
+
+	/** Consume tokens until one matches the given token set */
+	public void consumeUntil(IntStream input, BitSet set) {
+		//System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
+		int ttype = input.LA(1);
+		while (ttype != Token.EOF && !set.member(ttype) ) {
+			//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
+			input.consume();
+			ttype = input.LA(1);
+		}
+	}
+
+	/** Push a rule's follow set using our own hardcoded stack */
+	protected void pushFollow(BitSet fset) {
+		if ( (_fsp +1)>=following.length ) {
+			BitSet[] f = new BitSet[following.length*2];
+			System.arraycopy(following, 0, f, 0, following.length-1);
+			following = f;
+		}
+		following[++_fsp] = fset;
+	}
+
	/** Return List<String> of the rules in your parser instance
	 *  leading up to a call to this method.  You could override if
	 *  you want more details such as the file/line info of where
	 *  in the parser java code a rule is invoked.
	 *
	 *  This is very useful for error messages and for context-sensitive
	 *  error recovery.
	 */
	public List getRuleInvocationStack() {
		// a fresh Throwable captures the current Java call stack, from
		// which the static helper extracts the rule-method names
		String parserClassName = getClass().getName();
		return getRuleInvocationStack(new Throwable(), parserClassName);
	}
+
	/** A more general version of getRuleInvocationStack where you can
	 *  pass in, for example, a RecognitionException to get its rule
	 *  stack trace.  This routine is shared with all recognizers, hence,
	 *  static.
	 *
	 *  TODO: move to a utility class or something; weird having lexer call this
	 */
	public static List getRuleInvocationStack(Throwable e,
											  String recognizerClassName)
	{
		List rules = new ArrayList();
		StackTraceElement[] stack = e.getStackTrace();
		int i = 0;
		// walk from the bottom of the stack (outermost frame) toward the
		// throw point so the returned list is ordered outermost-first
		for (i=stack.length-1; i>=0; i--) {
			StackTraceElement t = stack[i];
			if ( t.getClassName().startsWith("org.antlr.runtime.") ) {
				continue; // skip support code such as this method
			}
			if ( t.getMethodName().equals(NEXT_TOKEN_RULE_NAME) ) {
				continue; // skip the synthetic nextToken rule
			}
			if ( !t.getClassName().equals(recognizerClassName) ) {
				continue; // must not be part of this parser
			}
            rules.add(t.getMethodName());
		}
		return rules;
	}
+
	public int getBacktrackingLevel() {
		// 0 means not backtracking; >0 is the current backtrack nesting depth
		return backtracking;
	}

	/** Used to print out token names like ID during debugging and
	 *  error reporting.  The generated parsers implement a method
	 *  that overrides this to point to their String[] tokenNames.
	 */
	public String[] getTokenNames() {
		return null;
	}

	/** For debugging and other purposes, might want the grammar name.
	 *  Have ANTLR generate an implementation for this method.
	 */
	public String getGrammarFileName() {
		return null;
	}
+
+	/** A convenience method for use most often with template rewrites.
+	 *  Convert a List<Token> to List<String>
+	 */
+	public List toStrings(List tokens) {
+		if ( tokens==null ) return null;
+		List strings = new ArrayList(tokens.size());
+		for (int i=0; i<tokens.size(); i++) {
+			strings.add(((Token)tokens.get(i)).getText());
+		}
+		return strings;
+	}
+
+	/** Convert a List<RuleReturnScope> to List<StringTemplate> by copying
+	 *  out the .st property.  Useful when converting from
+	 *  list labels to template attributes:
+	 *
+	 *    a : ids+=rule -> foo(ids={toTemplates($ids)})
+	 *      ;
+	 *  TJP: this is not needed anymore.  $ids is a List of templates
+	 *  when output=template
+	 * 
+	public List toTemplates(List retvals) {
+		if ( retvals==null ) return null;
+		List strings = new ArrayList(retvals.size());
+		for (int i=0; i<retvals.size(); i++) {
+			strings.add(((RuleReturnScope)retvals.get(i)).getTemplate());
+		}
+		return strings;
+	}
+	 */
+
	/** Given a rule number and a start token index number, return
	 *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
	 *  start index.  If this rule has parsed input starting from the
	 *  start index before, then return where the rule stopped parsing.
	 *  It returns the index of the last token matched by the rule.
	 *
	 *  For now we use a hashtable and just the slow Object-based one.
	 *  Later, we can make a special one for ints and also one that
	 *  tosses out data after we commit past input position i.
	 */
	public int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
		// side effect: lazily create the per-rule map on first query;
		// memoize() relies on this map existing before it records results
		if ( ruleMemo[ruleIndex]==null ) {
			ruleMemo[ruleIndex] = new HashMap();
		}
		Integer stopIndexI =
			(Integer)ruleMemo[ruleIndex].get(new Integer(ruleStartIndex));
		if ( stopIndexI==null ) {
			return MEMO_RULE_UNKNOWN;
		}
		return stopIndexI.intValue();
	}
+
	/** Has this rule already parsed input at the current index in the
	 *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
	 *  If we attempted but failed to parse properly before, return
	 *  MEMO_RULE_FAILED.
	 *
	 *  This method has a side-effect: if we have seen this input for
	 *  this rule and successfully parsed before, then seek ahead to
	 *  1 past the stop token matched for this rule last time.
	 */
	public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
		int stopIndex = getRuleMemoization(ruleIndex, input.index());
		if ( stopIndex==MEMO_RULE_UNKNOWN ) {
			return false;
		}
		if ( stopIndex==MEMO_RULE_FAILED ) {
			//System.out.println("rule "+ruleIndex+" will never succeed");
			// memoized failure: flag it so the caller bails out
			failed=true;
		}
		else {
			//System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+failed);
			input.seek(stopIndex+1); // jump to one past stop token
		}
		return true;
	}
+
	/** Record whether or not this rule parsed the input at this position
	 *  successfully.  Use a standard java hashtable for now.
	 *  On failure, MEMO_RULE_FAILED is stored instead of a stop index.
	 *  Silently does nothing if the per-rule map was never created
	 *  (i.e., if getRuleMemoization was not called first for this rule).
	 */
	public void memoize(IntStream input,
						int ruleIndex,
						int ruleStartIndex)
	{
		// stop index is the last token matched (index()-1), or the
		// failure sentinel when the rule did not match
		int stopTokenIndex = failed?MEMO_RULE_FAILED:input.index()-1;
		if ( ruleMemo[ruleIndex]!=null ) {
			ruleMemo[ruleIndex].put(
				new Integer(ruleStartIndex), new Integer(stopTokenIndex)
			);
		}
	}
+
+	/** Assume failure in case a rule bails out with an exception.
+	 *  Reset to rule stop index if successful.
+	public void memoizeFailure(int ruleIndex, int ruleStartIndex) {
+		ruleMemo[ruleIndex].put(
+			new Integer(ruleStartIndex), MEMO_RULE_FAILED_I
+		);
+	}
+	 */
+
+	/** After successful completion of a rule, record success for this
+	 *  rule and that it can skip ahead next time it attempts this
+	 *  rule for this input position.
+	public void memoizeSuccess(IntStream input,
+							   int ruleIndex,
+							   int ruleStartIndex)
+	{
+		ruleMemo[ruleIndex].put(
+			new Integer(ruleStartIndex), new Integer(input.index()-1)
+		);
+	}
+	 */
+
	/** return how many rule/input-index pairs there are in total.
	 *  Returns 0 when memoization is not in use (ruleMemo is null).
	 *  TODO: this includes synpreds. :(
	 */
	public int getRuleMemoizationCacheSize() {
		int n = 0;
		for (int i = 0; ruleMemo!=null && i < ruleMemo.length; i++) {
			Map ruleMap = ruleMemo[i];
			if ( ruleMap!=null ) {
				n += ruleMap.size(); // how many input indexes are recorded?
			}
		}
		return n;
	}
+
	/** Default rule-entry trace: prints the rule name and upcoming input
	 *  symbol to stdout, plus failed/backtracking state when relevant.
	 */
	public void traceIn(String ruleName, int ruleIndex, Object inputSymbol)  {
		System.out.print("enter "+ruleName+" "+inputSymbol);
		if ( failed ) {
			// NOTE(review): println here ends the line early, so any
			// " backtracking=" text below lands on the next line --
			// confirm whether print() was intended
			System.out.println(" failed="+failed);
		}
		if ( backtracking>0 ) {
			System.out.print(" backtracking="+backtracking);
		}
		System.out.println();
	}
+
	/** Default rule-exit trace: prints the rule name and upcoming input
	 *  symbol to stdout, plus failed/backtracking state when relevant.
	 */
	public void traceOut(String ruleName,
						 int ruleIndex,
						 Object inputSymbol)
	{
		System.out.print("exit "+ruleName+" "+inputSymbol);
		if ( failed ) {
			// NOTE(review): same early-newline quirk as traceIn -- confirm
			System.out.println(" failed="+failed);
		}
		if ( backtracking>0 ) {
			System.out.print(" backtracking="+backtracking);
		}
		System.out.println();
	}
+
+	/** A syntactic predicate.  Returns true/false depending on whether
+	 *  the specified grammar fragment matches the current input stream.
+	 *  This resets the failed instance var afterwards.
+	public boolean synpred(IntStream input, GrammarFragmentPtr fragment) {
+		//int i = input.index();
+		//System.out.println("begin backtracking="+backtracking+" @"+i+"="+((CommonTokenStream)input).LT(1));
+		backtracking++;
+		beginBacktrack(backtracking);
+		int start = input.mark();
+		try {fragment.invoke();}
+		catch (RecognitionException re) {
+			System.err.println("impossible: "+re);
+		}
+		boolean success = !failed;
+		input.rewind(start);
+		endBacktrack(backtracking, success);
+		backtracking--;
+		//System.out.println("end backtracking="+backtracking+": "+(failed?"FAILED":"SUCCEEDED")+" @"+input.index()+" should be "+i);
+		failed=false;
+		return success;
+	}
+	 */
+}
diff --git a/runtime/Java/src/org/antlr/runtime/BitSet.java b/runtime/Java/src/org/antlr/runtime/BitSet.java
new file mode 100644
index 0000000..99bf83b
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/BitSet.java
@@ -0,0 +1,324 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import java.util.List;
+
+/**A stripped-down version of org.antlr.misc.BitSet that is just
+ * good enough to handle runtime requirements such as FOLLOW sets
+ * for automatic error recovery.
+ */
+public class BitSet implements Cloneable {
+    protected final static int BITS = 64;    // number of bits / long
+    protected final static int LOG_BITS = 6; // 2^6 == 64
+
+    /* We will often need to do a mod operator (i mod nbits).  Its
+     * turns out that, for powers of two, this mod operation is
+     * same as (i & (nbits-1)).  Since mod is slow, we use a
+     * precomputed mod mask to do the mod instead.
+     */
+    protected final static int MOD_MASK = BITS - 1;
+
+    /** The actual data bits */
+    protected long bits[];
+
+    /** Construct a bitset of size one word (64 bits) */
+    public BitSet() {
+        this(BITS);
+    }
+
+    /** Construction from a static array of longs */
+    public BitSet(long[] bits_) {
+        bits = bits_;
+    }
+
+	/** Construction from a list of integers */
+	public BitSet(List items) {
+		for (int i = 0; i < items.size(); i++) {
+			Integer v = (Integer) items.get(i);
+			add(v.intValue());
+		}
+	}
+
+    /** Construct a bitset given the size
+     * @param nbits The size of the bitset in bits
+     */
+    public BitSet(int nbits) {
+        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
+    }
+
+	public static BitSet of(int el) {
+		BitSet s = new BitSet(el + 1);
+		s.add(el);
+		return s;
+	}
+
+	public static BitSet of(int a, int b) {
+		BitSet s = new BitSet(Math.max(a,b)+1);
+		s.add(a);
+		s.add(b);
+		return s;
+	}
+
+	public static BitSet of(int a, int b, int c) {
+		BitSet s = new BitSet();
+		s.add(a);
+		s.add(b);
+		s.add(c);
+		return s;
+	}
+
+	public static BitSet of(int a, int b, int c, int d) {
+		BitSet s = new BitSet();
+		s.add(a);
+		s.add(b);
+		s.add(c);
+		s.add(d);
+		return s;
+	}
+
+	/** return this | a in a new set */
+	public BitSet or(BitSet a) {
+		if ( a==null ) {
+			return this;
+		}
+		BitSet s = (BitSet)this.clone();
+		s.orInPlace(a);
+		return s;
+	}
+
+	/** or this element into this set (grow as necessary to accommodate) */
+	public void add(int el) {
+		int n = wordNumber(el);
+		if (n >= bits.length) {
+			growToInclude(el);
+		}
+		bits[n] |= bitMask(el);
+	}
+
+	/**
+	 * Grows the set to a larger number of bits.
+	 * @param bit element that must fit in set
+	 */
+	public void growToInclude(int bit) {
+		int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
+		long newbits[] = new long[newSize];
+		System.arraycopy(bits, 0, newbits, 0, bits.length);
+		bits = newbits;
+	}
+
+	public void orInPlace(BitSet a) {
+		if ( a==null ) {
+			return;
+		}
+		// If this is smaller than a, grow this first
+		if (a.bits.length > bits.length) {
+			setSize(a.bits.length);
+		}
+		int min = Math.min(bits.length, a.bits.length);
+		for (int i = min - 1; i >= 0; i--) {
+			bits[i] |= a.bits[i];
+		}
+	}
+
+	/**
+	 * Sets the size of a set.
+	 * @param nwords how many words the new set should be
+	 */
+	private void setSize(int nwords) {
+		long newbits[] = new long[nwords];
+		int n = Math.min(nwords, bits.length);
+		System.arraycopy(bits, 0, newbits, 0, n);
+		bits = newbits;
+	}
+
+    private final static long bitMask(int bitNumber) {
+        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
+        return 1L << bitPosition;
+    }
+
+    public Object clone() {
+        BitSet s;
+        try {
+            s = (BitSet)super.clone();
+            s.bits = new long[bits.length];
+            System.arraycopy(bits, 0, s.bits, 0, bits.length);
+        }
+        catch (CloneNotSupportedException e) {
+            throw new InternalError();
+        }
+        return s;
+    }
+
+    public int size() {
+        int deg = 0;
+        for (int i = bits.length - 1; i >= 0; i--) {
+            long word = bits[i];
+            if (word != 0L) {
+                for (int bit = BITS - 1; bit >= 0; bit--) {
+                    if ((word & (1L << bit)) != 0) {
+                        deg++;
+                    }
+                }
+            }
+        }
+        return deg;
+    }
+
+    public boolean equals(Object other) {
+        if ( other == null || !(other instanceof BitSet) ) {
+            return false;
+        }
+
+        BitSet otherSet = (BitSet)other;
+
+        int n = Math.min(this.bits.length, otherSet.bits.length);
+
+        // for any bits in common, compare
+        for (int i=0; i<n; i++) {
+            if (this.bits[i] != otherSet.bits[i]) {
+                return false;
+            }
+        }
+
+        // make sure any extra bits are off
+
+        if (this.bits.length > n) {
+            for (int i = n+1; i<this.bits.length; i++) {
+                if (this.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+        else if (otherSet.bits.length > n) {
+            for (int i = n+1; i<otherSet.bits.length; i++) {
+                if (otherSet.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
+    public boolean member(int el) {
+		if ( el<0 ) {
+			return false;
+		}
+        int n = wordNumber(el);
+        if (n >= bits.length) return false;
+        return (bits[n] & bitMask(el)) != 0;
+    }
+
+	// remove this element from this set
+	public void remove(int el) {
+		int n = wordNumber(el);
+		if (n < bits.length) {
+			bits[n] &= ~bitMask(el);
+		}
+	}
+
+    public boolean isNil() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            if (bits[i] != 0) return false;
+        }
+        return true;
+    }
+
+    private final int numWordsToHold(int el) {
+        return (el >> LOG_BITS) + 1;
+    }
+
+    public int numBits() {
+        return bits.length << LOG_BITS; // num words * bits per word
+    }
+
+    /** return how much space is being used by the bits array not
+     *  how many actually have member bits on.
+     */
+    public int lengthInLongWords() {
+        return bits.length;
+    }
+
+    /**Is this contained within a? */
+    /*
+	public boolean subset(BitSet a) {
+        if (a == null || !(a instanceof BitSet)) return false;
+        return this.and(a).equals(this);
+    }
+	*/
+
+    public int[] toArray() {
+        int[] elems = new int[size()];
+        int en = 0;
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                elems[en++] = i;
+            }
+        }
+        return elems;
+    }
+
+    public long[] toPackedArray() {
+        return bits;
+    }
+
+	private final static int wordNumber(int bit) {
+		return bit >> LOG_BITS; // bit / BITS
+	}
+
+	public String toString() {
+		return toString(null);
+	}
+
+	public String toString(String[] tokenNames) {
+		StringBuffer buf = new StringBuffer();
+		String separator = ",";
+		boolean havePrintedAnElement = false;
+		buf.append('{');
+
+		for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+			if (member(i)) {
+				if (i > 0 && havePrintedAnElement ) {
+					buf.append(separator);
+				}
+				if ( tokenNames!=null ) {
+					buf.append(tokenNames[i]);
+				}
+				else {
+					buf.append(i);
+				}
+				havePrintedAnElement = true;
+			}
+		}
+		buf.append('}');
+		return buf.toString();
+	}
+
+
+}
diff --git a/runtime/Java/src/org/antlr/runtime/CharStream.java b/runtime/Java/src/org/antlr/runtime/CharStream.java
new file mode 100644
index 0000000..fe0d406
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/CharStream.java
@@ -0,0 +1,57 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
/** A source of characters for an ANTLR lexer */
public interface CharStream extends IntStream {
    /** Sentinel returned by LA()/LT() when the end of the stream is reached. */
    public static final int EOF = -1;

	/** For infinite streams, you don't need this; primarily I'm providing
	 *  a useful interface for action code.  Just make sure actions don't
	 *  use this on streams that don't support it.
	 */
	public String substring(int start, int stop);

	/** Get the ith character of lookahead.  This is the same usually as
	 *  LA(i).  This will be used for labels in the generated
	 *  lexer code.  I'd prefer to return a char here type-wise, but it's
	 *  probably better to be 32-bit clean and be consistent with LA.
	 */
	public int LT(int i);

	/** ANTLR tracks the line information automatically */
	int getLine();

	/** Because this stream can rewind, we need to be able to reset the line */
	void setLine(int line);

	/** Companion to setLine(): reset the 0-based column when rewinding. */
	void setCharPositionInLine(int pos);

	/** The index of the character relative to the beginning of the line 0..n-1 */
	int getCharPositionInLine();
}
diff --git a/runtime/Java/src/org/antlr/runtime/CharStreamState.java b/runtime/Java/src/org/antlr/runtime/CharStreamState.java
new file mode 100644
index 0000000..5bcf116
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/CharStreamState.java
@@ -0,0 +1,45 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
/** When walking ahead with cyclic DFA or for syntactic predicates,
 *  we need to record the state of the input stream (char index,
 *  line, etc...) so that we can rewind the state after scanning ahead.
 *
 *  This is the complete state of a stream: a plain mutable snapshot with
 *  no behavior; fields are package-visible by design.
 */
public class CharStreamState {
	/** Index into the char stream of next lookahead char */
	int p;

	/** What line number is the scanner at before processing buffer[p]? */
	int line;
	
	/** What char position 0..n-1 in line is scanner before processing buffer[p]? */
	int charPositionInLine;
}
diff --git a/runtime/Java/src/org/antlr/runtime/ClassicToken.java b/runtime/Java/src/org/antlr/runtime/ClassicToken.java
new file mode 100644
index 0000000..abda501
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/ClassicToken.java
@@ -0,0 +1,107 @@
+package org.antlr.runtime;
+
+/** A Token object like we'd use in ANTLR 2.x; has an actual string created
+ *  and associated with this object.  These objects are needed for imaginary
+ *  tree nodes that have payload objects.  We need to create a Token object
+ *  that has a string; the tree node will point at this token.  CommonToken
+ *  has indexes into a char stream and hence cannot be used to introduce
+ *  new strings.
+ */
+public class ClassicToken implements Token {
+	protected String text;
+	protected int type;
+	protected int line;
+	protected int charPositionInLine;
+	protected int channel=DEFAULT_CHANNEL;
+
+	/** What token number is this from 0..n-1 tokens */
+	protected int index;
+
+	public ClassicToken(int type) {
+		this.type = type;
+	}
+
+	public ClassicToken(Token oldToken) {
+		text = oldToken.getText();
+		type = oldToken.getType();
+		line = oldToken.getLine();
+		charPositionInLine = oldToken.getCharPositionInLine();
+		channel = oldToken.getChannel();
+	}
+
+	public ClassicToken(int type, String text) {
+		this.type = type;
+		this.text = text;
+	}
+
+	public ClassicToken(int type, String text, int channel) {
+		this.type = type;
+		this.text = text;
+		this.channel = channel;
+	}
+
+	public int getType() {
+		return type;
+	}
+
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	public String getText() {
+		return text;
+	}
+
+	public void setText(String text) {
+		this.text = text;
+	}
+
+	public int getLine() {
+		return line;
+	}
+
+	public int getCharPositionInLine() {
+		return charPositionInLine;
+	}
+
+	public void setCharPositionInLine(int charPositionInLine) {
+		this.charPositionInLine = charPositionInLine;
+	}
+
+	public int getChannel() {
+		return channel;
+	}
+
+	public void setChannel(int channel) {
+		this.channel = channel;
+	}
+
+	public void setType(int type) {
+		this.type = type;
+	}
+
+	public int getTokenIndex() {
+		return index;
+	}
+
+	public void setTokenIndex(int index) {
+		this.index = index;
+	}
+
+	public String toString() {
+		String channelStr = "";
+		if ( channel>0 ) {
+			channelStr=",channel="+channel;
+		}
+		String txt = getText();
+		if ( txt!=null ) {
+			txt = txt.replaceAll("\n","\\\\n");
+			txt = txt.replaceAll("\r","\\\\r");
+			txt = txt.replaceAll("\t","\\\\t");
+		}
+		else {
+			txt = "<no text>";
+		}
+		return "[@"+getTokenIndex()+",'"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/CommonToken.java b/runtime/Java/src/org/antlr/runtime/CommonToken.java
new file mode 100644
index 0000000..89f63fb
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/CommonToken.java
@@ -0,0 +1,172 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import java.io.Serializable;
+
+public class CommonToken implements Token, Serializable {
+	protected int type;
+	protected int line;
+	protected int charPositionInLine = -1; // set to invalid position
+	protected int channel=DEFAULT_CHANNEL;
+	protected transient CharStream input;
+	/** We need to be able to change the text once in a while.  If
+	 *  this is non-null, then getText should return this.  Note that
+	 *  start/stop are not affected by changing this.
+	  */
+	protected String text;
+
+	/** What token number is this from 0..n-1 tokens; < 0 implies invalid index */
+	protected int index = -1;
+
+	/** The char position into the input buffer where this token starts */
+	protected int start;
+
+	/** The char position into the input buffer where this token stops */
+	protected int stop;
+
+	public CommonToken(int type) {
+		this.type = type;
+	}
+
+	public CommonToken(CharStream input, int type, int channel, int start, int stop) {
+		this.input = input;
+		this.type = type;
+		this.channel = channel;
+		this.start = start;
+		this.stop = stop;
+	}
+
+	public CommonToken(int type, String text) {
+		this.type = type;
+		this.channel = DEFAULT_CHANNEL;
+		this.text = text;
+	}
+
+	public CommonToken(Token oldToken) {
+		text = oldToken.getText();
+		type = oldToken.getType();
+		line = oldToken.getLine();
+		index = oldToken.getTokenIndex();
+		charPositionInLine = oldToken.getCharPositionInLine();
+		channel = oldToken.getChannel();
+	}
+
+	public int getType() {
+		return type;
+	}
+
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	public String getText() {
+		if ( text!=null ) {
+			return text;
+		}
+		if ( input==null ) {
+			return null;
+		}
+		text = input.substring(start,stop);
+		return text;
+	}
+
+	/** Override the text for this token.  getText() will return this text
+	 *  rather than pulling from the buffer.  Note that this does not mean
+	 *  that start/stop indexes are not valid.  It means that that input
+	 *  was converted to a new string in the token object.
+	 */
+	public void setText(String text) {
+		this.text = text;
+	}
+
+	public int getLine() {
+		return line;
+	}
+
+	public int getCharPositionInLine() {
+		return charPositionInLine;
+	}
+
+	public void setCharPositionInLine(int charPositionInLine) {
+		this.charPositionInLine = charPositionInLine;
+	}
+
+	public int getChannel() {
+		return channel;
+	}
+
+	public void setChannel(int channel) {
+		this.channel = channel;
+	}
+
+	public void setType(int type) {
+		this.type = type;
+	}
+
+	public int getStartIndex() {
+		return start;
+	}
+
+	public void setStartIndex(int start) {
+		this.start = start;
+	}
+
+	public int getStopIndex() {
+		return stop;
+	}
+
+	public void setStopIndex(int stop) {
+		this.stop = stop;
+	}
+
+	public int getTokenIndex() {
+		return index;
+	}
+
+	public void setTokenIndex(int index) {
+		this.index = index;
+	}
+
+	public String toString() {
+		String channelStr = "";
+		if ( channel>0 ) {
+			channelStr=",channel="+channel;
+		}
+		String txt = getText();
+		if ( txt!=null ) {
+			txt = txt.replaceAll("\n","\\\\n");
+			txt = txt.replaceAll("\r","\\\\r");
+			txt = txt.replaceAll("\t","\\\\t");
+		}
+		else {
+			txt = "<no text>";
+		}
+		return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/CommonTokenStream.java b/runtime/Java/src/org/antlr/runtime/CommonTokenStream.java
new file mode 100644
index 0000000..bb26ad3
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/CommonTokenStream.java
@@ -0,0 +1,370 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import java.util.*;
+
/** The most common stream of tokens is one where every token is buffered up
 *  and tokens are prefiltered for a certain channel (the parser will only
 *  see these tokens and cannot change the filter channel number during the
 *  parse).
 *
 *  The buffer is filled lazily on the first LT/mark/getTokens/toString call
 *  so that channel/discard overrides set beforehand take effect.
 *
 *  TODO: how to access the full token stream?  How to track all tokens matched per rule?
 */
public class CommonTokenStream implements TokenStream {
    protected TokenSource tokenSource;

	/** Record every single token pulled from the source so we can reproduce
	 *  chunks of it later.
	 */
	protected List tokens;

	/** Map<tokentype, channel> to override some Tokens' channel numbers */
	protected Map channelOverrideMap;

	/** Set<tokentype>; discard any tokens with this type */
	protected Set discardSet;

	/** Skip tokens on any channel but this one; this is how we skip whitespace... */
	protected int channel = Token.DEFAULT_CHANNEL;

	/** By default, track all incoming tokens */
	protected boolean discardOffChannelTokens = false;

	/** Track the last mark() call result value for use in rewind(). */
	protected int lastMarker;

	/** The index into the tokens list of the current token (next token
     *  to consume).  p==-1 indicates that the tokens list is empty
     */
    protected int p = -1;

	/** Create a stream with no source attached yet; buffer starts empty. */
	public CommonTokenStream() {
		tokens = new ArrayList(500);
	}

	/** Create a stream over the given source, filtering for the default channel. */
	public CommonTokenStream(TokenSource tokenSource) {
	    this();
		this.tokenSource = tokenSource;
	}

	/** Create a stream over the given source, filtering for the given channel. */
	public CommonTokenStream(TokenSource tokenSource, int channel) {
		this(tokenSource);
		this.channel = channel;
	}

	/** Reset this token stream by setting its token source. */
	public void setTokenSource(TokenSource tokenSource) {
		this.tokenSource = tokenSource;
		tokens.clear();
		p = -1;
		channel = Token.DEFAULT_CHANNEL;
	}

	/** Load all tokens from the token source and put in tokens.
	 *  This is done upon first LT request because you might want to
	 *  set some token type / channel overrides before filling buffer.
	 *  Applies channel overrides and discards tokens per discardSet /
	 *  discardOffChannelTokens while buffering.
	 */
	protected void fillBuffer() {
		int index = 0;
		Token t = tokenSource.nextToken();
		while ( t!=null && t.getType()!=CharStream.EOF ) {
			boolean discard = false;
			// is there a channel override for token type?
			if ( channelOverrideMap!=null ) {
				Integer channelI = (Integer)
					channelOverrideMap.get(new Integer(t.getType()));
				if ( channelI!=null ) {
					t.setChannel(channelI.intValue());
				}
			}
			if ( discardSet!=null &&
				 discardSet.contains(new Integer(t.getType())) )
			{
				discard = true;
			}
			else if ( discardOffChannelTokens && t.getChannel()!=this.channel ) {
				discard = true;
			}
			if ( !discard )	{
				t.setTokenIndex(index);
				tokens.add(t);
				index++;
			}
			t = tokenSource.nextToken();
		}
		// leave p pointing at first token on channel
		p = 0;
		p = skipOffTokenChannels(p);
    }

	/** Move the input pointer to the next incoming token.  The stream
	 *  must become active with LT(1) available.  consume() simply
	 *  moves the input pointer so that LT(1) points at the next
	 *  input symbol. Consume at least one token.
	 *
	 *  Walk past any token not on the channel the parser is listening to.
	 */
	public void consume() {
		if ( p<tokens.size() ) {
            p++;
			p = skipOffTokenChannels(p); // leave p on valid token
        }
    }

	/** Given a starting index, return the index of the first on-channel
	 *  token.  May return tokens.size() if no such token follows.
	 */
	protected int skipOffTokenChannels(int i) {
		int n = tokens.size();
		while ( i<n && ((Token)tokens.get(i)).getChannel()!=channel ) {
			i++;
		}
		return i;
	}

	/** Scan backwards from i for the nearest on-channel token index;
	 *  may return -1 if none precedes i.
	 */
	protected int skipOffTokenChannelsReverse(int i) {
		while ( i>=0 && ((Token)tokens.get(i)).getChannel()!=channel ) {
			i--;
		}
		return i;
	}

	/** A simple filter mechanism whereby you can tell this token stream
	 *  to force all tokens of type ttype to be on channel.  For example,
	 *  when interpreting, we cannot exec actions so we need to tell
	 *  the stream to force all WS and NEWLINE to be a different, ignored
	 *  channel.  Must be called before the buffer is filled to have effect.
	 */
	public void setTokenTypeChannel(int ttype, int channel) {
		if ( channelOverrideMap==null ) {
			channelOverrideMap = new HashMap();
		}
        channelOverrideMap.put(new Integer(ttype), new Integer(channel));
	}

	/** Drop all tokens of this type while buffering; call before first LT(). */
	public void discardTokenType(int ttype) {
		if ( discardSet==null ) {
			discardSet = new HashSet();
		}
        discardSet.add(new Integer(ttype));
	}

	/** If true, off-channel tokens are dropped during fillBuffer() rather
	 *  than buffered; call before first LT().
	 */
	public void discardOffChannelTokens(boolean discardOffChannelTokens) {
		this.discardOffChannelTokens = discardOffChannelTokens;
	}

	/** Return the internal buffer of all tracked tokens, filling it first
	 *  if needed.  Not a copy; do not mutate.
	 */
	public List getTokens() {
		if ( p == -1 ) {
			fillBuffer();
		}
		return tokens;
	}

	/** All tokens in index range [start..stop], any channel, any type. */
	public List getTokens(int start, int stop) {
		return getTokens(start, stop, (BitSet)null);
	}

	/** Given a start and stop index, return a List of all tokens in
	 *  the token type BitSet.  Return null if no tokens were found.  This
	 *  method looks at both on and off channel tokens.
	 */
	public List getTokens(int start, int stop, BitSet types) {
		if ( p == -1 ) {
			fillBuffer();
		}
		if ( stop>=tokens.size() ) {
			stop=tokens.size()-1;
		}
		if ( start<0 ) {
			start=0;
		}
		if ( start>stop ) {
			return null;
		}

		// list = tokens[start:stop]:{Token t, t.getType() in types}
		List filteredTokens = new ArrayList();
		for (int i=start; i<=stop; i++) {
			Token t = (Token)tokens.get(i);
			if ( types==null || types.member(t.getType()) ) {
				filteredTokens.add(t);
			}
		}
		if ( filteredTokens.size()==0 ) {
			filteredTokens = null;
		}
		return filteredTokens;
	}

	/** Range query restricted to a List of token types. */
	public List getTokens(int start, int stop, List types) {
		return getTokens(start,stop,new BitSet(types));
	}

	/** Range query restricted to a single token type. */
	public List getTokens(int start, int stop, int ttype) {
		return getTokens(start,stop,BitSet.of(ttype));
	}

	/** Get the ith token from the current position 1..n where k=1 is the
	 *  first symbol of lookahead.  k may be negative (look back); k==0
	 *  returns null.  Off-channel tokens are skipped.
	 */
	public Token LT(int k) {
		if ( p == -1 ) {
			fillBuffer();
		}
		if ( k==0 ) {
			return null;
		}
		if ( k<0 ) {
			return LB(-k);
		}
		//System.out.print("LT(p="+p+","+k+")=");
		if ( (p+k-1) >= tokens.size() ) {
			return Token.EOF_TOKEN;
		}
		//System.out.println(tokens.get(p+k-1));
		int i = p;
		int n = 1;
		// find k good tokens
		while ( n<k ) {
			// skip off-channel tokens
			i = skipOffTokenChannels(i+1); // leave p on valid token
			n++;
		}
		if ( i>=tokens.size() ) {
			return Token.EOF_TOKEN;
		}
        return (Token)tokens.get(i);
    }

	/** Look backwards k tokens on-channel tokens */
	protected Token LB(int k) {
		//System.out.print("LB(p="+p+","+k+") ");
		if ( p == -1 ) {
			fillBuffer();
		}
		if ( k==0 ) {
			return null;
		}
		if ( (p-k)<0 ) {
			return null;
		}

		int i = p;
		int n = 1;
		// find k good tokens looking backwards
		while ( n<=k ) {
			// skip off-channel tokens
			i = skipOffTokenChannelsReverse(i-1); // leave p on valid token
			n++;
		}
		if ( i<0 ) {
			return null;
		}
		return (Token)tokens.get(i);
	}

	/** Return absolute token i; ignore which channel the tokens are on;
	 *  that is, count all tokens not just on-channel tokens.
	 */
	public Token get(int i) {
		return (Token)tokens.get(i);
	}

	/** Token type of the ith lookahead token (delegates to LT). */
    public int LA(int i) {
        return LT(i).getType();
    }

	/** Record current position as a marker for later rewind(). */
    public int mark() {
		if ( p == -1 ) {
			fillBuffer();
		}
		lastMarker = index();
		return lastMarker;
	}

	public void release(int marker) {
		// no resources to release
	}

	/** Number of buffered tokens (on and off channel). */
	public int size() {
		return tokens.size();
	}

	/** Index of the next token to consume. */
    public int index() {
        return p;
    }

	/** Rewind to an explicit marker returned by mark(). */
	public void rewind(int marker) {
		seek(marker);
	}

	/** Rewind to the most recent mark() position. */
	public void rewind() {
		seek(lastMarker);
	}

	/** Jump the consume pointer to an absolute token index; no validation. */
	public void seek(int index) {
		p = index;
	}

	public TokenSource getTokenSource() {
		return tokenSource;
	}

	/** Concatenated text of every buffered token. */
	public String toString() {
		if ( p == -1 ) {
			fillBuffer();
		}
		return toString(0, tokens.size()-1);
	}

	/** Concatenated text of tokens in index range [start..stop], clamped
	 *  to the buffer; null for negative indexes.
	 */
	public String toString(int start, int stop) {
		if ( start<0 || stop<0 ) {
			return null;
		}
		if ( p == -1 ) {
			fillBuffer();
		}
		if ( stop>=tokens.size() ) {
			stop = tokens.size()-1;
		}
 		StringBuffer buf = new StringBuffer();
		for (int i = start; i <= stop; i++) {
			Token t = (Token)tokens.get(i);
			buf.append(t.getText());
		}
		return buf.toString();
	}

	/** Concatenated text between two tokens (inclusive); null if either is null. */
	public String toString(Token start, Token stop) {
		if ( start!=null && stop!=null ) {
			return toString(start.getTokenIndex(), stop.getTokenIndex());
		}
		return null;
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/DFA.java b/runtime/Java/src/org/antlr/runtime/DFA.java
new file mode 100644
index 0000000..5abec4b
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/DFA.java
@@ -0,0 +1,187 @@
+package org.antlr.runtime;
+
/** A DFA implemented as a set of transition tables.
 *
 *  Any state that has a semantic predicate edge is special; those states
 *  are generated with if-then-else structures in a specialStateTransition()
 *  which is generated by cyclicDFA template.
 *
 *  There are at most 32767 states (16-bit signed short).
 *  Could get away with byte sometimes but would have to generate different
 *  types and the simulation code too.  For a point of reference, the Java
 *  lexer's Tokens rule DFA has 326 states roughly.
 */
public class DFA {
	protected short[] eot;
	protected short[] eof;
	protected char[] min;
    protected char[] max;
    protected short[] accept;
    protected short[] special;
    protected short[][] transition;

	protected int decisionNumber;

	/** Which recognizer encloses this DFA?  Needed to check backtracking */
	protected BaseRecognizer recognizer;

	public static final boolean debug = false;

	/** From the input stream, predict what alternative will succeed
	 *  using this DFA (representing the covering regular approximation
	 *  to the underlying CFL).  Return an alternative number 1..n.  Throw
	 *  an exception upon error.  Input position is always restored via
	 *  mark()/rewind() regardless of outcome.
	 */
	public int predict(IntStream input)
		throws RecognitionException
	{
		int mark = input.mark(); // remember where decision started in input
		int s = 0; // we always start at s0
		try {
			while ( true ) {
				if ( debug ) System.err.println("DFA "+decisionNumber+" state "+s+" LA(1)="+(char)input.LA(1)+"("+input.LA(1)+
												"), index="+input.index());
				// predicate states delegate to generated code; base impl
				// returns -1, so generated subclasses must override it
				int specialState = special[s];
				if ( specialState>=0 ) {
					if ( debug ) System.err.println("DFA "+decisionNumber+
						" state "+s+" is special state "+specialState);
					s = specialStateTransition(specialState,input);
					input.consume();
					continue;
				}
				if ( accept[s] >= 1 ) {
					if ( debug ) System.err.println("accept; predict "+accept[s]+" from state "+s);
					return accept[s];
				}
				// look for a normal char transition
				char c = (char)input.LA(1); // -1 == \uFFFF, all tokens fit in 65000 space
				if (c>=min[s] && c<=max[s]) {
					int snext = transition[s][c-min[s]]; // move to next state
					if ( snext < 0 ) {
						// was in range but not a normal transition
						// must check EOT, which is like the else clause.
						// eot[s]>=0 indicates that an EOT edge goes to another
						// state.
						if ( eot[s]>=0 ) {  // EOT Transition to accept state?
							if ( debug ) System.err.println("EOT transition");
							s = eot[s];
							input.consume();
							// TODO: I had this as return accept[eot[s]]
							// which assumed here that the EOT edge always
							// went to an accept...faster to do this, but
							// what about predicated edges coming from EOT
							// target?
							continue;
						}
						noViableAlt(s,input);
						return 0;
					}
					s = snext;
					input.consume();
					continue;
				}
				if ( eot[s]>=0 ) {  // EOT Transition?
					if ( debug ) System.err.println("EOT transition");
					s = eot[s];
					input.consume();
					continue;
				}
				if ( c==(char)Token.EOF && eof[s]>=0 ) {  // EOF Transition to accept state?
					if ( debug ) System.err.println("accept via EOF; predict "+accept[eof[s]]+" from "+eof[s]);
					return accept[eof[s]];
				}
				// not in range and not EOF/EOT, must be invalid symbol
				if ( debug ) {
					System.err.println("min["+s+"]="+min[s]);
					System.err.println("max["+s+"]="+max[s]);
					System.err.println("eot["+s+"]="+eot[s]);
					System.err.println("eof["+s+"]="+eof[s]);
					for (int p=0; p<transition[s].length; p++) {
						System.err.print(transition[s][p]+" ");
					}
					System.err.println();
				}
				noViableAlt(s,input);
				return 0;
			}
		}
		finally {
			input.rewind(mark);
		}
	}

	/** No alternative matched.  When the enclosing recognizer is
	 *  backtracking, just set its failed flag and return; otherwise build
	 *  and throw a NoViableAltException (after the error() debug hook).
	 */
	protected void noViableAlt(int s, IntStream input) throws NoViableAltException {
		if (recognizer.backtracking>0) {
			recognizer.failed=true;
			return;
		}
		NoViableAltException nvae =
			new NoViableAltException(getDescription(),
									 decisionNumber,
									 s,
									 input);
		error(nvae);
		throw nvae;
	}

	/** A hook for debugging interface */
	protected void error(NoViableAltException nvae) { ; }

	/** Base implementation returns -1 (no transition); overridden in
	 *  generated DFA subclasses that contain predicate states.
	 */
	public int specialStateTransition(int s, IntStream input)
		throws NoViableAltException
	{
		return -1;
	}

	/** Human-readable decision description; overridden by generated code. */
	public String getDescription() {
		return "n/a";
	}

	/** Given a String that has a run-length-encoding of some unsigned shorts
	 *  like "\1\2\3\9", convert to short[] {2,9,9,9}.  We do this to avoid
	 *  static short[] which generates so much init code that the class won't
	 *  compile. :(
	 *  Each char pair is (count, value); pass 1 sizes the array, pass 2 fills it.
	 */
	public static short[] unpackEncodedString(String encodedString) {
		// walk first to find how big it is.
		int size = 0;
		for (int i=0; i<encodedString.length(); i+=2) {
			size += encodedString.charAt(i);
		}
		short[] data = new short[size];
		int di = 0;
		for (int i=0; i<encodedString.length(); i+=2) {
			char n = encodedString.charAt(i);
			char v = encodedString.charAt(i+1);
			// add v n times to data
			for (int j=1; j<=n; j++) {
				data[di++] = (short)v;
			}
		}
		return data;
	}

	/** Hideous duplication of code, but I need different typed arrays out :( */
	public static char[] unpackEncodedStringToUnsignedChars(String encodedString) {
		// walk first to find how big it is.
		int size = 0;
		for (int i=0; i<encodedString.length(); i+=2) {
			size += encodedString.charAt(i);
		}
		char[] data = new char[size];
		int di = 0;
		for (int i=0; i<encodedString.length(); i+=2) {
			char n = encodedString.charAt(i);
			char v = encodedString.charAt(i+1);
			// add v n times to data
			for (int j=1; j<=n; j++) {
				data[di++] = v;
			}
		}
		return data;
	}

	// NOTE(review): appears unused within this class — presumably a hook
	// for generated code; confirm before removing.
	public int specialTransition(int state, int symbol) {
		return 0;
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java b/runtime/Java/src/org/antlr/runtime/EarlyExitException.java
new file mode 100644
index 0000000..29f0865
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/EarlyExitException.java
@@ -0,0 +1,41 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
/**  The recognizer did not match anything for a (..)+ loop. */
public class EarlyExitException extends RecognitionException {
	/** Which (..)+ decision failed to complete at least one iteration. */
	public int decisionNumber;

	/** Used for remote debugger deserialization */
	public EarlyExitException() {;}
	
	/** @param decisionNumber the (..)+ decision that matched zero iterations
	 *  @param input the stream positioned at the offending symbol
	 */
	public EarlyExitException(int decisionNumber, IntStream input) {
		super(input);
		this.decisionNumber = decisionNumber;
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/FailedPredicateException.java b/runtime/Java/src/org/antlr/runtime/FailedPredicateException.java
new file mode 100644
index 0000000..b90fe0b
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/FailedPredicateException.java
@@ -0,0 +1,54 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A semantic predicate failed during validation.  Validation of predicates
+ *  occurs when normally parsing the alternative just like matching a token.
+ *  Disambiguating predicate evaluation occurs when we hoist a predicate into
+ *  a prediction decision.
+ */
+public class FailedPredicateException extends RecognitionException {
+	/** Name of the rule in which the predicate failed. */
+	public String ruleName;
+	/** Source text of the predicate that failed; rendered as {text}? by toString(). */
+	public String predicateText;
+
+	/** Used for remote debugger deserialization */
+	public FailedPredicateException() {;}
+
+	public FailedPredicateException(IntStream input,
+									String ruleName,
+									String predicateText)
+	{
+		super(input);
+		this.ruleName = ruleName;
+		this.predicateText = predicateText;
+	}
+
+	public String toString() {
+		return "FailedPredicateException("+ruleName+",{"+predicateText+"}?)";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/IntStream.java b/runtime/Java/src/org/antlr/runtime/IntStream.java
new file mode 100644
index 0000000..d6c5ab3
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/IntStream.java
@@ -0,0 +1,116 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A simple stream of integers used when all I care about is the char
+ *  or token type sequence (such as interpretation).
+ */
+public interface IntStream {
+	/** Advance the input cursor by one symbol. */
+	void consume();
+
+	/** Get int at current input pointer + i ahead where i=1 is next int.
+	 *  Negative indexes are allowed.  LA(-1) is previous token (token
+	 *  just matched).  LA(-i) where i is before first token should
+	 *  yield -1, invalid char / EOF.
+	 */
+	int LA(int i);
+
+	/** Tell the stream to start buffering if it hasn't already.  Return
+     *  current input position, index(), or some other marker so that
+	 *  when passed to rewind() you get back to the same spot.
+	 *  rewind(mark()) should not affect the input cursor.  The Lexer
+	 *  track line/col info as well as input index so its markers are
+	 *  not pure input indexes.  Same for tree node streams.
+     */
+	int mark();
+
+	/** Return the current input symbol index 0..n where n indicates the
+     *  last symbol has been read.  The index is the symbol about to be
+	 *  read not the most recently read symbol.
+     */
+	int index();
+
+	/** Reset the stream so that next call to index would return marker.
+	 *  The marker will usually be index() but it doesn't have to be.  It's
+	 *  just a marker to indicate what state the stream was in.  This is
+	 *  essentially calling release() and seek().  If there are markers
+	 *  created after this marker argument, this routine must unroll them
+	 *  like a stack.  Assume the state the stream was in when this marker
+	 *  was created.
+	 */
+	void rewind(int marker);
+
+	/** Rewind to the input position of the last marker.
+	 *  Used currently only after a cyclic DFA and just
+	 *  before starting a sem/syn predicate to get the
+	 *  input position back to the start of the decision.
+	 *  Do not "pop" the marker off the state.  mark(i)
+	 *  and rewind(i) should balance still. It is
+	 *  like invoking rewind(last marker) but it should not "pop"
+	 *  the marker off.  It's like seek(last marker's input position).
+	 */
+	void rewind();
+
+	/** You may want to commit to a backtrack but don't want to force the
+	 *  stream to keep bookkeeping objects around for a marker that is
+	 *  no longer necessary.  This will have the same behavior as
+	 *  rewind() except it releases resources without the backward seek.
+	 *  This must throw away resources for all markers back to the marker
+	 *  argument.  So if you're nested 5 levels of mark(), and then release(2)
+	 *  you have to release resources for depths 2..5.
+	 */
+	void release(int marker);
+
+	/** Set the input cursor to the position indicated by index.  This is
+	 *  normally used to seek ahead in the input stream.  No buffering is
+	 *  required to do this unless you know your stream will use seek to
+	 *  move backwards such as when backtracking.
+	 *
+	 *  This is different from rewind in its multi-directional
+	 *  requirement and in that its argument is strictly an input cursor (index).
+	 *
+	 *  For char streams, seeking forward must update the stream state such
+	 *  as line number.  For seeking backwards, you will be presumably
+	 *  backtracking using the mark/rewind mechanism that restores state and
+	 *  so this method does not need to update state when seeking backwards.
+	 *
+	 *  Currently, this method is only used for efficient backtracking using
+	 *  memoization, but in the future it may be used for incremental parsing.
+	 *
+	 *  The index is 0..n-1.  A seek to position i means that LA(1) will
+	 *  return the ith symbol.  So, seeking to 0 means LA(1) will return the
+	 *  first element in the stream. 
+	 */
+	void seek(int index);
+
+	/** Only makes sense for streams that buffer everything up probably, but
+	 *  might be useful to display the entire stream or for testing.  This
+	 *  value includes a single EOF.
+	 */
+	int size();
+}
diff --git a/runtime/Java/src/org/antlr/runtime/Lexer.java b/runtime/Java/src/org/antlr/runtime/Lexer.java
new file mode 100644
index 0000000..9f069a8
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/Lexer.java
@@ -0,0 +1,345 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A lexer is a recognizer that draws input symbols from a character stream.
+ *  lexer grammars result in a subclass of this object. A Lexer object
+ *  uses simplified match() and error recovery mechanisms in the interest
+ *  of speed.
+ */
+public abstract class Lexer extends BaseRecognizer implements TokenSource {
+	/** Where is the lexer drawing characters from? */
+    protected CharStream input;
+
+	/** The goal of all lexer rules/methods is to create a token object.
+	 *  This is an instance variable as multiple rules may collaborate to
+	 *  create a single token.  nextToken will return this object after
+	 *  matching lexer rule(s).  If you subclass to allow multiple token
+	 *  emissions, then set this to the last token to be matched or
+	 *  something nonnull so that the auto token emit mechanism will not
+	 *  emit another token.
+	 */
+    protected Token token;
+
+	/** What character index in the stream did the current token start at?
+	 *  Needed, for example, to get the text for current token.  Set at
+	 *  the start of nextToken.
+ 	 */
+	protected int tokenStartCharIndex = -1;
+
+	/** The line on which the first character of the token resides */
+	protected int tokenStartLine;
+
+	/** The character position of first character within the line */
+	protected int tokenStartCharPositionInLine;
+
+	/** The channel number for the current token */
+	protected int channel;
+
+	/** The token type for the current token */
+	protected int type;
+
+	/** You can set the text for the current token to override what is in
+	 *  the input char buffer.  Use setText() or can set this instance var.
+ 	 */
+	protected String text;
+
+	/** No-arg constructor; supply the stream later with setCharStream(). */
+	public Lexer() {
+	}
+
+	public Lexer(CharStream input) {
+		this.input = input;
+	}
+
+	/** Reset all lexer and inherited recognizer state and rewind the
+	 *  input stream to the beginning, if a stream is attached.
+	 */
+	public void reset() {
+		super.reset(); // reset all recognizer state variables
+		// wack Lexer state variables
+		token = null;
+		type = Token.INVALID_TOKEN_TYPE;
+		channel = Token.DEFAULT_CHANNEL;
+		tokenStartCharIndex = -1;
+		tokenStartCharPositionInLine = -1;
+		tokenStartLine = -1;
+		text = null;
+		if ( input!=null ) {
+			input.seek(0); // rewind the input
+		}
+	}
+
+	/** Return a token from this source; i.e., match a token on the char
+	 *  stream.
+	 */
+    public Token nextToken() {
+		while (true) {
+			token = null;
+			channel = Token.DEFAULT_CHANNEL;
+			tokenStartCharIndex = input.index();
+			tokenStartCharPositionInLine = input.getCharPositionInLine();
+			tokenStartLine = input.getLine();
+			text = null;
+			if ( input.LA(1)==CharStream.EOF ) {
+                return Token.EOF_TOKEN;
+            }
+            try {
+                mTokens();
+				if ( token==null ) {
+					// rule matched but did not set token; auto-emit one
+					emit();
+				}
+				else if ( token==Token.SKIP_TOKEN ) {
+					continue;
+				}
+				return token;
+			}
+            catch (RecognitionException re) {
+                reportError(re);
+                recover(re);
+            }
+        }
+    }
+
+	/** Instruct the lexer to skip creating a token for current lexer rule
+	 *  and look for another token.  nextToken() knows to keep looking when
+	 *  a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
+	 *  if token==null at end of any token rule, it creates one for you
+	 *  and emits it.
+	 */
+	public void skip() {
+		token = Token.SKIP_TOKEN;
+	}
+
+	/** This is the lexer entry point that sets instance var 'token' */
+	public abstract void mTokens() throws RecognitionException;
+
+	/** Set the char stream and reset the lexer */
+	public void setCharStream(CharStream input) {
+		this.input = null;
+		reset();
+		this.input = input;
+	}
+
+	/** Currently does not support multiple emits per nextToken invocation
+	 *  for efficiency reasons.  Subclass and override this method and
+	 *  nextToken (to push tokens into a list and pull from that list rather
+	 *  than a single variable as this implementation does).
+	 */
+	public void emit(Token token) {
+		this.token = token;
+	}
+
+	/** The standard method called to automatically emit a token at the
+	 *  outermost lexical rule.  The token object should point into the
+	 *  char buffer start..stop.  If there is a text override in 'text',
+	 *  use that to set the token's text.  Override this method to emit
+	 *  custom Token objects.
+	 */
+	public Token emit() {
+		Token t = new CommonToken(input, type, channel, tokenStartCharIndex, getCharIndex()-1);
+		t.setLine(tokenStartLine);
+		t.setText(text);
+		t.setCharPositionInLine(tokenStartCharPositionInLine);
+		emit(t);
+		return t;
+	}
+
+	/** Match string s exactly against upcoming input chars, consuming each.
+	 *  On mismatch: if backtracking, just set failed and return; otherwise
+	 *  recover and throw MismatchedTokenException.
+	 */
+	public void match(String s) throws MismatchedTokenException {
+        int i = 0;
+        while ( i<s.length() ) {
+            if ( input.LA(1)!=s.charAt(i) ) {
+				if ( backtracking>0 ) {
+					failed = true;
+					return;
+				}
+				MismatchedTokenException mte =
+					new MismatchedTokenException(s.charAt(i), input);
+				recover(mte);
+				throw mte;
+            }
+            i++;
+            input.consume();
+			failed = false;
+        }
+    }
+
+	/** Match any single character; always consumes. */
+    public void matchAny() {
+        input.consume();
+    }
+
+	/** Match the single character c.  On mismatch: if backtracking, set
+	 *  failed and return; otherwise recover and throw.
+	 */
+    public void match(int c) throws MismatchedTokenException {
+        if ( input.LA(1)!=c ) {
+			if ( backtracking>0 ) {
+				failed = true;
+				return;
+			}
+			MismatchedTokenException mte =
+				new MismatchedTokenException(c, input);
+			recover(mte);
+			throw mte;
+        }
+        input.consume();
+		failed = false;
+    }
+
+	/** Match one character in the inclusive range a..b.  On mismatch: if
+	 *  backtracking, set failed and return; otherwise recover and throw.
+	 */
+    public void matchRange(int a, int b)
+		throws MismatchedRangeException
+	{
+        if ( input.LA(1)<a || input.LA(1)>b ) {
+			if ( backtracking>0 ) {
+				failed = true;
+				return;
+			}
+            MismatchedRangeException mre =
+				new MismatchedRangeException(a,b,input);
+			recover(mre);
+			throw mre;
+        }
+        input.consume();
+		failed = false;
+    }
+
+	/** Current line number of the char stream. */
+    public int getLine() {
+        return input.getLine();
+    }
+
+	/** Current character position within the current line. */
+    public int getCharPositionInLine() {
+        return input.getCharPositionInLine();
+    }
+
+	/** What is the index of the current character of lookahead? */
+	public int getCharIndex() {
+		return input.index();
+	}
+
+	/** Return the text matched so far for the current token or any
+	 *  text override.
+	 */
+	public String getText() {
+		if ( text!=null ) {
+			return text;
+		}
+		return input.substring(tokenStartCharIndex,getCharIndex()-1);
+	}
+
+	/** Set the complete text of this token; it wipes any previous
+	 *  changes to the text.
+	 */
+	public void setText(String text) {
+		this.text = text;
+	}
+
+	public void reportError(RecognitionException e) {
+		/** TODO: not thought about recovery in lexer yet.
+		 *
+		// if we've already reported an error and have not matched a token
+		// yet successfully, don't report any errors.
+		if ( errorRecovery ) {
+			//System.err.print("[SPURIOUS] ");
+			return;
+		}
+		errorRecovery = true;
+		 */
+
+		displayRecognitionError(this.getTokenNames(), e);
+	}
+
+	/** Build a lexer-specific (character-oriented) error message for e;
+	 *  unrecognized exception types fall through to the superclass.
+	 */
+	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
+		String msg = null;
+		if ( e instanceof MismatchedTokenException ) {
+			MismatchedTokenException mte = (MismatchedTokenException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting "+getCharErrorDisplay(mte.expecting);
+		}
+		else if ( e instanceof NoViableAltException ) {
+			NoViableAltException nvae = (NoViableAltException)e;
+			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+			// and "(decision="+nvae.decisionNumber+") and
+			// "state "+nvae.stateNumber
+			msg = "no viable alternative at character "+getCharErrorDisplay(e.c);
+		}
+		else if ( e instanceof EarlyExitException ) {
+			EarlyExitException eee = (EarlyExitException)e;
+			// for development, can add "(decision="+eee.decisionNumber+")"
+			msg = "required (...)+ loop did not match anything at character "+getCharErrorDisplay(e.c);
+		}
+		// NOTE: MismatchedNotSetException is a subclass of MismatchedSetException,
+		// so it must be tested first or the plain-set branch would catch it.
+		else if ( e instanceof MismatchedNotSetException ) {
+			MismatchedNotSetException mse = (MismatchedNotSetException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
+		}
+		else if ( e instanceof MismatchedSetException ) {
+			MismatchedSetException mse = (MismatchedSetException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
+		}
+		else if ( e instanceof MismatchedRangeException ) {
+			MismatchedRangeException mre = (MismatchedRangeException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+
+				getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b);
+		}
+		else {
+			msg = super.getErrorMessage(e, tokenNames);
+		}
+		return msg;
+	}
+
+	/** Render character c for an error message, quoting it and showing
+	 *  <EOF> and escaped \n, \t, \r instead of the raw characters.
+	 */
+	public String getCharErrorDisplay(int c) {
+		String s = String.valueOf((char)c);
+		switch ( c ) {
+			case Token.EOF :
+				s = "<EOF>";
+				break;
+			case '\n' :
+				s = "\\n";
+				break;
+			case '\t' :
+				s = "\\t";
+				break;
+			case '\r' :
+				s = "\\r";
+				break;
+		}
+		return "'"+s+"'";
+	}
+
+	/** Lexers can normally match any char in its vocabulary after matching
+	 *  a token, so do the easy thing and just kill a character and hope
+	 *  it all works out.  You can instead use the rule invocation stack
+	 *  to do sophisticated error recovery if you are in a fragment rule.
+	 */
+	public void recover(RecognitionException re) {
+		//System.out.println("consuming char "+(char)input.LA(1)+" during recovery")
+		//re.printStackTrace();
+		input.consume();
+	}
+
+	/** Trace rule entry, showing the upcoming char and line:position. */
+	public void traceIn(String ruleName, int ruleIndex)  {
+		String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
+		super.traceIn(ruleName, ruleIndex, inputSymbol);
+	}
+
+	/** Trace rule exit, showing the upcoming char and line:position. */
+	public void traceOut(String ruleName, int ruleIndex)  {
+		String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
+		super.traceOut(ruleName, ruleIndex, inputSymbol);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java b/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
new file mode 100644
index 0000000..373b123
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
@@ -0,0 +1,41 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A mismatch while matching a "not set" (set complement) element;
+ *  companion to MismatchedSetException, which it extends.
+ */
+public class MismatchedNotSetException extends MismatchedSetException {
+	/** Used for remote debugger deserialization */
+	public MismatchedNotSetException() {;}
+
+	public MismatchedNotSetException(BitSet expecting, IntStream input) {
+		super(expecting, input);
+	}
+
+	public String toString() {
+		return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java b/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java
new file mode 100644
index 0000000..b048aaf
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java
@@ -0,0 +1,42 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** The input symbol was outside an expected character range a..b
+ *  (bounds are inclusive; see Lexer.matchRange).
+ */
+public class MismatchedRangeException extends RecognitionException {
+	/** Inclusive lower and upper bounds of the expected range. */
+	public int a,b;
+
+	/** Used for remote debugger deserialization, as in sibling exceptions. */
+	public MismatchedRangeException() {;}
+
+	public MismatchedRangeException(int a, int b, IntStream input) {
+		super(input);
+		this.a = a;
+		this.b = b;
+	}
+
+	public String toString() {
+		// Fixed: previously misreported itself as "MismatchedNotSetException".
+		return "MismatchedRangeException("+getUnexpectedType()+" not in ["+a+","+b+"])";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedSetException.java b/runtime/Java/src/org/antlr/runtime/MismatchedSetException.java
new file mode 100644
index 0000000..5794b08
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/MismatchedSetException.java
@@ -0,0 +1,44 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** The input symbol was not a member of an expected set of symbols. */
+public class MismatchedSetException extends RecognitionException {
+	/** The set of token types / characters that would have matched. */
+	public BitSet expecting;
+
+	/** Used for remote debugger deserialization */
+	public MismatchedSetException() {;}
+
+	public MismatchedSetException(BitSet expecting, IntStream input) {
+		super(input);
+		this.expecting = expecting;
+	}
+
+	public String toString() {
+		return "MismatchedSetException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java b/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
new file mode 100644
index 0000000..97a7d34
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
@@ -0,0 +1,44 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** The input symbol did not match the single expected token type
+ *  (or character code, when thrown by the Lexer's match methods).
+ */
+public class MismatchedTokenException extends RecognitionException {
+	/** The token type (or character code) that was expected. */
+	public int expecting;
+
+	// Presumably for remote debugger deserialization, like the no-arg
+	// constructors on sibling exception types -- TODO confirm.
+	public MismatchedTokenException() {
+	}
+
+	public MismatchedTokenException(int expecting, IntStream input) {
+		super(input);
+		this.expecting = expecting;
+	}
+
+	public String toString() {
+		return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTreeNodeException.java b/runtime/Java/src/org/antlr/runtime/MismatchedTreeNodeException.java
new file mode 100644
index 0000000..e3d223a
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/MismatchedTreeNodeException.java
@@ -0,0 +1,22 @@
+package org.antlr.runtime;
+
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.runtime.tree.Tree;
+
+/** The tree node at the current position of a TreeNodeStream did not
+ *  have the expected token type.
+ */
+public class MismatchedTreeNodeException extends RecognitionException {
+	/** The token type the tree parser expected at this node. */
+	public int expecting;
+
+	// Presumably for remote debugger deserialization, like the no-arg
+	// constructors on sibling exception types -- TODO confirm.
+	public MismatchedTreeNodeException() {
+	}
+
+	public MismatchedTreeNodeException(int expecting, TreeNodeStream input) {
+		super(input);
+		this.expecting = expecting;
+	}
+
+	public String toString() {
+		return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/NoViableAltException.java b/runtime/Java/src/org/antlr/runtime/NoViableAltException.java
new file mode 100644
index 0000000..02653ae
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/NoViableAltException.java
@@ -0,0 +1,52 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** No alternative of a grammar decision was viable for the current
+ *  input symbol.
+ */
+public class NoViableAltException extends RecognitionException {
+	/** Textual description of the grammar decision, for error messages. */
+	public String grammarDecisionDescription;
+	/** Which decision in the grammar failed. */
+	public int decisionNumber;
+	/** State number at the point no alternative was viable. */
+	public int stateNumber;
+
+	/** Used for remote debugger deserialization */
+	public NoViableAltException() {;}
+	
+	public NoViableAltException(String grammarDecisionDescription,
+								int decisionNumber,
+								int stateNumber,
+								IntStream input)
+	{
+		super(input);
+		this.grammarDecisionDescription = grammarDecisionDescription;
+		this.decisionNumber = decisionNumber;
+		this.stateNumber = stateNumber;
+	}
+
+	public String toString() {
+		return "NoViableAltException("+getUnexpectedType()+"!=["+grammarDecisionDescription+"])";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/Parser.java b/runtime/Java/src/org/antlr/runtime/Parser.java
new file mode 100644
index 0000000..1000a52
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/Parser.java
@@ -0,0 +1,65 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A parser for TokenStreams.  "parser grammars" result in a subclass
+ *  of this.
+ */
+public class Parser extends BaseRecognizer {
+    protected TokenStream input;
+
+	public Parser(TokenStream input) {
+        setTokenStream(input);
+    }
+
+	public void reset() {
+		super.reset(); // reset all recognizer state variables
+		if ( input!=null ) {
+			input.seek(0); // rewind the input
+		}
+	}
+
+	/** Set the token stream and reset the parser */
+	public void setTokenStream(TokenStream input) {
+		this.input = null;
+		reset();
+		this.input = input;
+	}
+
+    public TokenStream getTokenStream() {
+		return input;
+	}
+
+	public void traceIn(String ruleName, int ruleIndex)  {
+		super.traceIn(ruleName, ruleIndex, input.LT(1));
+	}
+
+	public void traceOut(String ruleName, int ruleIndex)  {
+		super.traceOut(ruleName, ruleIndex, input.LT(1));
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/ParserRuleReturnScope.java b/runtime/Java/src/org/antlr/runtime/ParserRuleReturnScope.java
new file mode 100644
index 0000000..9299db9
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/ParserRuleReturnScope.java
@@ -0,0 +1,49 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** Rules that return more than a single value must return an object
+ *  containing all the values.  Besides the properties defined in
+ *  RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
+ *  return values.  This class simply defines the minimum properties that
+ *  are always defined and methods to access the others that might be
+ *  available depending on output option such as template and tree.
+ *
+ *  Note text is not an actual property of the return value, it is computed
+ *  from start and stop using the input stream's toString() method.  I
+ *  could add a ctor to this so that we can pass in and store the input
+ *  stream, but I'm not sure we want to do that.  It would seem to be undefined
+ *  to get the .text property anyway if the rule matches tokens from multiple
+ *  input streams.
+ *
+ *  I do not use getters for fields of objects that are used simply to
+ *  group values such as this aggregate.
+ */
+public class ParserRuleReturnScope extends RuleReturnScope {
+	public Token start, stop;
+}
diff --git a/runtime/Java/src/org/antlr/runtime/RecognitionException.java b/runtime/Java/src/org/antlr/runtime/RecognitionException.java
new file mode 100644
index 0000000..4ca52ba
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/RecognitionException.java
@@ -0,0 +1,180 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import org.antlr.runtime.tree.*;
+
+/** The root of the ANTLR exception hierarchy.
+ *
+ *  To avoid English-only error messages and to generally make things
+ *  as flexible as possible, these exceptions are not created with strings,
+ *  but rather the information necessary to generate an error.  Then
+ *  the various reporting methods in Parser and Lexer can be overridden
+ *  to generate a localized error message.  For example, MismatchedToken
+ *  exceptions are built with the expected token type.
+ *  So, don't expect getMessage() to return anything.
+ *
+ *  Note that as of Java 1.4, you can access the stack trace, which means
+ *  that you can compute the complete trace of rules from the start symbol.
+ *  This gives you considerable context information with which to generate
+ *  useful error messages.
+ *
+ *  ANTLR generates code that throws exceptions upon recognition error and
+ *  also generates code to catch these exceptions in each rule.  If you
+ *  want to quit upon first error, you can turn off the automatic error
+ *  handling mechanism using rulecatch action, but you still need to
+ *  override methods mismatch and recoverFromMismatchSet.
+ *
+ *  In general, the recognition exceptions can track where in a grammar a
+ *  problem occurred and/or what was the expected input.  While the parser
+ *  knows its state (such as current input symbol and line info) that
+ *  state can change before the exception is reported so current token index
+ *  is computed and stored at exception time.  From this info, you can
+ *  perhaps print an entire line of input not just a single token, for example.
+ *  Better to just say the recognizer had a problem and then let the parser
+ *  figure out a fancy report.
+ */
public class RecognitionException extends Exception {
	/** What input stream did the error occur in?
	 *  Transient: not serialized for remote-debugger transport.
	 */
	public transient IntStream input;

	/** What is index of token/char were we looking at when the error occurred? */
	public int index;

	/** The current Token when an error occurred.  Since not all streams
	 *  can retrieve the ith Token, we have to track the Token object.
	 *  For parsers.  Even when it's a tree parser, token might be set.
	 */
	public Token token;

	/** If this is a tree parser exception, node is set to the node with
	 *  the problem.
	 */
	public Object node;

	/** The current char when an error occurred. For lexers. */
	public int c;

	/** Track the line at which the error occurred in case this is
	 *  generated from a lexer.  We need to track this since the
	 *  unexpected char doesn't carry the line info.
	 */
	public int line;

	/** Char position within the error line; 0-based, matching the token's
	 *  getCharPositionInLine() convention.
	 */
	public int charPositionInLine;

	/** If you are parsing a tree node stream, you will encounter some
	 *  imaginary nodes w/o line/col info.  We now search backwards looking
	 *  for most recent token with line/col info, but notify getErrorHeader()
	 *  that info is approximate.
	 */
	public boolean approximateLineInfo;

	/** Used for remote debugger deserialization */
	public RecognitionException() {
	}

	/** Snapshot error location info from the stream at throw time, since
	 *  recognizer state may change before the error is actually reported.
	 *  The stream kind (token / tree / char) decides which fields are filled.
	 */
	public RecognitionException(IntStream input) {
		this.input = input;
		this.index = input.index();
		if ( input instanceof TokenStream ) {
			this.token = ((TokenStream)input).LT(1);
			this.line = token.getLine();
			this.charPositionInLine = token.getCharPositionInLine();
		}
		// note: a tree node stream may refine line/pos info below even if
		// the TokenStream branch above already set them
		if ( input instanceof TreeNodeStream ) {
			extractInformationFromTreeNodeStream(input);
		}
		else if ( input instanceof CharStream ) {
			this.c = input.LA(1);
			this.line = ((CharStream)input).getLine();
			this.charPositionInLine = ((CharStream)input).getCharPositionInLine();
		}
		else {
			// unknown stream kind; all we can record is the lookahead symbol
			this.c = input.LA(1);
		}
	}

	/** Pull node/token/line/pos info out of a tree node stream.  Imaginary
	 *  nodes (payload line <= 0) trigger a backwards scan for the most
	 *  recent node with real line/pos info; approximateLineInfo is set
	 *  so reporting code knows the location is a best guess.
	 */
	protected void extractInformationFromTreeNodeStream(IntStream input) {
		TreeNodeStream nodes = (TreeNodeStream)input;
		this.node = nodes.LT(1);
		TreeAdaptor adaptor = nodes.getTreeAdaptor();
		Token payload = adaptor.getToken(node);
		if ( payload!=null ) {
			this.token = payload;
			if ( payload.getLine()<= 0 ) {
				// imaginary node; no line/pos info; scan backwards
				int i = -1;
				Object priorNode = nodes.LT(i);
				while ( priorNode!=null ) {
					Token priorPayload = adaptor.getToken(priorNode);
					if ( priorPayload!=null && priorPayload.getLine()>0 ) {
						// we found the most recent real line / pos info
						this.line = priorPayload.getLine();
						this.charPositionInLine = priorPayload.getCharPositionInLine();
						this.approximateLineInfo = true;
						break;
					}
					--i;
					priorNode = nodes.LT(i);
				}
			}
			else { // node created from real token
				this.line = payload.getLine();
				this.charPositionInLine = payload.getCharPositionInLine();
			}
		}
		else if ( this.node instanceof Tree) {
			// no token payload but the node itself knows its position
			this.line = ((Tree)this.node).getLine();
			this.charPositionInLine = ((Tree)this.node).getCharPositionInLine();
			if ( this.node instanceof CommonTree) {
				this.token = ((CommonTree)this.node).token;
			}
		}
		else {
			// opaque node; synthesize a token from the adaptor's view of it
			int type = adaptor.getType(this.node);
			String text = adaptor.getText(this.node);
			this.token = new CommonToken(type, text);
		}
	}

	/** Return the token type or char of the unexpected input element */
	public int getUnexpectedType() {
		if ( input instanceof TokenStream ) {
			return token.getType();
		}
		else if ( input instanceof TreeNodeStream ) {
			TreeNodeStream nodes = (TreeNodeStream)input;
			TreeAdaptor adaptor = nodes.getTreeAdaptor();
			return adaptor.getType(node);
		}
		else {
			return c;
		}
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/RuleReturnScope.java b/runtime/Java/src/org/antlr/runtime/RuleReturnScope.java
new file mode 100644
index 0000000..cb997c0
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/RuleReturnScope.java
@@ -0,0 +1,15 @@
+package org.antlr.runtime;
+
+/** Rules can return start/stop info as well as possible trees and templates */
/** Rules can return start/stop info as well as possible trees and templates.
 *  This base class defines the accessors; every one returns null here and
 *  subclasses override whichever values they actually carry.
 */
public class RuleReturnScope {
	/** Return the start token or tree */
	public Object getStart() {
		return null;
	}

	/** Return the stop token or tree */
	public Object getStop() {
		return null;
	}

	/** Has a value potentially if output=AST; */
	public Object getTree() {
		return null;
	}

	/** Has a value potentially if output=template; Don't use StringTemplate
	 *  type as it then causes a dependency with ST lib.
	 */
	public Object getTemplate() {
		return null;
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/Token.java b/runtime/Java/src/org/antlr/runtime/Token.java
new file mode 100644
index 0000000..d765459
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/Token.java
@@ -0,0 +1,84 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+public interface Token {
+	public static final int EOR_TOKEN_TYPE = 1;
+
+	/** imaginary tree navigation type; traverse "get child" link */
+	public static final int DOWN = 2;
+	/** imaginary tree navigation type; finish with a child list */
+	public static final int UP = 3;
+
+	public static final int MIN_TOKEN_TYPE = UP+1;
+
+    public static final int EOF = CharStream.EOF;
+	public static final Token EOF_TOKEN = new CommonToken(EOF);
+	
+	public static final int INVALID_TOKEN_TYPE = 0;
+	public static final Token INVALID_TOKEN = new CommonToken(INVALID_TOKEN_TYPE);
+
+	/** In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
+	 *  will avoid creating a token for this symbol and try to fetch another.
+	 */
+	public static final Token SKIP_TOKEN = new CommonToken(INVALID_TOKEN_TYPE);
+
+	/** All tokens go to the parser (unless skip() is called in that rule)
+	 *  on a particular "channel".  The parser tunes to a particular channel
+	 *  so that whitespace etc... can go to the parser on a "hidden" channel.
+	 */
+	public static final int DEFAULT_CHANNEL = 0;
+	
+	/** Anything on different channel than DEFAULT_CHANNEL is not parsed
+	 *  by parser.
+	 */
+	public static final int HIDDEN_CHANNEL = 99;
+
+	/** Get the text of the token */
+	public abstract String getText();
+	public abstract void setText(String text);
+
+	public abstract int getType();
+	public abstract void setType(int ttype);
+	/**  The line number on which this token was matched; line=1..n */
+	public abstract int getLine();
+    public abstract void setLine(int line);
+
+	/** The index of the first character relative to the beginning of the line 0..n-1 */
+	public abstract int getCharPositionInLine();
+	public abstract void setCharPositionInLine(int pos);
+
+	public abstract int getChannel();
+	public abstract void setChannel(int channel);
+
+	/** An index from 0..n-1 of the token object in the input stream.
+	 *  This must be valid in order to use the ANTLRWorks debugger.
+	 */
+	public abstract int getTokenIndex();
+	public abstract void setTokenIndex(int index);
+}
diff --git a/runtime/Java/src/org/antlr/runtime/TokenRewriteStream.java b/runtime/Java/src/org/antlr/runtime/TokenRewriteStream.java
new file mode 100644
index 0000000..55de81c
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/TokenRewriteStream.java
@@ -0,0 +1,512 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+import java.util.*;
+
+/** Useful for dumping out the input stream after doing some
+ *  augmentation or other manipulations.
+ *
+ *  You can insert stuff, replace, and delete chunks.  Note that the
+ *  operations are done lazily--only if you convert the buffer to a
+ *  String.  This is very efficient because you are not moving data around
+ *  all the time.  As the buffer of tokens is converted to strings, the
+ *  toString() method(s) check to see if there is an operation at the
+ *  current index.  If so, the operation is done and then normal String
+ *  rendering continues on the buffer.  This is like having multiple Turing
+ *  machine instruction streams (programs) operating on a single input tape. :)
+ *
+ *  Since the operations are done lazily at toString-time, operations do not
+ *  screw up the token index values.  That is, an insert operation at token
+ *  index i does not change the index values for tokens i+1..n-1.
+ *
+ *  Because operations never actually alter the buffer, you may always get
+ *  the original token stream back without undoing anything.  Since
+ *  the instructions are queued up, you can easily simulate transactions and
+ *  roll back any changes if there is an error just by removing instructions.
+ *  For example,
+ *
+ *   CharStream input = new ANTLRFileStream("input");
+ *   TLexer lex = new TLexer(input);
+ *   TokenRewriteStream tokens = new TokenRewriteStream(lex);
+ *   T parser = new T(tokens);
+ *   parser.startRule();
+ *
+ * 	 Then in the rules, you can execute
+ *      Token t,u;
+ *      ...
+ *      input.insertAfter(t, "text to put after t");}
+ * 		input.insertAfter(u, "text after u");}
+ * 		System.out.println(tokens.toString());
+ *
+ *  Actually, you have to cast the 'input' to a TokenRewriteStream. :(
+ *
+ *  You can also have multiple "instruction streams" and get multiple
+ *  rewrites from a single pass over the input.  Just name the instruction
+ *  streams and use that name again when printing the buffer.  This could be
+ *  useful for generating a C file and also its header file--all from the
+ *  same buffer:
+ *
+ *      tokens.insertAfter("pass1", t, "text to put after t");}
+ * 		tokens.insertAfter("pass2", u, "text after u");}
+ * 		System.out.println(tokens.toString("pass1"));
+ * 		System.out.println(tokens.toString("pass2"));
+ *
+ *  If you don't use named rewrite streams, a "default" stream is used as
+ *  the first example shows.
+ */
+public class TokenRewriteStream extends CommonTokenStream {
+	public static final String DEFAULT_PROGRAM_NAME = "default";
+    public static final int PROGRAM_INIT_SIZE = 100;
+	public static final int MIN_TOKEN_INDEX = 0;
+
+	// Define the rewrite operation hierarchy
+
+	static class RewriteOperation {
+		protected int index;
+		protected Object text;
+		protected RewriteOperation(int index, Object text) {
+			this.index = index;
+			this.text = text;
+		}
+		/** Execute the rewrite operation by possibly adding to the buffer.
+		 *  Return the index of the next token to operate on.
+		 */
+		public int execute(StringBuffer buf) {
+			return index;
+		}
+		public String toString() {
+			String opName = getClass().getName();
+			int $index = opName.indexOf('$');
+			opName = opName.substring($index+1, opName.length());
+			return opName+"@"+index+'"'+text+'"';
+		}
+	}
+
+	static class InsertBeforeOp extends RewriteOperation {
+		public InsertBeforeOp(int index, Object text) {
+			super(index,text);
+		}
+		public int execute(StringBuffer buf) {
+			buf.append(text);
+			return index;
+		}
+	}
+
+	/** TODO: make insertAfters append after each other.
+	static class InsertAfterOp extends InsertBeforeOp {
+		public InsertAfterOp(int index, String text) {
+			super(index,text);
+		}
+	}
+	 */
+
+	/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+	 *  instructions.
+	 */
+	static class ReplaceOp extends RewriteOperation {
+		protected int lastIndex;
+		public ReplaceOp(int from, int to, Object text) {
+			super(from,text);
+			lastIndex = to;
+		}
+		public int execute(StringBuffer buf) {
+			if ( text!=null ) {
+				buf.append(text);
+			}
+			return lastIndex+1;
+		}
+	}
+
+	static class DeleteOp extends ReplaceOp {
+		public DeleteOp(int from, int to) {
+			super(from, to, null);
+		}
+	}
+
+	/** You may have multiple, named streams of rewrite operations.
+	 *  I'm calling these things "programs."
+	 *  Maps String (name) -> rewrite (List)
+	 */
+	protected Map programs = null;
+
+	/** Map String (program name) -> Integer index */
+	protected Map lastRewriteTokenIndexes = null;
+
+	public TokenRewriteStream() {
+		init();
+	}
+
+	protected void init() {
+		programs = new HashMap();
+		programs.put(DEFAULT_PROGRAM_NAME, new ArrayList(PROGRAM_INIT_SIZE));
+		lastRewriteTokenIndexes = new HashMap();
+	}
+
+	public TokenRewriteStream(TokenSource tokenSource) {
+	    super(tokenSource);
+		init();
+	}
+
+	public TokenRewriteStream(TokenSource tokenSource, int channel) {
+		super(tokenSource, channel);
+		init();
+	}
+
+	public void rollback(int instructionIndex) {
+		rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
+	}
+
+	/** Rollback the instruction stream for a program so that
+	 *  the indicated instruction (via instructionIndex) is no
+	 *  longer in the stream.  UNTESTED!
+	 */
+	public void rollback(String programName, int instructionIndex) {
+		List is = (List)programs.get(programName);
+		if ( is!=null ) {
+			programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex));
+		}
+	}
+
+	public void deleteProgram() {
+		deleteProgram(DEFAULT_PROGRAM_NAME);
+	}
+
+	/** Reset the program so that no instructions exist */
+	public void deleteProgram(String programName) {
+		rollback(programName, MIN_TOKEN_INDEX);
+	}
+
+	/** If op.index > lastRewriteTokenIndexes, just add to the end.
+	 *  Otherwise, do linear */
+	protected void addToSortedRewriteList(RewriteOperation op) {
+		addToSortedRewriteList(DEFAULT_PROGRAM_NAME, op);
+	}
+
+	/** Add an instruction to the rewrite instruction list ordered by
+	 *  the instruction number (use a binary search for efficiency).
+	 *  The list is ordered so that toString() can be done efficiently.
+	 *
+	 *  When there are multiple instructions at the same index, the instructions
+	 *  must be ordered to ensure proper behavior.  For example, a delete at
+	 *  index i must kill any replace operation at i.  Insert-before operations
+	 *  must come before any replace / delete instructions.  If there are
+	 *  multiple insert instructions for a single index, they are done in
+	 *  reverse insertion order so that "insert foo" then "insert bar" yields
+	 *  "foobar" in front rather than "barfoo".  This is convenient because
+	 *  I can insert new InsertOp instructions at the index returned by
+	 *  the binary search.  A ReplaceOp kills any previous replace op.  Since
+	 *  delete is the same as replace with null text, i can check for
+	 *  ReplaceOp and cover DeleteOp at same time. :)
+	 */
+	protected void addToSortedRewriteList(String programName, RewriteOperation op) {
+		List rewrites = getProgram(programName);
+		//System.out.println("### add "+op+"; rewrites="+rewrites);
+		Comparator comparator = new Comparator() {
+			public int compare(Object o, Object o1) {
+				RewriteOperation a = (RewriteOperation)o;
+				RewriteOperation b = (RewriteOperation)o1;
+				if ( a.index<b.index ) return -1;
+				if ( a.index>b.index ) return 1;
+				return 0;
+			}
+		};
+        int pos = Collections.binarySearch(rewrites, op, comparator);
+		//System.out.println("bin search returns: pos="+pos);
+
+		if ( pos>=0 ) {
+			// binarySearch does not guarantee first element when multiple
+			// are found.  I must seach backwards for first op with op.index
+			for (; pos>=0; pos--) {
+				RewriteOperation prevOp = (RewriteOperation)rewrites.get(pos);
+				if ( prevOp.index<op.index ) {
+					break;
+				}
+			}
+			pos++; // pos points at first op before ops with op.index; go back up one
+			// now pos is the index in rewrites of first op with op.index
+			//System.out.println("first op with op.index: pos="+pos);
+
+			// an instruction operating already on that index was found;
+			// make this one happen after all the others
+			//System.out.println("found instr for index="+op.index);
+			if ( op instanceof ReplaceOp ) {
+				boolean replaced = false;
+				int i;
+				// look for an existing replace
+				for (i=pos; i<rewrites.size(); i++) {
+					RewriteOperation prevOp = (RewriteOperation)rewrites.get(pos);
+					if ( prevOp.index!=op.index ) {
+						break;
+					}
+					if ( prevOp instanceof ReplaceOp ) {
+						rewrites.set(pos, op); // replace old with new
+						replaced=true;
+						break;
+					}
+					// keep going; must be an insert
+				}
+				if ( !replaced ) {
+					// add replace op to the end of all the inserts
+					rewrites.add(i, op);
+				}
+			}
+			else {
+				// inserts are added in front of existing inserts
+				rewrites.add(pos, op);
+			}
+		}
+		else {
+			//System.out.println("no instruction at pos=="+pos);
+			rewrites.add(-pos-1, op);
+		}
+		//System.out.println("after, rewrites="+rewrites);
+	}
+
+	public void insertAfter(Token t, Object text) {
+		insertAfter(DEFAULT_PROGRAM_NAME, t, text);
+	}
+
+	public void insertAfter(int index, Object text) {
+		insertAfter(DEFAULT_PROGRAM_NAME, index, text);
+	}
+
+	public void insertAfter(String programName, Token t, Object text) {
+		insertAfter(programName,t.getTokenIndex(), text);
+	}
+
+	public void insertAfter(String programName, int index, Object text) {
+		// to insert after, just insert before next index (even if past end)
+		insertBefore(programName,index+1, text);
+		//addToSortedRewriteList(programName, new InsertAfterOp(index,text));
+	}
+
+	public void insertBefore(Token t, Object text) {
+		insertBefore(DEFAULT_PROGRAM_NAME, t, text);
+	}
+
+	public void insertBefore(int index, Object text) {
+		insertBefore(DEFAULT_PROGRAM_NAME, index, text);
+	}
+
+	public void insertBefore(String programName, Token t, Object text) {
+		insertBefore(programName, t.getTokenIndex(), text);
+	}
+
+	public void insertBefore(String programName, int index, Object text) {
+		addToSortedRewriteList(programName, new InsertBeforeOp(index,text));
+	}
+
+	public void replace(int index, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, index, index, text);
+	}
+
+	public void replace(int from, int to, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, from, to, text);
+	}
+
+	public void replace(Token indexT, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
+	}
+
+	public void replace(Token from, Token to, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, from, to, text);
+	}
+
+	public void replace(String programName, int from, int to, Object text) {
+		if ( from > to || from<0 || to<0 ) {
+			return;
+		}
+		addToSortedRewriteList(programName, new ReplaceOp(from, to, text));
+		/*
+		// replace from..to by deleting from..to-1 and then do a replace
+		// on last index
+		for (int i=from; i<to; i++) {
+			addToSortedRewriteList(new DeleteOp(i,i));
+		}
+		addToSortedRewriteList(new ReplaceOp(to, to, text));
+		*/
+	}
+
+	public void replace(String programName, Token from, Token to, Object text) {
+		replace(programName,
+				from.getTokenIndex(),
+				to.getTokenIndex(),
+				text);
+	}
+
+	public void delete(int index) {
+		delete(DEFAULT_PROGRAM_NAME, index, index);
+	}
+
+	public void delete(int from, int to) {
+		delete(DEFAULT_PROGRAM_NAME, from, to);
+	}
+
+	public void delete(Token indexT) {
+		delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
+	}
+
+	public void delete(Token from, Token to) {
+		delete(DEFAULT_PROGRAM_NAME, from, to);
+	}
+
+	public void delete(String programName, int from, int to) {
+		replace(programName,from,to,null);
+	}
+
+	public void delete(String programName, Token from, Token to) {
+		replace(programName,from,to,null);
+	}
+
+	public int getLastRewriteTokenIndex() {
+		return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
+	}
+
+	protected int getLastRewriteTokenIndex(String programName) {
+		Integer I = (Integer)lastRewriteTokenIndexes.get(programName);
+		if ( I==null ) {
+			return -1;
+		}
+		return I.intValue();
+	}
+
+	protected void setLastRewriteTokenIndex(String programName, int i) {
+		lastRewriteTokenIndexes.put(programName, new Integer(i));
+	}
+
+	protected List getProgram(String name) {
+		List is = (List)programs.get(name);
+		if ( is==null ) {
+			is = initializeProgram(name);
+		}
+		return is;
+	}
+
+	private List initializeProgram(String name) {
+		List is = new ArrayList(PROGRAM_INIT_SIZE);
+		programs.put(name, is);
+		return is;
+	}
+
+	public String toOriginalString() {
+		return toOriginalString(MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toOriginalString(int start, int end) {
+		StringBuffer buf = new StringBuffer();
+		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
+			buf.append(get(i).getText());
+		}
+		return buf.toString();
+	}
+
+	public String toString() {
+		return toString(MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toString(String programName) {
+		return toString(programName, MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toString(int start, int end) {
+		return toString(DEFAULT_PROGRAM_NAME, start, end);
+	}
+
+	public String toString(String programName, int start, int end) {
+		List rewrites = (List)programs.get(programName);
+		if ( rewrites==null || rewrites.size()==0 ) {
+			return toOriginalString(start,end); // no instructions to execute
+		}
+		StringBuffer buf = new StringBuffer();
+
+		/// Index of first rewrite we have not done
+		int rewriteOpIndex = 0;
+
+		int tokenCursor=start;
+		while ( tokenCursor>=MIN_TOKEN_INDEX &&
+				tokenCursor<=end &&
+				tokenCursor<tokens.size() )
+		{
+			//System.out.println("tokenCursor="+tokenCursor);
+			// execute instructions associated with this token index
+			if ( rewriteOpIndex<rewrites.size() ) {
+				RewriteOperation op =
+						(RewriteOperation)rewrites.get(rewriteOpIndex);
+
+				// skip all ops at lower index
+				while ( op.index<tokenCursor && rewriteOpIndex<rewrites.size() ) {
+					rewriteOpIndex++;
+					if ( rewriteOpIndex<rewrites.size() ) {
+						op = (RewriteOperation)rewrites.get(rewriteOpIndex);
+					}
+				}
+
+				// while we have ops for this token index, exec them
+				while ( tokenCursor==op.index && rewriteOpIndex<rewrites.size() ) {
+					//System.out.println("execute "+op+" at instruction "+rewriteOpIndex);
+					tokenCursor = op.execute(buf);
+					//System.out.println("after execute tokenCursor = "+tokenCursor);
+					rewriteOpIndex++;
+					if ( rewriteOpIndex<rewrites.size() ) {
+						op = (RewriteOperation)rewrites.get(rewriteOpIndex);
+					}
+				}
+			}
+			// dump the token at this index
+			if ( tokenCursor<=end ) {
+				buf.append(get(tokenCursor).getText());
+				tokenCursor++;
+			}
+		}
+		// now see if there are operations (append) beyond last token index
+		for (int opi=rewriteOpIndex; opi<rewrites.size(); opi++) {
+			RewriteOperation op =
+					(RewriteOperation)rewrites.get(opi);
+			if ( op.index>=size() ) {
+				op.execute(buf); // must be insertions if after last token
+			}
+			//System.out.println("execute "+op+" at "+opi);
+			//op.execute(buf); // must be insertions if after last token
+		}
+
+		return buf.toString();
+	}
+
+	public String toDebugString() {
+		return toDebugString(MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toDebugString(int start, int end) {
+		StringBuffer buf = new StringBuffer();
+		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
+			buf.append(get(i));
+		}
+		return buf.toString();
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/TokenSource.java b/runtime/Java/src/org/antlr/runtime/TokenSource.java
new file mode 100644
index 0000000..225c594
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/TokenSource.java
@@ -0,0 +1,49 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A source of tokens must provide a sequence of tokens via nextToken()
+ *  and also must reveal its source of characters; CommonToken's text is
+ *  computed from a CharStream; it only stores indices into the char stream.
+ *
+ *  Errors from the lexer are never passed to the parser.  Either you want
+ *  to keep going or you do not upon token recognition error.  If you do not
+ *  want to continue lexing then you do not want to continue parsing.  Just
+ *  throw an exception not under RecognitionException and Java will naturally
+ *  toss you all the way out of the recognizers.  If you want to continue
+ *  lexing then you should not throw an exception to the parser--it has already
+ *  requested a token.  Keep lexing until you get a valid one.  Just report
+ *  errors and keep going, looking for a valid token.
+ */
+public interface TokenSource {
+	/** Return a Token object from your input stream (usually a CharStream).
+	 *  Do not fail/return upon lexing error; keep chewing on the characters
+	 *  until you get a good one; errors are not passed through to the parser.
+	 */
+	public Token nextToken();
+}
diff --git a/runtime/Java/src/org/antlr/runtime/TokenStream.java b/runtime/Java/src/org/antlr/runtime/TokenStream.java
new file mode 100644
index 0000000..b7c5903
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/TokenStream.java
@@ -0,0 +1,68 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime;
+
+/** A stream of tokens accessing tokens from a TokenSource */
+public interface TokenStream extends IntStream {
+    /** Get Token at current input pointer + i ahead where i=1 is next Token.
+	 *  i<0 indicates tokens in the past.  So -1 is previous token and -2 is
+	 *  two tokens ago. LT(0) is undefined.  For i>=n, return Token.EOFToken.
+	 *  Return null for LT(0) and any index that results in an absolute address
+	 *  that is negative.
+	 */
+    public Token LT(int k);
+
+	/** Get a token at an absolute index i; 0..n-1.  This is really only
+	 *  needed for profiling and debugging and token stream rewriting.
+	 *  If you don't want to buffer up tokens, then this method makes no
+	 *  sense for you.  Naturally you can't use the rewrite stream feature.
+	 *  I believe DebugTokenStream can easily be altered to not use
+	 *  this method, removing the dependency.
+	 */
+	public Token get(int i);
+
+	/** Where is this stream pulling tokens from?  This is not the name, but
+	 *  the object that provides Token objects.
+	 */
+	public TokenSource getTokenSource();
+
+	/** Return the text of all tokens from start to stop, inclusive.
+	 *  If the stream does not buffer all the tokens then it can just
+	 *  return "" or null;  Users should not access $ruleLabel.text in
+	 *  an action of course in that case.
+	 */
+	public String toString(int start, int stop);
+
+	/** Because the user is not required to use a token with an index stored
+	 *  in it, we must provide a means for two token objects themselves to
+	 *  indicate the start/end location.  Most often this will just delegate
+	 *  to the other toString(int,int).  This is also parallel with
+	 *  the TreeNodeStream.toString(Object,Object).
+	 */
+	public String toString(Token start, Token stop);
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/BlankDebugEventListener.java b/runtime/Java/src/org/antlr/runtime/debug/BlankDebugEventListener.java
new file mode 100755
index 0000000..05b6d28
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/BlankDebugEventListener.java
@@ -0,0 +1,77 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+
+/** A blank listener that does nothing; useful as a base for real classes so
+ *  they don't have to implement lots of empty methods and are less
+ *  sensitive to updates of the debug interface.
+ */
+public class BlankDebugEventListener implements DebugEventListener {
+	public void enterRule(String ruleName) {}
+	public void exitRule(String ruleName) {}
+	public void enterAlt(int alt) {}
+	public void enterSubRule(int decisionNumber) {}
+	public void exitSubRule(int decisionNumber) {}
+	public void enterDecision(int decisionNumber) {}
+	public void exitDecision(int decisionNumber) {}
+	public void location(int line, int pos) {}
+	public void consumeToken(Token token) {}
+	public void consumeHiddenToken(Token token) {}
+	public void LT(int i, Token t) {}
+	public void mark(int i) {}
+	public void rewind(int i) {}
+	public void rewind() {}
+	public void beginBacktrack(int level) {}
+	public void endBacktrack(int level, boolean successful) {}
+	public void recognitionException(RecognitionException e) {}
+	public void beginResync() {}
+	public void endResync() {}
+	public void semanticPredicate(boolean result, String predicate) {}
+	public void commence() {}
+	public void terminate() {}
+
+	// Tree parsing stuff
+
+	public void consumeNode(Object t) {}
+	public void LT(int i, Object t) {}
+
+	// AST Stuff
+
+	public void nilNode(Object t) {}
+	//public void setSubTreeRoot(String name, int ID) {}
+	public void createNode(Object t) {}
+	public void createNode(Object node, Token token) {}
+	public void becomeRoot(Object newRoot, Object oldRoot) {}
+	public void addChild(Object root, Object child) {}
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {}
+}
+
+
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventHub.java b/runtime/Java/src/org/antlr/runtime/debug/DebugEventHub.java
new file mode 100644
index 0000000..90ee112
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugEventHub.java
@@ -0,0 +1,258 @@
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.RecognitionException;
+
+import java.util.List;
+import java.util.ArrayList;
+
+/** Broadcast debug events to multiple listeners.  Lets you debug and still
+ *  use the event mechanism to build parse trees etc...  Not thread-safe.
+ *  Don't add events in one thread while parser fires events in another.
+ * 
+ *  @see DebugEventRepeater
+ */
+public class DebugEventHub implements DebugEventListener {
+	protected List listeners = new ArrayList();
+
+	public DebugEventHub(DebugEventListener listener) {
+		listeners.add(listener);
+	}
+
+	public DebugEventHub(DebugEventListener a, DebugEventListener b) {
+		listeners.add(a);
+		listeners.add(b);
+	}
+
+	/** Add another listener to broadcast events to.  Not thread-safe.
+	 *  Don't add events in one thread while parser fires events in another.
+	 */
+	public void addListener(DebugEventListener listener) {
+		listeners.add(listener); // BUG FIX: was add(listeners) -- added the list to itself, so this listener never got events
+	}
+	
+	/* To avoid a mess like this:
+		public void enterRule(final String ruleName) {
+			broadcast(new Code(){
+				public void exec(DebugEventListener listener) {listener.enterRule(ruleName);}}
+				);
+		}
+		I am dup'ing the for-loop in each.  Where are Java closures!? blech!
+	 */
+
+	public void enterRule(String ruleName) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.enterRule(ruleName);
+		}
+	}
+
+	public void exitRule(String ruleName) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.exitRule(ruleName);
+		}
+	}
+
+	public void enterAlt(int alt) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.enterAlt(alt);
+		}
+	}
+
+	public void enterSubRule(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.enterSubRule(decisionNumber);
+		}
+	}
+
+	public void exitSubRule(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.exitSubRule(decisionNumber);
+		}
+	}
+
+	public void enterDecision(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.enterDecision(decisionNumber);
+		}
+	}
+
+	public void exitDecision(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.exitDecision(decisionNumber);
+		}
+	}
+
+	public void location(int line, int pos) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.location(line, pos);
+		}
+	}
+
+	public void consumeToken(Token token) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.consumeToken(token);
+		}
+	}
+
+	public void consumeHiddenToken(Token token) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.consumeHiddenToken(token);
+		}
+	}
+
+	public void LT(int index, Token t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.LT(index, t);
+		}
+	}
+
+	public void mark(int index) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.mark(index);
+		}
+	}
+
+	public void rewind(int index) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.rewind(index);
+		}
+	}
+
+	public void rewind() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.rewind();
+		}
+	}
+
+	public void beginBacktrack(int level) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.beginBacktrack(level);
+		}
+	}
+
+	public void endBacktrack(int level, boolean successful) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.endBacktrack(level, successful);
+		}
+	}
+
+	public void recognitionException(RecognitionException e) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.recognitionException(e);
+		}
+	}
+
+	public void beginResync() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.beginResync();
+		}
+	}
+
+	public void endResync() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.endResync();
+		}
+	}
+
+	public void semanticPredicate(boolean result, String predicate) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.semanticPredicate(result, predicate);
+		}
+	}
+
+	public void commence() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.commence();
+		}
+	}
+
+	public void terminate() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.terminate();
+		}
+	}
+
+
+	// Tree parsing stuff
+
+	public void consumeNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.consumeNode(t);
+		}
+	}
+
+	public void LT(int index, Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.LT(index, t);
+		}
+	}
+
+
+	// AST Stuff
+
+	public void nilNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.nilNode(t);
+		}
+	}
+
+	public void createNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.createNode(t);
+		}
+	}
+
+	public void createNode(Object node, Token token) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.createNode(node, token);
+		}
+	}
+
+	public void becomeRoot(Object newRoot, Object oldRoot) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.becomeRoot(newRoot, oldRoot);
+		}
+	}
+
+	public void addChild(Object root, Object child) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.addChild(root, child);
+		}
+	}
+
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.setTokenBoundaries(t, tokenStartIndex, tokenStopIndex);
+		}
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventListener.java b/runtime/Java/src/org/antlr/runtime/debug/DebugEventListener.java
new file mode 100644
index 0000000..0cd76d8
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugEventListener.java
@@ -0,0 +1,312 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+
+/** All debugging events that a recognizer can trigger.
+ *
+ *  I did not create a separate AST debugging interface as it would create
+ *  lots of extra classes and DebugParser has a dbg var defined, which makes
+ *  it hard to change to ASTDebugEventListener.  I looked hard at this issue
+ *  and it is easier to understand as one monolithic event interface for all
+ *  possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
+ *  for future. 4/26/2006.
+ */
+public interface DebugEventListener {
+	public static final String PROTOCOL_VERSION = "1";
+	
+	/** serialized version of true */
+	public static final int TRUE = 1;
+	public static final int FALSE = 0;
+
+	/** The parser has just entered a rule.  No decision has been made about
+	 *  which alt is predicted.  This is fired AFTER init actions have been
+	 *  executed.  Attributes are defined and available etc...
+	 */
+	public void enterRule(String ruleName);
+
+	/** Because rules can have lots of alternatives, it is very useful to
+	 *  know which alt you are entering.  This is 1..n for n alts.
+	 */
+	public void enterAlt(int alt);
+
+	/** This is the last thing executed before leaving a rule.  It is
+	 *  executed even if an exception is thrown.  This is triggered after
+	 *  error reporting and recovery have occurred (unless the exception is
+	 *  not caught in this rule).  This implies an "exitAlt" event.
+	 */
+	public void exitRule(String ruleName);
+
+	/** Track entry into any (...) subrule or other EBNF construct */
+	public void enterSubRule(int decisionNumber);
+
+	public void exitSubRule(int decisionNumber);
+
+	/** Every decision, fixed k or arbitrary, has an enter/exit event
+	 *  so that a GUI can easily track what LT/consume events are
+	 *  associated with prediction.  You will see a single enter/exit
+	 *  subrule but multiple enter/exit decision events, one for each
+	 *  loop iteration.
+	 */
+	public void enterDecision(int decisionNumber);
+
+	public void exitDecision(int decisionNumber);
+
+	/** An input token was consumed; matched by any kind of element.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 */
+	public void consumeToken(Token t);
+
+	/** An off-channel input token was consumed.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 *  (unless of course the hidden token is first stuff in the input stream).
+	 */
+	public void consumeHiddenToken(Token t);
+
+	/** Somebody (anybody) looked ahead.  Note that this actually gets
+	 *  triggered by both LA and LT calls.  The debugger will want to know
+	 *  which Token object was examined.  Like consumeToken, this indicates
+	 *  what token was seen at that depth.  A remote debugger cannot look
+	 *  ahead into a file it doesn't have so LT events must pass the token
+	 *  even if the info is redundant.
+	 */
+	public void LT(int i, Token t);
+
+	/** The parser is going to look arbitrarily ahead; mark this location,
+	 *  the token stream's marker is sent in case you need it.
+	 */
+	public void mark(int marker);
+
+	/** After an arbitrarily long lookahead as with a cyclic DFA (or with
+	 *  any backtrack), this informs the debugger that stream should be
+	 *  rewound to the position associated with marker.
+	 */
+	public void rewind(int marker);
+
+	/** Rewind to the input position of the last marker.
+	 *  Used currently only after a cyclic DFA and just
+	 *  before starting a sem/syn predicate to get the
+	 *  input position back to the start of the decision.
+	 *  Do not "pop" the marker off the state.  mark(i)
+	 *  and rewind(i) should balance still.
+	 */
+	public void rewind();
+
+	public void beginBacktrack(int level);
+
+	public void endBacktrack(int level, boolean successful);
+
+	/** To watch a parser move through the grammar, the parser needs to
+	 *  inform the debugger what line/charPos it is passing in the grammar.
+	 *  For now, this does not know how to switch from one grammar to the
+	 *  other and back for island grammars etc...
+	 *
+	 *  This should also allow breakpoints because the debugger can stop
+	 *  the parser whenever it hits this line/pos.
+	 */
+	public void location(int line, int pos);
+
+	/** A recognition exception occurred such as NoViableAltException.  I made
+	 *  this a generic event so that I can alter the exception hierarchy later
+	 *  without having to alter all the debug objects.
+	 *
+	 *  Upon error, the stack of enter rule/subrule must be properly unwound.
+	 *  If no viable alt occurs it is within an enter/exit decision, which
+	 *  also must be rewound.  Even the rewind for each mark must be unwound.
+	 *  In the Java target this is pretty easy using try/finally, if a bit
+	 *  ugly in the generated code.  The rewind is generated in DFA.predict()
+	 *  actually so no code needs to be generated for that.  For languages
+	 *  w/o this "finally" feature (C++?), the target implementor will have
+	 *  to build an event stack or something.
+	 *
+	 *  Across a socket for remote debugging, only the RecognitionException
+	 *  data fields are transmitted.  The token object or whatever that
+	 *  caused the problem was the last object referenced by LT.  The
+	 *  immediately preceding LT event should hold the unexpected Token or
+	 *  char.
+	 *
+	 *  Here is a sample event trace for grammar:
+	 *
+	 *  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
+     *    | D
+     *    ;
+     *
+	 *  The sequence for this rule (with no viable alt in the subrule) for
+	 *  input 'c c' (there are 3 tokens) is:
+	 *
+	 *		commence
+	 *		LT(1)
+	 *		enterRule b
+	 *		location 7 1
+	 *		enter decision 3
+	 *		LT(1)
+	 *		exit decision 3
+	 *		enterAlt1
+	 *		location 7 5
+	 *		LT(1)
+	 *		consumeToken [c/<4>,1:0]
+	 *		location 7 7
+	 *		enterSubRule 2
+	 *		enter decision 2
+	 *		LT(1)
+	 *		LT(1)
+	 *		recognitionException NoViableAltException 2 1 2
+	 *		exit decision 2
+	 *		exitSubRule 2
+	 *		beginResync
+	 *		LT(1)
+	 *		consumeToken [c/<4>,1:1]
+	 *		LT(1)
+	 *		endResync
+	 *		LT(-1)
+	 *		exitRule b
+	 *		terminate
+	 */
+	public void recognitionException(RecognitionException e);
+
+	/** Indicates the recognizer is about to consume tokens to resynchronize
+	 *  the parser.  Any consume events from here until the recovered event
+	 *  are not part of the parse--they are dead tokens.
+	 */
+	public void beginResync();
+
+	/** Indicates that the recognizer has finished consuming tokens in order
+	 *  to resynchronize.  There may be multiple beginResync/endResync pairs
+	 *  before the recognizer comes out of errorRecovery mode (in which
+	 *  multiple errors are suppressed).  This will be useful
+	 *  in a gui where you want to probably grey out tokens that are consumed
+	 *  but not matched to anything in grammar.  Anything between
+	 *  a beginResync/endResync pair was tossed out by the parser.
+	 */
+	public void endResync();
+
+	/** A semantic predicate was evaluated with this result and action text */
+	public void semanticPredicate(boolean result, String predicate);
+
+	/** Announce that parsing has begun.  Not technically useful except for
+	 *  sending events over a socket.  A GUI for example will launch a thread
+	 *  to connect and communicate with a remote parser.  The thread will want
+	 *  to notify the GUI when a connection is made.  ANTLR parsers
+	 *  trigger this upon entry to the first rule (the ruleLevel is used to
+	 *  figure this out).
+	 */
+	public void commence();
+
+	/** Parsing is over; successfully or not.  Mostly useful for telling
+	 *  remote debugging listeners that it's time to quit.  When the rule
+	 *  invocation level goes to zero at the end of a rule, we are done
+	 *  parsing.
+	 */
+	public void terminate();
+
+
+	// T r e e  P a r s i n g
+
+	/** Input for a tree parser is an AST, but we know nothing for sure
+	 *  about a node except its type and text (obtained from the adaptor).
+	 *  This is the analog of the consumeToken method.  Again, the ID is
+	 *  the hashCode usually of the node so it only works if hashCode is
+	 *  not implemented.  If the type is UP or DOWN, then
+	 *  the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 * @param t
+	 */
+	public void consumeNode(Object t);
+
+	/** The tree parser lookedahead.  If the type is UP or DOWN,
+	 *  then the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 */
+	public void LT(int i, Object t);
+
+
+	// A S T  E v e n t s
+
+	/** A nil was created (even nil nodes have a unique ID...
+	 *  they are not "null" per se).  As of 4/28/2006, this
+	 *  seems to be uniquely triggered when starting a new subtree
+	 *  such as when entering a subrule in automatic mode and when
+	 *  building a tree in rewrite mode.
+     *
+ 	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	public void nilNode(Object t);
+
+	/** Announce a new node built from token elements such as type etc...
+	 * 
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID, type, text are
+	 *  set.
+	 */
+	public void createNode(Object t);
+
+	/** Announce a new node built from an existing token.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only node.ID and token.tokenIndex
+	 *  are set.
+	 */
+	public void createNode(Object node, Token token);
+
+	/** Make a node the new root of an existing root.  See
+	 *
+	 *  Note: the newRootID parameter is possibly different
+	 *  than the TreeAdaptor.becomeRoot() newRoot parameter.
+	 *  In our case, it will always be the result of calling
+	 *  TreeAdaptor.becomeRoot() and not root_n or whatever.
+	 *
+	 *  The listener should assume that this event occurs
+	 *  only when the current subrule (or rule) subtree is
+	 *  being reset to newRootID.
+	 * 
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 *
+	 *  @see org.antlr.runtime.tree.TreeAdaptor.becomeRoot()
+	 */
+	public void becomeRoot(Object newRoot, Object oldRoot);
+
+	/** Make childID a child of rootID.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 * 
+	 *  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
+	 */
+	public void addChild(Object root, Object child);
+
+	/** Set the token start/stop token index for a subtree root or node.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex);
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventRepeater.java b/runtime/Java/src/org/antlr/runtime/debug/DebugEventRepeater.java
new file mode 100644
index 0000000..fd74691
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugEventRepeater.java
@@ -0,0 +1,60 @@
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.RecognitionException;
+
+/** A simple event repeater (proxy) that delegates all functionality to the
+ *  listener sent into the ctor.  Useful if you want to listen in on a few
+ *  debug events w/o interrupting the debugger.  Just subclass the repeater
+ *  and override the methods you want to listen in on.  Remember to call
+ *  the method in this class so the event will continue on to the original
+ *  recipient.
+ *
+ *  @see DebugEventHub
+ */
+public class DebugEventRepeater implements DebugEventListener {
+	protected DebugEventListener listener;
+
+	public DebugEventRepeater(DebugEventListener listener) {
+		this.listener = listener;
+	}
+	
+	public void enterRule(String ruleName) { listener.enterRule(ruleName); }
+	public void exitRule(String ruleName) { listener.exitRule(ruleName); }
+	public void enterAlt(int alt) { listener.enterAlt(alt); }
+	public void enterSubRule(int decisionNumber) { listener.enterSubRule(decisionNumber); }
+	public void exitSubRule(int decisionNumber) { listener.exitSubRule(decisionNumber); }
+	public void enterDecision(int decisionNumber) { listener.enterDecision(decisionNumber); }
+	public void exitDecision(int decisionNumber) { listener.exitDecision(decisionNumber); }
+	public void location(int line, int pos) { listener.location(line, pos); }
+	public void consumeToken(Token token) { listener.consumeToken(token); }
+	public void consumeHiddenToken(Token token) { listener.consumeHiddenToken(token); }
+	public void LT(int i, Token t) { listener.LT(i, t); }
+	public void mark(int i) { listener.mark(i); }
+	public void rewind(int i) { listener.rewind(i); }
+	public void rewind() { listener.rewind(); }
+	public void beginBacktrack(int level) { listener.beginBacktrack(level); }
+	public void endBacktrack(int level, boolean successful) { listener.endBacktrack(level, successful); }
+	public void recognitionException(RecognitionException e) { listener.recognitionException(e); }
+	public void beginResync() { listener.beginResync(); }
+	public void endResync() { listener.endResync(); }
+	public void semanticPredicate(boolean result, String predicate) { listener.semanticPredicate(result, predicate); }
+	public void commence() { listener.commence(); }
+	public void terminate() { listener.terminate(); }
+
+	// Tree parsing stuff
+
+	public void consumeNode(Object t) { listener.consumeNode(t); }
+	public void LT(int i, Object t) { listener.LT(i, t); }
+
+	// AST Stuff
+
+	public void nilNode(Object t) { listener.nilNode(t); }
+	public void createNode(Object t) { listener.createNode(t); }
+	public void createNode(Object node, Token token) { listener.createNode(node, token); }
+	public void becomeRoot(Object newRoot, Object oldRoot) { listener.becomeRoot(newRoot, oldRoot); }
+	public void addChild(Object root, Object child) { listener.addChild(root, child); }
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		listener.setTokenBoundaries(t, tokenStartIndex, tokenStopIndex);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventSocketProxy.java b/runtime/Java/src/org/antlr/runtime/debug/DebugEventSocketProxy.java
new file mode 100644
index 0000000..90fc868
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugEventSocketProxy.java
@@ -0,0 +1,338 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.BaseRecognizer;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+import java.io.*;
+import java.net.ServerSocket;
+import java.net.Socket;
+
+/** A proxy debug event listener that forwards events over a socket to
+ *  a debugger (or any other listener) using a simple text-based protocol;
+ *  one event per line.  ANTLRWorks listens on server socket with a
+ *  RemoteDebugEventSocketListener instance.  These two objects must therefore
+ *  be kept in sync.  New events must be handled on both sides of socket.
+ */
+public class DebugEventSocketProxy extends BlankDebugEventListener {
+	public static final int DEFAULT_DEBUGGER_PORT = 0xC001;
+	protected int port = DEFAULT_DEBUGGER_PORT;
+	protected ServerSocket serverSocket;
+	protected Socket socket;
+	protected String grammarFileName;
+	protected PrintWriter out;
+	protected BufferedReader in;
+
+	/** Who am i debugging? */
+	protected BaseRecognizer recognizer;
+
+	/** Almost certainly the recognizer will have adaptor set, but
+	 *  we don't know how to cast it (Parser or TreeParser) to get
+	 *  the adaptor field.  Must be set with a constructor. :(
+	 */
+	protected TreeAdaptor adaptor;
+
+	public DebugEventSocketProxy(BaseRecognizer recognizer, TreeAdaptor adaptor) {
+		this(recognizer, DEFAULT_DEBUGGER_PORT, adaptor);
+	}
+
+	public DebugEventSocketProxy(BaseRecognizer recognizer, int port, TreeAdaptor adaptor) {
+		this.grammarFileName = recognizer.getGrammarFileName();
+		this.adaptor = adaptor;
+		this.port = port;
+	}
+
+	public void handshake() throws IOException {
+		if ( serverSocket==null ) {
+			serverSocket = new ServerSocket(port);
+			socket = serverSocket.accept();
+			socket.setTcpNoDelay(true);
+			OutputStream os = socket.getOutputStream();
+			OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
+			out = new PrintWriter(new BufferedWriter(osw));
+			InputStream is = socket.getInputStream();
+			InputStreamReader isr = new InputStreamReader(is, "UTF8");
+			in = new BufferedReader(isr);
+			out.println("ANTLR "+ DebugEventListener.PROTOCOL_VERSION);
+			out.println("grammar \""+ grammarFileName);
+			out.flush();
+		}
+	}
+
+	public void commence() {
+		// don't bother sending event; listener will trigger upon connection
+	}
+
+	public void terminate() {
+		transmit("terminate");
+		out.close();
+		try {
+			socket.close();
+		}
+		catch (IOException ioe) {
+			ioe.printStackTrace(System.err);
+		}
+	}
+
+	protected void ack() {
+		try {
+			in.readLine();
+		}
+		catch (IOException ioe) {
+			ioe.printStackTrace(System.err);
+		}
+
+	}
+
+	protected void transmit(String event) {
+		out.println(event);
+		out.flush();
+		ack();
+	}
+
+	public void enterRule(String ruleName) {
+		transmit("enterRule "+ruleName);
+	}
+
+	public void enterAlt(int alt) {
+		transmit("enterAlt "+alt);
+	}
+
+	public void exitRule(String ruleName) {
+		transmit("exitRule "+ruleName);
+	}
+
+	public void enterSubRule(int decisionNumber) {
+		transmit("enterSubRule "+decisionNumber);
+	}
+
+	public void exitSubRule(int decisionNumber) {
+		transmit("exitSubRule "+decisionNumber);
+	}
+
+	public void enterDecision(int decisionNumber) {
+		transmit("enterDecision "+decisionNumber);
+	}
+
+	public void exitDecision(int decisionNumber) {
+		transmit("exitDecision "+decisionNumber);
+	}
+
+	public void consumeToken(Token t) {
+		String buf = serializeToken(t);
+		transmit("consumeToken "+buf);
+	}
+
+	public void consumeHiddenToken(Token t) {
+		String buf = serializeToken(t);
+		transmit("consumeHiddenToken "+buf);
+	}
+
+	public void LT(int i, Token t) {
+        if(t != null)
+            transmit("LT "+i+" "+serializeToken(t));
+	}
+
+	public void mark(int i) {
+		transmit("mark "+i);
+	}
+
+	public void rewind(int i) {
+		transmit("rewind "+i);
+	}
+
+	public void rewind() {
+		transmit("rewind");
+	}
+
+	public void beginBacktrack(int level) {
+		transmit("beginBacktrack "+level);
+	}
+
+	public void endBacktrack(int level, boolean successful) {
+		transmit("endBacktrack "+level+" "+(successful?TRUE:FALSE));
+	}
+
+	public void location(int line, int pos) {
+		transmit("location "+line+" "+pos);
+	}
+
+	public void recognitionException(RecognitionException e) {
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("exception ");
+		buf.append(e.getClass().getName());
+		// dump only the data common to all exceptions for now
+		buf.append(" ");
+		buf.append(e.index);
+		buf.append(" ");
+		buf.append(e.line);
+		buf.append(" ");
+		buf.append(e.charPositionInLine);
+		transmit(buf.toString());
+	}
+
+	public void beginResync() {
+		transmit("beginResync");
+	}
+
+	public void endResync() {
+		transmit("endResync");
+	}
+
+	public void semanticPredicate(boolean result, String predicate) {
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("semanticPredicate ");
+		buf.append(result);
+		serializeText(buf, predicate);
+		transmit(buf.toString());
+	}
+
+	// A S T  P a r s i n g  E v e n t s
+
+	public void consumeNode(Object t) {
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("consumeNode");
+		serializeNode(buf, t);
+		transmit(buf.toString());
+	}
+
+	public void LT(int i, Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("LN "); // lookahead node; distinguish from LT in protocol
+		buf.append(i);
+		serializeNode(buf, t);
+		transmit(buf.toString());
+	}
+
+	protected void serializeNode(StringBuffer buf, Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		buf.append(" ");
+		buf.append(ID);
+		buf.append(" ");
+		buf.append(type);
+		Token token = adaptor.getToken(t);
+		int line = -1;
+		int pos = -1;
+		if ( token!=null ) {
+			line = token.getLine();
+			pos = token.getCharPositionInLine();
+		}
+		buf.append(" ");
+		buf.append(line);
+		buf.append(" ");
+		buf.append(pos);
+		int tokenIndex = adaptor.getTokenStartIndex(t);
+		buf.append(" ");
+		buf.append(tokenIndex);
+		serializeText(buf, text);
+	}
+
+	
+	// A S T  E v e n t s
+
+	public void nilNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		transmit("nilNode "+ID);
+	}
+
+	public void createNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("createNodeFromTokenElements ");
+		buf.append(ID);
+		buf.append(" ");
+		buf.append(type);
+		serializeText(buf, text);
+		transmit(buf.toString());
+	}
+
+	public void createNode(Object node, Token token) {
+		int ID = adaptor.getUniqueID(node);
+		int tokenIndex = token.getTokenIndex();
+		transmit("createNode "+ID+" "+tokenIndex);
+	}
+
+	public void becomeRoot(Object newRoot, Object oldRoot) {
+		int newRootID = adaptor.getUniqueID(newRoot);
+		int oldRootID = adaptor.getUniqueID(oldRoot);
+		transmit("becomeRoot "+newRootID+" "+oldRootID);
+	}
+
+	public void addChild(Object root, Object child) {
+		int rootID = adaptor.getUniqueID(root);
+		int childID = adaptor.getUniqueID(child);
+		transmit("addChild "+rootID+" "+childID);
+	}
+
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		int ID = adaptor.getUniqueID(t);
+		transmit("setTokenBoundaries "+ID+" "+tokenStartIndex+" "+tokenStopIndex);
+	}
+
+	// support
+
+	protected String serializeToken(Token t) {
+		StringBuffer buf = new StringBuffer(50);
+		buf.append(t.getTokenIndex()); buf.append(' ');
+		buf.append(t.getType()); buf.append(' ');
+		buf.append(t.getChannel()); buf.append(' ');
+		buf.append(t.getLine()); buf.append(' ');
+		buf.append(t.getCharPositionInLine());
+		serializeText(buf, t.getText());
+		return buf.toString();
+	}
+
+	protected void serializeText(StringBuffer buf, String text) {
+		buf.append(" \"");
+		if ( text==null ) {
+			text = "";
+		}
+		// escape \n and \r so that all text for a token appears to exist on one line
+		// this escape is slow but easy to understand
+		text = escapeNewlines(text);
+		buf.append(text);
+	}
+
+	protected String escapeNewlines(String txt) {
+		txt = txt.replaceAll("%","%25");   // escape all escape char ;)
+		txt = txt.replaceAll("\n","%0A");  // escape \n
+		txt = txt.replaceAll("\r","%0D");  // escape \r
+		return txt;
+	}
+}
+
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugParser.java b/runtime/Java/src/org/antlr/runtime/debug/DebugParser.java
new file mode 100644
index 0000000..9fff7b3
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugParser.java
@@ -0,0 +1,113 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+
+import java.io.IOException;
+
+public class DebugParser extends Parser {
+	/** Who to notify when events in the parser occur. */
+	protected DebugEventListener dbg = null;
+
+	/** Used to differentiate between fixed lookahead and cyclic DFA decisions
+	 *  while profiling.
+ 	 */
+	public boolean isCyclicDecision = false;
+
+	/** Create a normal parser except wrap the token stream in a debug
+	 *  proxy that fires consume events.
+	 */
+	public DebugParser(TokenStream input, DebugEventListener dbg) {
+		super(new DebugTokenStream(input,dbg));
+		setDebugListener(dbg);
+	}
+
+	public DebugParser(TokenStream input) {
+		this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT);
+	}
+
+	public DebugParser(TokenStream input, int port) {
+		super(new DebugTokenStream(input,null));
+	}
+
+	/** Provide a new debug event listener for this parser.  Notify the
+	 *  input stream too that it should send events to this listener.
+	 */
+	public void setDebugListener(DebugEventListener dbg) {
+		if ( input instanceof DebugTokenStream ) {
+			((DebugTokenStream)input).setDebugListener(dbg);
+		}
+		this.dbg = dbg;
+	}
+
+	public DebugEventListener getDebugListener() {
+		return dbg;
+	}
+
+	public void reportError(IOException e) {
+		System.err.println(e);
+		e.printStackTrace(System.err);
+	}
+
+	public void beginResync() {
+		dbg.beginResync();
+	}
+
+	public void endResync() {
+		dbg.endResync();
+	}
+
+	public void beginBacktrack(int level) {
+		dbg.beginBacktrack(level);
+	}
+
+	public void endBacktrack(int level, boolean successful) {
+		dbg.endBacktrack(level,successful);		
+	}
+
+	public void recoverFromMismatchedToken(IntStream input,
+										   RecognitionException mte,
+										   int ttype,
+										   BitSet follow)
+		throws RecognitionException
+	{
+		System.err.println("recoverFromMismatchedToken");
+		dbg.recognitionException(mte);
+		super.recoverFromMismatchedToken(input,mte,ttype,follow);
+	}
+
+	public void recoverFromMismatchedSet(IntStream input,
+										 RecognitionException mte,
+										 BitSet follow)
+		throws RecognitionException
+	{
+		dbg.recognitionException(mte);
+		super.recoverFromMismatchedSet(input,mte,follow);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTokenStream.java b/runtime/Java/src/org/antlr/runtime/debug/DebugTokenStream.java
new file mode 100644
index 0000000..adee23a
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugTokenStream.java
@@ -0,0 +1,146 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+
+public class DebugTokenStream implements TokenStream {
+	protected DebugEventListener dbg;
+	public TokenStream input;
+	protected boolean initialStreamState = true;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	public DebugTokenStream(TokenStream input, DebugEventListener dbg) {
+		this.input = input;
+		setDebugListener(dbg);
+		// force TokenStream to get at least first valid token
+		// so we know if there are any hidden tokens first in the stream
+		input.LT(1);
+	}
+
+	public void setDebugListener(DebugEventListener dbg) {
+		this.dbg = dbg;
+	}
+
+	public void consume() {
+		if ( initialStreamState ) {
+			consumeInitialHiddenTokens();
+		}
+		int a = input.index();
+		Token t = input.LT(1);
+		input.consume();
+		int b = input.index();
+		dbg.consumeToken(t);
+		if ( b>a+1 ) {
+			// then we consumed more than one token; must be off channel tokens
+			for (int i=a+1; i<b; i++) {
+				dbg.consumeHiddenToken(input.get(i));
+			}
+		}
+	}
+
+	/* consume all initial off-channel tokens */
+	protected void consumeInitialHiddenTokens() {
+		int firstOnChannelTokenIndex = input.index();
+		for (int i=0; i<firstOnChannelTokenIndex; i++) {
+			dbg.consumeHiddenToken(input.get(i));
+		}
+		initialStreamState = false;
+	}
+
+	public Token LT(int i) {
+		if ( initialStreamState ) {
+			consumeInitialHiddenTokens();
+		}
+		dbg.LT(i, input.LT(i));
+		return input.LT(i);
+	}
+
+	public int LA(int i) {
+		if ( initialStreamState ) {
+			consumeInitialHiddenTokens();
+		}
+		dbg.LT(i, input.LT(i));
+		return input.LA(i);
+	}
+
+	public Token get(int i) {
+		return input.get(i);
+	}
+
+	public int mark() {
+		lastMarker = input.mark();
+		dbg.mark(lastMarker);
+		return lastMarker;
+	}
+
+	public int index() {
+		return input.index();
+	}
+
+	public void rewind(int marker) {
+		dbg.rewind(marker);
+		input.rewind(marker);
+	}
+
+	public void rewind() {
+		dbg.rewind();
+		input.rewind(lastMarker);
+	}
+
+	public void release(int marker) {
+	}
+
+	public void seek(int index) {
+		// TODO: implement seek in dbg interface
+		// db.seek(index);
+		input.seek(index);
+	}
+
+	public int size() {
+		return input.size();
+	}
+
+	public TokenSource getTokenSource() {
+		return input.getTokenSource();
+	}
+
+	public String toString() {
+		return input.toString();
+	}
+
+	public String toString(int start, int stop) {
+		return input.toString(start,stop);
+	}
+
+	public String toString(Token start, Token stop) {
+		return input.toString(start,stop);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeAdaptor.java b/runtime/Java/src/org/antlr/runtime/debug/DebugTreeAdaptor.java
new file mode 100644
index 0000000..644c2e3
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugTreeAdaptor.java
@@ -0,0 +1,164 @@
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+/** A TreeAdaptor proxy that fires debugging events to a DebugEventListener
+ *  delegate and uses the TreeAdaptor delegate to do the actual work.  All
+ *  AST events are triggered by this adaptor; no code gen changes are needed
+ *  in generated rules.  Debugging events are triggered *after* invoking
+ *  tree adaptor routines.
+ *
+ *  Trees created with actions in rewrite actions like "-> ^(ADD {foo} {bar})"
+ *  cannot be tracked as they might not use the adaptor to create foo, bar.
+ *  The debug listener has to deal with tree node IDs for which it did
+ *  not see a createNode event.  A single &lt;unknown&gt; node is sufficient even
+ *  if it represents a whole tree.
+ */
+public class DebugTreeAdaptor implements TreeAdaptor {
+	protected DebugEventListener dbg;
+	protected TreeAdaptor adaptor;
+
+	public DebugTreeAdaptor(DebugEventListener dbg, TreeAdaptor adaptor) {
+		this.dbg = dbg;
+		this.adaptor = adaptor;
+	}
+
+	public Object create(Token payload) {
+		Object node = adaptor.create(payload);
+		dbg.createNode(node, payload);
+		return node;
+	}
+
+	public Object dupTree(Object tree) {
+		// TODO: do these need to be sent to dbg?
+		return adaptor.dupTree(tree);
+	}
+
+	public Object dupNode(Object treeNode) {
+		// TODO: do these need to be sent to dbg?
+		return adaptor.dupNode(treeNode);
+	}
+
+	public Object nil() {
+		Object node = adaptor.nil();
+		dbg.nilNode(node);
+		return node;
+	}
+
+	public boolean isNil(Object tree) {
+		return adaptor.isNil(tree);
+	}
+
+	public void addChild(Object t, Object child) {
+		if ( t==null || child==null ) {
+			return;
+		}
+		adaptor.addChild(t,child);
+		dbg.addChild(t, child);
+	}
+
+	public Object becomeRoot(Object newRoot, Object oldRoot) {
+		Object n = adaptor.becomeRoot(newRoot, oldRoot);
+		dbg.becomeRoot(newRoot, oldRoot);
+		return n;
+	}
+
+	public Object rulePostProcessing(Object root) {
+		return adaptor.rulePostProcessing(root);
+	}
+
+	public void addChild(Object t, Token child) {
+		Object n = this.create(child);
+		this.addChild(t, n);
+	}
+
+	public Object becomeRoot(Token newRoot, Object oldRoot) {
+		Object n = this.create(newRoot);
+		adaptor.becomeRoot(n, oldRoot);
+		dbg.becomeRoot(newRoot, oldRoot);
+		return n;
+	}
+
+	public Object create(int tokenType, Token fromToken) {
+		Object node = adaptor.create(tokenType, fromToken);
+		dbg.createNode(node);
+		return node;
+	}
+
+	public Object create(int tokenType, Token fromToken, String text) {
+		Object node = adaptor.create(tokenType, fromToken, text);
+		dbg.createNode(node);
+		return node;
+	}
+
+	public Object create(int tokenType, String text) {
+		Object node = adaptor.create(tokenType, text);
+		dbg.createNode(node);
+		return node;
+	}
+
+	public int getType(Object t) {
+		return adaptor.getType(t);
+	}
+
+	public void setType(Object t, int type) {
+		adaptor.setType(t, type);
+	}
+
+	public String getText(Object t) {
+		return adaptor.getText(t);
+	}
+
+	public void setText(Object t, String text) {
+		adaptor.setText(t, text);
+	}
+
+	public Token getToken(Object t) {
+		return adaptor.getToken(t);
+	}
+
+	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
+		adaptor.setTokenBoundaries(t, startToken, stopToken);
+		if ( t!=null && startToken!=null && stopToken!=null ) {
+			dbg.setTokenBoundaries(
+				t, startToken.getTokenIndex(),
+				stopToken.getTokenIndex());
+		}
+	}
+
+	public int getTokenStartIndex(Object t) {
+		return adaptor.getTokenStartIndex(t);
+	}
+
+	public int getTokenStopIndex(Object t) {
+		return adaptor.getTokenStopIndex(t);
+	}
+
+	public Object getChild(Object t, int i) {
+		return adaptor.getChild(t, i);
+	}
+
+	public int getChildCount(Object t) {
+		return adaptor.getChildCount(t);
+	}
+
+	public int getUniqueID(Object node) {
+		return adaptor.getUniqueID(node);
+	}
+
+	
+	// support
+
+	public DebugEventListener getDebugEventListener() {
+		return dbg;
+	}
+
+	public void setDebugEventListener(DebugEventListener dbg) {
+		this.dbg = dbg;
+	}
+
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeNodeStream.java b/runtime/Java/src/org/antlr/runtime/debug/DebugTreeNodeStream.java
new file mode 100644
index 0000000..aec3c99
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugTreeNodeStream.java
@@ -0,0 +1,145 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.runtime.TokenStream;
+
+/** Debug any tree node stream.  The constructor accepts the stream
+ *  and a debug listener.  As node stream calls come in, debug events
+ *  are triggered.
+ */
+public class DebugTreeNodeStream implements TreeNodeStream {
+	protected DebugEventListener dbg;
+	protected TreeAdaptor adaptor;
+	protected TreeNodeStream input;
+	protected boolean initialStreamState = true;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	public DebugTreeNodeStream(TreeNodeStream input,
+							   DebugEventListener dbg)
+	{
+		this.input = input;
+		this.adaptor = input.getTreeAdaptor();
+		this.input.setUniqueNavigationNodes(true);
+		setDebugListener(dbg);
+	}
+
+	public void setDebugListener(DebugEventListener dbg) {
+		this.dbg = dbg;
+	}
+
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+
+	public void consume() {
+		Object node = input.LT(1);
+		input.consume();
+		dbg.consumeNode(node);
+	}
+
+	public Object get(int i) {
+		return input.get(i);
+	}
+
+	public Object LT(int i) {
+		Object node = input.LT(i);
+		int ID = adaptor.getUniqueID(node);
+		String text = adaptor.getText(node);
+		int type = adaptor.getType(node);
+		dbg.LT(i, node);
+		return node;
+	}
+
+	public int LA(int i) {
+		Object node = input.LT(i);
+		int ID = adaptor.getUniqueID(node);
+		String text = adaptor.getText(node);
+		int type = adaptor.getType(node);
+		dbg.LT(i, node);
+		return type;
+	}
+
+	public int mark() {
+		lastMarker = input.mark();
+		dbg.mark(lastMarker);
+		return lastMarker;
+	}
+
+	public int index() {
+		return input.index();
+	}
+
+	public void rewind(int marker) {
+		dbg.rewind(marker);
+		input.rewind(marker);
+	}
+
+	public void rewind() {
+		dbg.rewind();
+		input.rewind(lastMarker);
+	}
+
+	public void release(int marker) {
+	}
+
+	public void seek(int index) {
+		// TODO: implement seek in dbg interface
+		// db.seek(index);
+		input.seek(index);
+	}
+
+	public int size() {
+		return input.size();
+	}
+
+	public Object getTreeSource() {
+		return input;
+	}
+
+	public TokenStream getTokenStream() {
+		return input.getTokenStream();
+	}
+
+	/** It is normally this object that instructs the node stream to
+	 *  create unique nav nodes, but to satisfy interface, we have to
+	 *  define it.  It might be better to ignore the parameter but
+	 *  there might be a use for it later, so I'll leave it.
+	 */
+	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
+		input.setUniqueNavigationNodes(uniqueNavigationNodes);
+	}
+
+	public String toString(Object start, Object stop) {
+		return input.toString(start,stop);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeParser.java b/runtime/Java/src/org/antlr/runtime/debug/DebugTreeParser.java
new file mode 100644
index 0000000..7444e16
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/DebugTreeParser.java
@@ -0,0 +1,115 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.TreeParser;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+import java.io.IOException;
+
+public class DebugTreeParser extends TreeParser {
+	/** Who to notify when events in the parser occur. */
+	protected DebugEventListener dbg = null;
+
+	/** Used to differentiate between fixed lookahead and cyclic DFA decisions
+	 *  while profiling.
+ 	 */
+	public boolean isCyclicDecision = false;
+
+	/** Create a normal parser except wrap the token stream in a debug
+	 *  proxy that fires consume events.
+	 */
+	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg) {
+		super(new DebugTreeNodeStream(input,dbg));
+		setDebugListener(dbg);
+	}
+
+	/** Convenience ctor: delegates to the port ctor with the default debugger port. */
+	public DebugTreeParser(TreeNodeStream input) {
+		this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT);
+	}
+
+	/** NOTE(review): the port parameter is never used and dbg stays null,
+	 *  so event methods below (beginResync etc.) will NPE until
+	 *  setDebugListener is called — looks unfinished; confirm intent.
+	 */
+	public DebugTreeParser(TreeNodeStream input, int port) {
+		super(new DebugTreeNodeStream(input,null));
+	}
+
+	/** Provide a new debug event listener for this parser.  Notify the
+	 *  input stream too that it should send events to this listener.
+	 */
+	public void setDebugListener(DebugEventListener dbg) {
+		if ( input instanceof DebugTreeNodeStream ) {
+			((DebugTreeNodeStream)input).setDebugListener(dbg);
+		}
+		this.dbg = dbg;
+	}
+
+	public DebugEventListener getDebugListener() {
+		return dbg;
+	}
+
+	/** Report an I/O problem (e.g. from a socket listener) to stderr. */
+	public void reportError(IOException e) {
+		System.err.println(e);
+		e.printStackTrace(System.err);
+	}
+
+	// The following hooks simply forward recognizer events to the listener.
+
+	public void beginResync() {
+		dbg.beginResync();
+	}
+
+	public void endResync() {
+		dbg.endResync();
+	}
+
+	public void beginBacktrack(int level) {
+		dbg.beginBacktrack(level);
+	}
+
+	public void endBacktrack(int level, boolean successful) {
+		dbg.endBacktrack(level,successful);
+	}
+
+	/** Notify the listener of the exception, then let the superclass recover. */
+	public void recoverFromMismatchedToken(IntStream input,
+										   RecognitionException mte,
+										   int ttype,
+										   BitSet follow)
+		throws RecognitionException
+	{
+		dbg.recognitionException(mte);
+		super.recoverFromMismatchedToken(input,mte,ttype,follow);
+	}
+
+	/** Notify the listener of the exception, then let the superclass recover. */
+	public void recoverFromMismatchedSet(IntStream input,
+										 RecognitionException mte,
+										 org.antlr.runtime.BitSet follow)
+		throws RecognitionException
+	{
+		dbg.recognitionException(mte);
+		super.recoverFromMismatchedSet(input,mte,follow);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/ParseTreeBuilder.java b/runtime/Java/src/org/antlr/runtime/debug/ParseTreeBuilder.java
new file mode 100644
index 0000000..ff915ea
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/ParseTreeBuilder.java
@@ -0,0 +1,80 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.ParseTree;
+
+import java.util.Stack;
+
+/** This parser listener tracks rule entry/exit and token matches
+ *  to build a simple parse tree using ParseTree nodes.
+ */
+/** Listens to rule enter/exit and token-match events from the parser
+ *  and assembles them into a simple ParseTree.
+ */
+public class ParseTreeBuilder extends BlankDebugEventListener {
+	Stack callStack = new Stack();
+
+	public ParseTreeBuilder(String grammarName) {
+		// synthetic root so the first enterRule always has a parent
+		callStack.push(create("<grammar "+grammarName+">"));
+	}
+
+	/** The root pushed by the constructor; it is never popped. */
+	public ParseTree getTree() {
+		return (ParseTree)callStack.elementAt(0);
+	}
+
+	/** Node factory, separated out so subclasses can build other node kinds. */
+	public ParseTree create(Object payload) {
+		return new ParseTree(payload);
+	}
+
+	/** Hook a fresh subtree for this rule under the current top and descend into it. */
+	public void enterRule(String ruleName) {
+		ParseTree node = create(ruleName);
+		((ParseTree)callStack.peek()).addChild(node);
+		callStack.push(node);
+	}
+
+	public void exitRule(String ruleName) {
+		callStack.pop();
+	}
+
+	/** Matched tokens become leaves of the rule currently on top of the stack. */
+	public void consumeToken(Token token) {
+		((ParseTree)callStack.peek()).addChild(create(token));
+	}
+
+	/** Errors are recorded as leaves too, with the exception as payload. */
+	public void recognitionException(RecognitionException e) {
+		((ParseTree)callStack.peek()).addChild(create(e));
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/Profiler.java b/runtime/Java/src/org/antlr/runtime/debug/Profiler.java
new file mode 100644
index 0000000..80c30e7
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/Profiler.java
@@ -0,0 +1,506 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+import org.antlr.runtime.misc.Stats;
+
+import java.util.*;
+import java.io.IOException;
+
+/** Using the debug event interface, track what is happening in the parser
+ *  and record statistics about the runtime.
+ */
+public class Profiler extends BlankDebugEventListener {
+	/** Because I may change the stats, I need to track that for later
+	 *  computations to be consistent.
+	 */
+	public static final String Version = "2";
+	public static final String RUNTIME_STATS_FILENAME = "runtime.stats";
+	// number of tab-separated fields produced by toNotifyString()
+	public static final int NUM_RUNTIME_STATS = 29;
+
+	public DebugParser parser = null;
+
+	// working variables
+
+	protected int ruleLevel = 0;
+	protected int decisionLevel = 0;
+	protected int maxLookaheadInCurrentDecision = 0;
+	protected CommonToken lastTokenConsumed=null;
+
+	// stack of token-stream indexes recorded at each enterDecision
+	protected List lookaheadStack = new ArrayList();
+
+	// stats variables
+
+	public int numRuleInvocations = 0;
+	public int numGuessingRuleInvocations = 0;
+	public int maxRuleInvocationDepth = 0;
+	public int numFixedDecisions = 0;
+	public int numCyclicDecisions = 0;
+	public int numBacktrackDecisions = 0;
+	public int[] decisionMaxFixedLookaheads = new int[200]; // TODO: make List
+	public int[] decisionMaxCyclicLookaheads = new int[200];
+	public List decisionMaxSynPredLookaheads = new ArrayList();
+	public int numHiddenTokens = 0;
+	public int numCharsMatched = 0;
+	public int numHiddenCharsMatched = 0;
+	public int numSemanticPredicates = 0;
+	public int numSyntacticPredicates = 0;
+	protected int numberReportedErrors = 0;
+	public int numMemoizationCacheMisses = 0;
+	public int numMemoizationCacheHits = 0;
+	public int numMemoizationCacheEntries = 0;
+
+	public Profiler() {
+	}
+
+	public Profiler(DebugParser parser) {
+		this.parser = parser;
+	}
+
+	/** Count an invocation and track the deepest rule nesting seen. */
+	public void enterRule(String ruleName) {
+		//System.out.println("enterRule "+ruleName);
+		ruleLevel++;
+		numRuleInvocations++;
+		if ( ruleLevel >maxRuleInvocationDepth ) {
+			maxRuleInvocationDepth = ruleLevel;
+		}
+
+	}
+
+	/** Track memoization; this is not part of standard debug interface
+	 *  but is triggered by profiling.  Code gen inserts an override
+	 *  for this method in the recognizer, which triggers this method.
+	 */
+	public void examineRuleMemoization(IntStream input,
+									   int ruleIndex,
+									   String ruleName)
+	{
+		//System.out.println("examine memo "+ruleName);
+		int stopIndex = parser.getRuleMemoization(ruleIndex, input.index());
+		if ( stopIndex==BaseRecognizer.MEMO_RULE_UNKNOWN ) {
+			//System.out.println("rule "+ruleIndex+" missed @ "+input.index());
+			numMemoizationCacheMisses++;
+			numGuessingRuleInvocations++; // we'll have to enter
+		}
+		else {
+			// regardless of rule success/failure, if in cache, we have a cache hit
+			//System.out.println("rule "+ruleIndex+" hit @ "+input.index());
+			numMemoizationCacheHits++;
+		}
+	}
+
+	/** Count entries added to the memoization table. */
+	public void memoize(IntStream input,
+						int ruleIndex,
+						int ruleStartIndex,
+						String ruleName)
+	{
+		// count how many entries go into table
+		//System.out.println("memoize "+ruleName);
+		numMemoizationCacheEntries++;
+	}
+
+	public void exitRule(String ruleName) {
+		ruleLevel--;
+	}
+
+	/** Push the current token index so exitDecision/LT can compute lookahead depth. */
+	public void enterDecision(int decisionNumber) {
+		decisionLevel++;
+		int startingLookaheadIndex = parser.getTokenStream().index();
+		//System.out.println("enterDecision "+decisionNumber+" @ index "+startingLookaheadIndex);
+		lookaheadStack.add(new Integer(startingLookaheadIndex));
+	}
+
+	/** Classify the decision as fixed or cyclic (only known now, not at enter)
+	 *  and record the max lookahead it used.
+	 */
+	public void exitDecision(int decisionNumber) {
+		//System.out.println("exitDecision "+decisionNumber);
+		// track how many of acyclic, cyclic here as we don't know what kind
+		// yet in enterDecision event.
+		if ( parser.isCyclicDecision ) {
+			numCyclicDecisions++;
+		}
+		else {
+			numFixedDecisions++;
+		}
+		lookaheadStack.remove(lookaheadStack.size()-1); // pop lookahead depth counter
+		decisionLevel--;
+		if ( parser.isCyclicDecision ) {
+			// grow-on-demand array; index = count-1 is safe, we just incremented
+			if ( numCyclicDecisions>=decisionMaxCyclicLookaheads.length ) {
+				int[] bigger = new int[decisionMaxCyclicLookaheads.length*2];
+				System.arraycopy(decisionMaxCyclicLookaheads,0,bigger,0,decisionMaxCyclicLookaheads.length);
+				decisionMaxCyclicLookaheads = bigger;
+			}
+			decisionMaxCyclicLookaheads[numCyclicDecisions-1] = maxLookaheadInCurrentDecision;
+		}
+		else {
+			if ( numFixedDecisions>=decisionMaxFixedLookaheads.length ) {
+				int[] bigger = new int[decisionMaxFixedLookaheads.length*2];
+				System.arraycopy(decisionMaxFixedLookaheads,0,bigger,0,decisionMaxFixedLookaheads.length);
+				decisionMaxFixedLookaheads = bigger;
+			}
+			decisionMaxFixedLookaheads[numFixedDecisions-1] = maxLookaheadInCurrentDecision;
+		}
+		parser.isCyclicDecision = false; // can't nest so just reset to false
+		maxLookaheadInCurrentDecision = 0;
+	}
+
+	public void consumeToken(Token token) {
+		//System.out.println("consume token "+token);
+		lastTokenConsumed = (CommonToken)token;
+	}
+
+	/** The parser is in a decision if the decision depth > 0.  This
+	 *  works for backtracking also, which can have nested decisions.
+	 */
+	public boolean inDecision() {
+		return decisionLevel>0;
+	}
+
+	public void consumeHiddenToken(Token token) {
+		//System.out.println("consume hidden token "+token);
+		lastTokenConsumed = (CommonToken)token;
+	}
+
+	/** Track refs to lookahead if in a fixed/nonfixed decision.
+	 *  Depth is distance from decision start, ignoring hidden tokens.
+	 */
+	public void LT(int i, Token t) {
+		if ( inDecision() ) {
+			// get starting index off stack
+			int stackTop = lookaheadStack.size()-1;
+			Integer startingIndex = (Integer)lookaheadStack.get(stackTop);
+			// compute lookahead depth
+			int thisRefIndex = parser.getTokenStream().index();
+			int numHidden =
+				getNumberOfHiddenTokens(startingIndex.intValue(), thisRefIndex);
+			int depth = i + thisRefIndex - startingIndex.intValue() - numHidden;
+			/*
+			System.out.println("LT("+i+") @ index "+thisRefIndex+" is depth "+depth+
+				" max is "+maxLookaheadInCurrentDecision);
+			*/
+			if ( depth>maxLookaheadInCurrentDecision ) {
+				maxLookaheadInCurrentDecision = depth;
+			}
+		}
+	}
+
+	/** Track backtracking decisions.  You'll see a fixed or cyclic decision
+	 *  and then a backtrack.
+	 *
+	 * 		enter rule
+	 * 		...
+	 * 		enter decision
+	 * 		LA and possibly consumes (for cyclic DFAs)
+	 * 		begin backtrack level
+	 * 		mark m
+	 * 		rewind m
+	 * 		end backtrack level, success
+	 * 		exit decision
+	 * 		...
+	 * 		exit rule
+	 */
+	public void beginBacktrack(int level) {
+		//System.out.println("enter backtrack "+level);
+		numBacktrackDecisions++;
+	}
+
+	/** Successful or not, track how much lookahead synpreds use */
+	public void endBacktrack(int level, boolean successful) {
+		//System.out.println("exit backtrack "+level+": "+successful);
+		decisionMaxSynPredLookaheads.add(
+			new Integer(maxLookaheadInCurrentDecision)
+		);
+	}
+
+	/*
+	public void mark(int marker) {
+		int i = parser.getTokenStream().index();
+		System.out.println("mark @ index "+i);
+		synPredLookaheadStack.add(new Integer(i));
+	}
+
+	public void rewind(int marker) {
+		// pop starting index off stack
+		int stackTop = synPredLookaheadStack.size()-1;
+		Integer startingIndex = (Integer)synPredLookaheadStack.get(stackTop);
+		synPredLookaheadStack.remove(synPredLookaheadStack.size()-1);
+		// compute lookahead depth
+		int stopIndex = parser.getTokenStream().index();
+		System.out.println("rewind @ index "+stopIndex);
+		int depth = stopIndex - startingIndex.intValue();
+		System.out.println("depth of lookahead for synpred: "+depth);
+		decisionMaxSynPredLookaheads.add(
+			new Integer(depth)
+		);
+	}
+	*/
+
+	public void recognitionException(RecognitionException e) {
+		numberReportedErrors++;
+	}
+
+	public void semanticPredicate(boolean result, String predicate) {
+		if ( inDecision() ) {
+			numSemanticPredicates++;
+		}
+	}
+
+	/** On termination, write the tab-separated stats report and echo it. */
+	public void terminate() {
+		String stats = toNotifyString();
+		try {
+			Stats.writeReport(RUNTIME_STATS_FILENAME,stats);
+		}
+		catch (IOException ioe) {
+			System.err.println(ioe);
+			ioe.printStackTrace(System.err);
+		}
+		System.out.println(toString(stats));
+	}
+
+	public void setParser(DebugParser parser) {
+		this.parser = parser;
+	}
+
+	// R E P O R T I N G
+
+	/** Serialize all stats into one tab-separated line (NUM_RUNTIME_STATS fields). */
+	public String toNotifyString() {
+		TokenStream input = parser.getTokenStream();
+		for (int i=0; i<input.size()&&lastTokenConsumed!=null&&i<=lastTokenConsumed.getTokenIndex(); i++) {
+			Token t = input.get(i);
+			if ( t.getChannel()!=Token.DEFAULT_CHANNEL ) {
+				numHiddenTokens++;
+				numHiddenCharsMatched += t.getText().length();
+			}
+		}
+		// FIX: guard against NPE when no token was ever consumed; the loop
+		// above already checks lastTokenConsumed!=null but this line did not.
+		numCharsMatched = lastTokenConsumed!=null ? lastTokenConsumed.getStopIndex() + 1 : 0;
+		decisionMaxFixedLookaheads = trim(decisionMaxFixedLookaheads, numFixedDecisions);
+		decisionMaxCyclicLookaheads = trim(decisionMaxCyclicLookaheads, numCyclicDecisions);
+		StringBuffer buf = new StringBuffer();
+		buf.append(Version);
+		buf.append('\t');
+		buf.append(parser.getClass().getName());
+		buf.append('\t');
+		buf.append(numRuleInvocations);
+		buf.append('\t');
+		buf.append(maxRuleInvocationDepth);
+		buf.append('\t');
+		buf.append(numFixedDecisions);
+		buf.append('\t');
+		buf.append(Stats.min(decisionMaxFixedLookaheads));
+		buf.append('\t');
+		buf.append(Stats.max(decisionMaxFixedLookaheads));
+		buf.append('\t');
+		buf.append(Stats.avg(decisionMaxFixedLookaheads));
+		buf.append('\t');
+		buf.append(Stats.stddev(decisionMaxFixedLookaheads));
+		buf.append('\t');
+		buf.append(numCyclicDecisions);
+		buf.append('\t');
+		buf.append(Stats.min(decisionMaxCyclicLookaheads));
+		buf.append('\t');
+		buf.append(Stats.max(decisionMaxCyclicLookaheads));
+		buf.append('\t');
+		buf.append(Stats.avg(decisionMaxCyclicLookaheads));
+		buf.append('\t');
+		buf.append(Stats.stddev(decisionMaxCyclicLookaheads));
+		buf.append('\t');
+		buf.append(numBacktrackDecisions);
+		buf.append('\t');
+		buf.append(Stats.min(toArray(decisionMaxSynPredLookaheads)));
+		buf.append('\t');
+		buf.append(Stats.max(toArray(decisionMaxSynPredLookaheads)));
+		buf.append('\t');
+		buf.append(Stats.avg(toArray(decisionMaxSynPredLookaheads)));
+		buf.append('\t');
+		buf.append(Stats.stddev(toArray(decisionMaxSynPredLookaheads)));
+		buf.append('\t');
+		buf.append(numSemanticPredicates);
+		buf.append('\t');
+		buf.append(parser.getTokenStream().size());
+		buf.append('\t');
+		buf.append(numHiddenTokens);
+		buf.append('\t');
+		buf.append(numCharsMatched);
+		buf.append('\t');
+		buf.append(numHiddenCharsMatched);
+		buf.append('\t');
+		buf.append(numberReportedErrors);
+		buf.append('\t');
+		buf.append(numMemoizationCacheHits);
+		buf.append('\t');
+		buf.append(numMemoizationCacheMisses);
+		buf.append('\t');
+		buf.append(numGuessingRuleInvocations);
+		buf.append('\t');
+		buf.append(numMemoizationCacheEntries);
+		return buf.toString();
+	}
+
+	public String toString() {
+		return toString(toNotifyString());
+	}
+
+	/** Split a serialized report line back into its fields; null if malformed. */
+	protected static String[] decodeReportData(String data) {
+		String[] fields = new String[NUM_RUNTIME_STATS];
+		StringTokenizer st = new StringTokenizer(data, "\t");
+		int i = 0;
+		while ( st.hasMoreTokens() ) {
+			fields[i] = st.nextToken();
+			i++;
+		}
+		if ( i!=NUM_RUNTIME_STATS ) {
+			return null;
+		}
+		return fields;
+	}
+
+	/** Render a serialized report line as a human-readable multi-line report. */
+	public static String toString(String notifyDataLine) {
+		String[] fields = decodeReportData(notifyDataLine);
+		if ( fields==null ) {
+			return null;
+		}
+		StringBuffer buf = new StringBuffer();
+		buf.append("ANTLR Runtime Report; Profile Version ");
+		buf.append(fields[0]);
+		buf.append('\n');
+		buf.append("parser name ");
+		buf.append(fields[1]);
+		buf.append('\n');
+		buf.append("Number of rule invocations ");
+		buf.append(fields[2]);
+		buf.append('\n');
+		buf.append("Number of rule invocations in \"guessing\" mode ");
+		buf.append(fields[27]);
+		buf.append('\n');
+		buf.append("max rule invocation nesting depth ");
+		buf.append(fields[3]);
+		buf.append('\n');
+		buf.append("number of fixed lookahead decisions ");
+		buf.append(fields[4]);
+		buf.append('\n');
+		buf.append("min lookahead used in a fixed lookahead decision ");
+		buf.append(fields[5]);
+		buf.append('\n');
+		buf.append("max lookahead used in a fixed lookahead decision ");
+		buf.append(fields[6]);
+		buf.append('\n');
+		buf.append("average lookahead depth used in fixed lookahead decisions ");
+		buf.append(fields[7]);
+		buf.append('\n');
+		buf.append("standard deviation of depth used in fixed lookahead decisions ");
+		buf.append(fields[8]);
+		buf.append('\n');
+		buf.append("number of arbitrary lookahead decisions ");
+		buf.append(fields[9]);
+		buf.append('\n');
+		buf.append("min lookahead used in an arbitrary lookahead decision ");
+		buf.append(fields[10]);
+		buf.append('\n');
+		buf.append("max lookahead used in an arbitrary lookahead decision ");
+		buf.append(fields[11]);
+		buf.append('\n');
+		buf.append("average lookahead depth used in arbitrary lookahead decisions ");
+		buf.append(fields[12]);
+		buf.append('\n');
+		buf.append("standard deviation of depth used in arbitrary lookahead decisions ");
+		buf.append(fields[13]);
+		buf.append('\n');
+		buf.append("number of evaluated syntactic predicates ");
+		buf.append(fields[14]);
+		buf.append('\n');
+		buf.append("min lookahead used in a syntactic predicate ");
+		buf.append(fields[15]);
+		buf.append('\n');
+		buf.append("max lookahead used in a syntactic predicate ");
+		buf.append(fields[16]);
+		buf.append('\n');
+		buf.append("average lookahead depth used in syntactic predicates ");
+		buf.append(fields[17]);
+		buf.append('\n');
+		buf.append("standard deviation of depth used in syntactic predicates ");
+		buf.append(fields[18]);
+		buf.append('\n');
+		buf.append("rule memoization cache size ");
+		buf.append(fields[28]);
+		buf.append('\n');
+		buf.append("number of rule memoization cache hits ");
+		buf.append(fields[25]);
+		buf.append('\n');
+		buf.append("number of rule memoization cache misses ");
+		buf.append(fields[26]);
+		buf.append('\n');
+		buf.append("number of evaluated semantic predicates ");
+		buf.append(fields[19]);
+		buf.append('\n');
+		buf.append("number of tokens ");
+		buf.append(fields[20]);
+		buf.append('\n');
+		buf.append("number of hidden tokens ");
+		buf.append(fields[21]);
+		buf.append('\n');
+		buf.append("number of char ");
+		buf.append(fields[22]);
+		buf.append('\n');
+		buf.append("number of hidden char ");
+		buf.append(fields[23]);
+		buf.append('\n');
+		buf.append("number of syntax errors ");
+		buf.append(fields[24]);
+		buf.append('\n');
+		return buf.toString();
+	}
+
+	/** Shrink a grow-on-demand stats array down to its n live entries. */
+	protected int[] trim(int[] X, int n) {
+		if ( n<X.length ) {
+			int[] trimmed = new int[n];
+			System.arraycopy(X,0,trimmed,0,n);
+			X = trimmed;
+		}
+		return X;
+	}
+
+	/** Unbox a List of Integer into an int[] for the Stats helpers. */
+	protected int[] toArray(List a) {
+		int[] x = new int[a.size()];
+		for (int i = 0; i < a.size(); i++) {
+			Integer I = (Integer) a.get(i);
+			x[i] = I.intValue();
+		}
+		return x;
+	}
+
+	/** Get num hidden tokens between i..j inclusive */
+	public int getNumberOfHiddenTokens(int i, int j) {
+		int n = 0;
+		TokenStream input = parser.getTokenStream();
+		for (int ti = i; ti<input.size() && ti <= j; ti++) {
+			Token t = input.get(ti);
+			if ( t.getChannel()!=Token.DEFAULT_CHANNEL ) {
+				n++;
+			}
+		}
+		return n;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java b/runtime/Java/src/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
new file mode 100644
index 0000000..0c0fbb5
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
@@ -0,0 +1,511 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.BaseTree;
+import org.antlr.runtime.tree.Tree;
+
+import java.io.*;
+import java.net.ConnectException;
+import java.net.Socket;
+import java.util.StringTokenizer;
+
+public class RemoteDebugEventSocketListener implements Runnable {
+	static final int MAX_EVENT_ELEMENTS = 8;
+	DebugEventListener listener;
+	String machine;
+	int port;
+	Socket channel = null;
+	PrintWriter out;
+	BufferedReader in;
+	String event;
+	/** Version of ANTLR (dictates events) */
+	public String version;
+	public String grammarFileName;
+	/** Track the last token index we saw during a consume.  If same, then
+	 *  set a flag that we have a problem.
+	 */
+	int previousTokenIndex = -1;
+	boolean tokenIndexesInvalid = false;
+
+	/** A plain value-object Token reconstructed from a serialized debug
+	 *  event on the wire; all fields are simple copies, no stream backing.
+	 */
+	public static class ProxyToken implements Token {
+		int index;
+		int type;
+		int channel;
+		int line;
+		int charPos;
+		String text;
+		public ProxyToken(int index) { this.index = index; }
+		public ProxyToken(int index, int type, int channel,
+						  int line, int charPos, String text)
+		{
+			this.index = index;
+			this.type = type;
+			this.channel = channel;
+			this.line = line;
+			this.charPos = charPos;
+			this.text = text;
+		}
+		public String getText() {
+			return text;
+		}
+		public void setText(String text) {
+			this.text = text;
+		}
+		public int getType() {
+			return type;
+		}
+		public void setType(int ttype) {
+			this.type = ttype;
+		}
+		public int getLine() {
+			return line;
+		}
+		public void setLine(int line) {
+			this.line = line;
+		}
+		public int getCharPositionInLine() {
+			return charPos;
+		}
+		public void setCharPositionInLine(int pos) {
+			this.charPos = pos;
+		}
+		public int getChannel() {
+			return channel;
+		}
+		public void setChannel(int channel) {
+			this.channel = channel;
+		}
+		public int getTokenIndex() {
+			return index;
+		}
+		public void setTokenIndex(int index) {
+			this.index = index;
+		}
+		public String toString() {
+			// channel only shown when off the default channel, to keep output terse
+			String channelStr = "";
+			if ( channel!=Token.DEFAULT_CHANNEL ) {
+				channelStr=",channel="+channel;
+			}
+			return "["+getText()+"/<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+",@"+index+"]";
+		}
+	}
+
+	/** A plain value-object tree node reconstructed from a serialized debug
+	 *  event; token start/stop setters are no-ops because there is no
+	 *  backing token stream on the listener side.
+	 */
+	public static class ProxyTree extends BaseTree {
+		public int ID;
+		public int type;
+		public int line = 0;
+		public int charPos = -1;
+		public int tokenIndex = -1;
+		public String text;
+
+		public ProxyTree(int ID, int type, int line, int charPos, int tokenIndex, String text) {
+			this.ID = ID;
+			this.type = type;
+			this.line = line;
+			this.charPos = charPos;
+			this.tokenIndex = tokenIndex;
+			this.text = text;
+		}
+
+		public ProxyTree(int ID) { this.ID = ID; }
+
+		public int getTokenStartIndex() { return tokenIndex; }
+		public void setTokenStartIndex(int index) {	}
+		public int getTokenStopIndex() { return 0; }
+		public void setTokenStopIndex(int index) { }
+		public Tree dupNode() {	return null; }
+		public int getType() { return type; }
+		public String getText() { return text; }
+		/** FIX: was the literal placeholder "fix this"; render text and type
+		 *  so remote trees are readable when displayed by a debugger.
+		 */
+		public String toString() {
+			return text+"<"+type+">";
+		}
+	}
+
+	/** Connect to a debuggee at machine:port and forward decoded events to
+	 *  listener.  Throws ConnectException (no message/cause attached) when
+	 *  the socket cannot be opened.
+	 */
+	public RemoteDebugEventSocketListener(DebugEventListener listener,
+										  String machine,
+										  int port) throws IOException
+	{
+		this.listener = listener;
+		this.machine = machine;
+		this.port = port;
+
+        if( !openConnection() ) {
+            throw new ConnectException();
+        }
+	}
+
+	/** Main event loop: handshake, then read-dispatch-ack one event per line
+	 *  until EOF (readLine returns null); always closes the connection.
+	 */
+	protected void eventHandler() {
+		try {
+			handshake();
+			event = in.readLine();
+			while ( event!=null ) {
+				dispatch(event);
+				ack();
+				event = in.readLine();
+			}
+		}
+		catch (Exception e) {
+			System.err.println(e);
+			e.printStackTrace(System.err);
+		}
+		finally {
+            closeConnection();
+		}
+	}
+
+    /** Open the socket plus UTF-8 reader/writer; TCP_NODELAY is set so small
+     *  event lines are not delayed.  Returns false (after logging) on failure.
+     */
+    protected boolean openConnection() {
+        boolean success = false;
+        try {
+            channel = new Socket(machine, port);
+            channel.setTcpNoDelay(true);
+			OutputStream os = channel.getOutputStream();
+			OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
+			out = new PrintWriter(new BufferedWriter(osw));
+			InputStream is = channel.getInputStream();
+			InputStreamReader isr = new InputStreamReader(is, "UTF8");
+			in = new BufferedReader(isr);
+            success = true;
+        } catch(Exception e) {
+            System.err.println(e);
+        }
+        return success;
+    }
+
+    /** Close reader, writer and socket.  The finally block re-closes
+     *  whatever the try did not manage to null out, so a failure closing
+     *  one resource does not leak the others.
+     */
+    protected void closeConnection() {
+        try {
+            in.close(); in = null;
+            out.close(); out = null;
+            channel.close(); channel=null;
+        }
+        catch (Exception e) {
+            System.err.println(e);
+            e.printStackTrace(System.err);
+        }
+        finally {
+            if ( in!=null ) {
+                try {in.close();} catch (IOException ioe) {
+                    System.err.println(ioe);
+                }
+            }
+            if ( out!=null ) {
+                out.close();
+            }
+            if ( channel!=null ) {
+                try {channel.close();} catch (IOException ioe) {
+                    System.err.println(ioe);
+                }
+            }
+        }
+
+    }
+
+	/** Read the two header lines sent by the debuggee (ANTLR version, then
+	 *  grammar file name; element [1] of each), ack, then tell the listener
+	 *  the session has commenced.
+	 */
+	protected void handshake() throws IOException {
+		String antlrLine = in.readLine();
+		String[] antlrElements = getEventElements(antlrLine);
+		version = antlrElements[1];
+		String grammarLine = in.readLine();
+		String[] grammarElements = getEventElements(grammarLine);
+		grammarFileName = grammarElements[1];
+		ack();
+		listener.commence(); // inform listener after handshake
+	}
+
+	/** Acknowledge the last event; flushed immediately so the debuggee can proceed. */
+	protected void ack() {
+        out.println("ack");
+		out.flush();
+	}
+
	/** Decode one line of the socket protocol and forward it to the
	 *  listener as the corresponding DebugEventListener call.  The first
	 *  whitespace-delimited element names the event; the remaining
	 *  elements are its arguments (see getEventElements for the split
	 *  rules).  Unrecognized events are reported to stderr and ignored.
	 */
	protected void dispatch(String line) {
		String[] elements = getEventElements(line);
		if ( elements==null || elements[0]==null ) {
			System.err.println("unknown debug event: "+line);
			return;
		}
		if ( elements[0].equals("enterRule") ) {
			listener.enterRule(elements[1]);
		}
		else if ( elements[0].equals("exitRule") ) {
			listener.exitRule(elements[1]);
		}
		else if ( elements[0].equals("enterAlt") ) {
			listener.enterAlt(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("enterSubRule") ) {
			listener.enterSubRule(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("exitSubRule") ) {
			listener.exitSubRule(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("enterDecision") ) {
			listener.enterDecision(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("exitDecision") ) {
			listener.exitDecision(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("location") ) {
			// location <line> <charPositionInLine>
			listener.location(Integer.parseInt(elements[1]),
							  Integer.parseInt(elements[2]));
		}
		else if ( elements[0].equals("consumeToken") ) {
			ProxyToken t = deserializeToken(elements, 1);
			// the same token index arriving twice in a row means the remote
			// stream re-sent a token, so indexes can no longer be trusted
			if ( t.getTokenIndex() == previousTokenIndex ) {
				tokenIndexesInvalid = true;
			}
			previousTokenIndex = t.getTokenIndex();
			listener.consumeToken(t);
		}
		else if ( elements[0].equals("consumeHiddenToken") ) {
			ProxyToken t = deserializeToken(elements, 1);
			// same duplicate-index tracking as consumeToken above
			if ( t.getTokenIndex() == previousTokenIndex ) {
				tokenIndexesInvalid = true;
			}
			previousTokenIndex = t.getTokenIndex();
			listener.consumeHiddenToken(t);
		}
		else if ( elements[0].equals("LT") ) {
			// LT <k> <serialized token>: token starts at element 2
			Token t = deserializeToken(elements, 2);
			listener.LT(Integer.parseInt(elements[1]), t);
		}
		else if ( elements[0].equals("mark") ) {
			listener.mark(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("rewind") ) {
			// rewind may or may not carry an explicit marker argument
			if ( elements[1]!=null ) {
				listener.rewind(Integer.parseInt(elements[1]));
			}
			else {
				listener.rewind();
			}
		}
		else if ( elements[0].equals("beginBacktrack") ) {
			listener.beginBacktrack(Integer.parseInt(elements[1]));
		}
		else if ( elements[0].equals("endBacktrack") ) {
			int level = Integer.parseInt(elements[1]);
			int successI = Integer.parseInt(elements[2]);
			listener.endBacktrack(level, successI==DebugEventListener.TRUE);
		}
		else if ( elements[0].equals("exception") ) {
			// exception <class name> <index> <line> <charPositionInLine>;
			// rebuild the RecognitionException reflectively via its
			// no-arg constructor, then restore the location fields
			String excName = elements[1];
			String indexS = elements[2];
			String lineS = elements[3];
			String posS = elements[4];
			Class excClass = null;
			try {
				excClass = Class.forName(excName);
				RecognitionException e =
					(RecognitionException)excClass.newInstance();
				e.index = Integer.parseInt(indexS);
				e.line = Integer.parseInt(lineS);
				e.charPositionInLine = Integer.parseInt(posS);
				listener.recognitionException(e);
			}
			catch (ClassNotFoundException cnfe) {
				System.err.println("can't find class "+cnfe);
				cnfe.printStackTrace(System.err);
			}
			catch (InstantiationException ie) {
				System.err.println("can't instantiate class "+ie);
				ie.printStackTrace(System.err);
			}
			catch (IllegalAccessException iae) {
				System.err.println("can't access class "+iae);
				iae.printStackTrace(System.err);
			}
		}
		else if ( elements[0].equals("beginResync") ) {
			listener.beginResync();
		}
		else if ( elements[0].equals("endResync") ) {
			listener.endResync();
		}
		else if ( elements[0].equals("terminate") ) {
			listener.terminate();
		}
		else if ( elements[0].equals("semanticPredicate") ) {
			Boolean result = Boolean.valueOf(elements[1]);
			String predicateText = elements[2];
			// predicate text travels escaped over the wire
			predicateText = unEscapeNewlines(predicateText);
			listener.semanticPredicate(result.booleanValue(),
									   predicateText);
		}
		else if ( elements[0].equals("consumeNode") ) {
			ProxyTree node = deserializeNode(elements, 1);
			listener.consumeNode(node);
		}
		else if ( elements[0].equals("LN") ) {
			// LN <k> <serialized node>: node lookahead, forwarded as LT
			int i = Integer.valueOf(elements[1]);
			ProxyTree node = deserializeNode(elements, 2);
			listener.LT(i, node);
		}
		else if ( elements[0].equals("createNodeFromTokenElements") ) {
			int ID = Integer.valueOf(elements[1]);
			int type = Integer.valueOf(elements[2]);
			String text = elements[3];
			text = unEscapeNewlines(text);
			// line/charPosition/tokenIndex are not transmitted; use -1
			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
			listener.createNode(node);
		}
		else if ( elements[0].equals("createNode") ) {
			int ID = Integer.valueOf(elements[1]);
			int tokenIndex = Integer.valueOf(elements[2]);
			// create dummy node/token filled with ID, tokenIndex
			ProxyTree node = new ProxyTree(ID);
			ProxyToken token = new ProxyToken(tokenIndex);
			listener.createNode(node, token);
		}
		else if ( elements[0].equals("nilNode") ) {
			int ID = Integer.valueOf(elements[1]);
			ProxyTree node = new ProxyTree(ID);
			listener.nilNode(node);
		}
		else if ( elements[0].equals("becomeRoot") ) {
			// only node identity (unique ID) crosses the wire
			int newRootID = Integer.valueOf(elements[1]);
			int oldRootID = Integer.valueOf(elements[2]);
			ProxyTree newRoot = new ProxyTree(newRootID);
			ProxyTree oldRoot = new ProxyTree(oldRootID);
			listener.becomeRoot(newRoot, oldRoot);
		}
		else if ( elements[0].equals("addChild") ) {
			int rootID = Integer.valueOf(elements[1]);
			int childID = Integer.valueOf(elements[2]);
			ProxyTree root = new ProxyTree(rootID);
			ProxyTree child = new ProxyTree(childID);
			listener.addChild(root, child);
		}
		else if ( elements[0].equals("setTokenBoundaries") ) {
			// setTokenBoundaries <ID> <startIndex> <stopIndex>
			int ID = Integer.valueOf(elements[1]);
			ProxyTree node = new ProxyTree(ID);
			listener.setTokenBoundaries(
				node,
				Integer.parseInt(elements[2]),
				Integer.parseInt(elements[3]));
		}
		else {
			System.err.println("unknown debug event: "+line);
		}
	}
+
+	protected ProxyTree deserializeNode(String[] elements, int offset) {
+		int ID = Integer.valueOf(elements[offset+0]);
+		int type = Integer.valueOf(elements[offset+1]);
+		int tokenLine = Integer.valueOf(elements[offset+2]);
+		int charPositionInLine = Integer.valueOf(elements[offset+3]);
+		int tokenIndex = Integer.valueOf(elements[offset+4]);
+		String text = elements[offset+5];
+		text = unEscapeNewlines(text);
+		return new ProxyTree(ID, type, tokenLine, charPositionInLine, tokenIndex, text);
+	}
+
+	protected ProxyToken deserializeToken(String[] elements,
+										  int offset)
+	{
+		String indexS = elements[offset+0];
+		String typeS = elements[offset+1];
+		String channelS = elements[offset+2];
+		String lineS = elements[offset+3];
+		String posS = elements[offset+4];
+		String text = elements[offset+5];
+		text = unEscapeNewlines(text);
+		int index = Integer.parseInt(indexS);
+		ProxyToken t =
+			new ProxyToken(index,
+						   Integer.parseInt(typeS),
+						   Integer.parseInt(channelS),
+						   Integer.parseInt(lineS),
+						   Integer.parseInt(posS),
+						   text);
+		return t;
+	}
+
+	/** Create a thread to listen to the remote running recognizer */
+	public void start() {
+		Thread t = new Thread(this);
+		t.start();
+	}
+
	public void run() {
		// thread body: read and dispatch events from the remote
		// recognizer until the stream is exhausted
		eventHandler();
	}
+
+	// M i s c
+
+	public String[] getEventElements(String event) {
+		if ( event==null ) {
+			return null;
+		}
+		String[] elements = new String[MAX_EVENT_ELEMENTS];
+		String str = null; // a string element if present (must be last)
+		try {
+			int firstQuoteIndex = event.indexOf('"');
+			if ( firstQuoteIndex>=0 ) {
+				// treat specially; has a string argument like "a comment\n
+				// Note that the string is terminated by \n not end quote.
+				// Easier to parse that way.
+				String eventWithoutString = event.substring(0,firstQuoteIndex);
+				str = event.substring(firstQuoteIndex+1,event.length());
+				event = eventWithoutString;
+			}
+			StringTokenizer st = new StringTokenizer(event, " \t", false);
+			int i = 0;
+			while ( st.hasMoreTokens() ) {
+				if ( i>=MAX_EVENT_ELEMENTS ) {
+					// ErrorManager.internalError("event has more than "+MAX_EVENT_ELEMENTS+" args: "+event);
+					return elements;
+				}
+				elements[i] = st.nextToken();
+				i++;
+			}
+			if ( str!=null ) {
+				elements[i] = str;
+			}
+		}
+		catch (Exception e) {
+			e.printStackTrace(System.err);
+		}
+		return elements;
+	}
+
+	protected String unEscapeNewlines(String txt) {
+		// this unescape is slow but easy to understand
+		txt = txt.replaceAll("%0A","\n");  // unescape \n
+		txt = txt.replaceAll("%0D","\r");  // unescape \r
+		txt = txt.replaceAll("%25","%");   // undo escaped escape chars
+		return txt;
+	}
+
	/** Report whether duplicate token indexes were seen on the wire.
	 *  Currently hard-wired to false: the tokenIndexesInvalid flag is
	 *  still maintained by dispatch() but deliberately not reported
	 *  (see the commented-out line below).
	 */
	public boolean tokenIndexesAreInvalid() {
		return false;
		//return tokenIndexesInvalid;
	}
+
+}
+
diff --git a/runtime/Java/src/org/antlr/runtime/debug/TraceDebugEventListener.java b/runtime/Java/src/org/antlr/runtime/debug/TraceDebugEventListener.java
new file mode 100644
index 0000000..99e17c7
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/TraceDebugEventListener.java
@@ -0,0 +1,69 @@
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+/** Print out (most of) the events... Useful for debugging, testing... */
+public class TraceDebugEventListener extends BlankDebugEventListener {
+	TreeAdaptor adaptor;
+
+	public TraceDebugEventListener(TreeAdaptor adaptor) {
+		this.adaptor = adaptor;
+	}
+
+	public void enterRule(String ruleName) { System.out.println("enterRule "+ruleName); }
+	public void exitRule(String ruleName) { System.out.println("exitRule "+ruleName); }
+	public void enterSubRule(int decisionNumber) { System.out.println("enterSubRule"); }
+	public void exitSubRule(int decisionNumber) { System.out.println("exitSubRule"); }
+	public void location(int line, int pos) {System.out.println("location "+line+":"+pos);}
+
+	// Tree parsing stuff
+
+	public void consumeNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		System.out.println("consumeNode "+ID+" "+text+" "+type);
+	}
+
+	public void LT(int i, Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		System.out.println("LT "+i+" "+ID+" "+text+" "+type);
+	}
+
+
+	// AST stuff
+	public void nilNode(Object t) {System.out.println("nilNode "+adaptor.getUniqueID(t));}
+
+	public void createNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		System.out.println("create "+ID+": "+text+", "+type);
+	}
+
+	public void createNode(Object node, Token token) {
+		int ID = adaptor.getUniqueID(node);
+		String text = adaptor.getText(node);
+		int tokenIndex = token.getTokenIndex();
+		System.out.println("create "+ID+": "+tokenIndex);
+	}
+
+	public void becomeRoot(Object newRoot, Object oldRoot) {
+		System.out.println("becomeRoot "+adaptor.getUniqueID(newRoot)+", "+
+						   adaptor.getUniqueID(oldRoot));
+	}
+
+	public void addChild(Object root, Object child) {
+		System.out.println("addChild "+adaptor.getUniqueID(root)+", "+
+						   adaptor.getUniqueID(child));
+	}
+
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		System.out.println("setTokenBoundaries "+adaptor.getUniqueID(t)+", "+
+						   tokenStartIndex+", "+tokenStopIndex);
+	}
+}
+
diff --git a/runtime/Java/src/org/antlr/runtime/debug/Tracer.java b/runtime/Java/src/org/antlr/runtime/debug/Tracer.java
new file mode 100644
index 0000000..dbc663e
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/debug/Tracer.java
@@ -0,0 +1,65 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.IntStream;
+import org.antlr.runtime.TokenStream;
+
+/** The default tracer mimics the traceParser behavior of ANTLR 2.x.
+ *  This listens for debugging events from the parser and implies
+ *  that you cannot debug and trace at the same time.
+ */
+public class Tracer extends BlankDebugEventListener {
+	public IntStream input;
+	protected int level = 0;
+
+	public Tracer(IntStream input) {
+		this.input = input;
+	}
+
+	public void enterRule(String ruleName) {
+		for (int i=1; i<=level; i++) {System.out.print(" ");}
+		System.out.println("> "+ruleName+" lookahead(1)="+getInputSymbol(1));
+		level++;
+	}
+
+	public void exitRule(String ruleName) {
+		level--;
+		for (int i=1; i<=level; i++) {System.out.print(" ");}
+		System.out.println("< "+ruleName+" lookahead(1)="+getInputSymbol(1));
+	}
+
+	public Object getInputSymbol(int k) {
+		if ( input instanceof TokenStream ) {
+			return ((TokenStream)input).LT(k);
+		}
+		return new Character((char)input.LA(k));
+	}
+}
+
+
diff --git a/runtime/Java/src/org/antlr/runtime/misc/Stats.java b/runtime/Java/src/org/antlr/runtime/misc/Stats.java
new file mode 100644
index 0000000..9d52bc6
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/misc/Stats.java
@@ -0,0 +1,117 @@
+package org.antlr.runtime.misc;
+
+import java.io.*;
+
/** Stats routines needed by profiler etc...
 *
 *  Note: these routines return 0.0 (or 0) when given an empty X[],
 *  which is not mathematically "correct" but avoids emitting NaN
 *  in the profiler output.
 */
public class Stats {
	public static final String ANTLRWORKS_DIR = "antlrworks";

	/** Compute the sample (unbiased estimator) standard deviation following:
	 *
	 *  Computing Deviations: Standard Accuracy
	 *  Tony F. Chan and John Gregg Lewis
	 *  Stanford University
	 *  Communications of ACM September 1979 of Volume 22 the ACM Number 9
	 *
	 *  The "two-pass" method from the paper; supposed to have better
	 *  numerical properties than the textbook summation/sqrt.  To me
	 *  this looks like the textbook method, but I ain't no numerical
	 *  methods guy.
	 *
	 *  Returns 0 for fewer than two samples (no unbiased estimate exists).
	 */
	public static double stddev(int[] X) {
		int m = X.length;
		if ( m<=1 ) {
			return 0;
		}
		double xbar = avg(X);
		double s2 = 0.0;
		for (int i=0; i<m; i++){
			s2 += (X[i] - xbar)*(X[i] - xbar);
		}
		s2 = s2/(m-1); // divide by n-1: sample (unbiased) variance
		return Math.sqrt(s2);
	}

	/** Compute the sample mean; 0.0 for an empty array. */
	public static double avg(int[] X) {
		int m = X.length;
		if ( m==0 ) {
			return 0;
		}
		double xbar = 0.0;
		for (int i=0; i<m; i++){
			xbar += X[i];
		}
		// BUGFIX: previously any negative mean was clamped to 0.0; the
		// 0.0 convention is only for empty input, handled above
		return xbar / m;
	}

	/** Smallest element, or 0 for an empty array (see class note). */
	public static int min(int[] X) {
		int min = Integer.MAX_VALUE;
		int m = X.length;
		if ( m==0 ) {
			return 0;
		}
		for (int i=0; i<m; i++){
			if ( X[i] < min ) {
				min = X[i];
			}
		}
		return min;
	}

	/** Largest element, or 0 for an empty array (see class note). */
	public static int max(int[] X) {
		int max = Integer.MIN_VALUE;
		int m = X.length;
		if ( m==0 ) {
			return 0;
		}
		for (int i=0; i<m; i++){
			if ( X[i] > max ) {
				max = X[i];
			}
		}
		return max;
	}

	/** Sum of all elements; 0 for an empty array. */
	public static int sum(int[] X) {
		int s = 0;
		int m = X.length;
		for (int i=0; i<m; i++){
			s += X[i];
		}
		return s;
	}

	/** Append data (plus a line terminator) to ~/antlrworks/filename,
	 *  creating the parent directories if necessary.
	 */
	public static void writeReport(String filename, String data) throws IOException {
		String absoluteFilename = getAbsoluteFileName(filename);
		File f = new File(absoluteFilename);
		File parent = f.getParentFile();
		parent.mkdirs(); // ensure parent dir exists
		// open in append mode; close in finally so the descriptor is
		// released even if the write path throws
		PrintStream ps = new PrintStream(
			new BufferedOutputStream(new FileOutputStream(f, true)));
		try {
			ps.println(data);
		}
		finally {
			ps.close(); // also closes the wrapped buffered/file streams
		}
	}

	/** Map a bare report name to its absolute path under the user's
	 *  home antlrworks directory.
	 */
	public static String getAbsoluteFileName(String filename) {
		return System.getProperty("user.home")+File.separator+
					ANTLRWORKS_DIR +File.separator+
					filename;
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/BaseTree.java b/runtime/Java/src/org/antlr/runtime/tree/BaseTree.java
new file mode 100644
index 0000000..2819f72
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/BaseTree.java
@@ -0,0 +1,193 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
/** A generic tree implementation with no payload.  You must subclass to
 *  actually have any user data.  ANTLR v3 uses a list of children approach
 *  instead of the child-sibling approach in v2.  A flat tree (a list) is
 *  an empty node whose children represent the list.  An empty, but
 *  non-null node is called "nil".
 */
public abstract class BaseTree implements Tree {
	// created lazily by addChild/setChild; null means "no children".
	// NOTE: addChild may alias another node's list (see its warning)
	protected List children;

	public BaseTree() {
	}

	/** Create a new node from an existing node does nothing for BaseTree
	 *  as there are no fields other than the children list, which cannot
	 *  be copied as the children are not considered part of this node. 
	 */
	public BaseTree(Tree node) {
	}

	/** Return child i, or null if out of range.  Only the upper bound is
	 *  checked; callers must pass i>=0.
	 */
	public Tree getChild(int i) {
		if ( children==null || i>=children.size() ) {
			return null;
		}
		return (BaseTree)children.get(i);
	}

	/** Linear scan for the leftmost child whose token type matches;
	 *  null if none (or if there are no children).
	 */
	public Tree getFirstChildWithType(int type) {
		for (int i = 0; children!=null && i < children.size(); i++) {
			Tree t = (Tree) children.get(i);
			if ( t.getType()==type ) {
				return t;
			}
		}	
		return null;
	}

	/** Number of direct children; 0 when the list was never created. */
	public int getChildCount() {
		if ( children==null ) {
			return 0;
		}
		return children.size();
	}

	/** Add t as child of this node.
	 *
	 *  Warning: if t has no children, but child does
	 *  and child isNil then this routine moves children to t via
	 *  t.children = child.children; i.e., without copying the array.
	 */
	public void addChild(Tree t) {
		//System.out.println("add "+t.toStringTree()+" as child to "+this.toStringTree());
		if ( t==null ) {
			return; // do nothing upon addChild(null)
		}
		BaseTree childTree = (BaseTree)t;
		if ( childTree.isNil() ) { // t is an empty node possibly with children
			// guard against the self-append cycle that the list-aliasing
			// shortcut below can otherwise create
			if ( this.children!=null && this.children == childTree.children ) {
				throw new RuntimeException("attempt to add child list to itself");
			}
			// just add all of childTree's children to this
			if ( childTree.children!=null ) {
				if ( this.children!=null ) { // must copy, this has children already
					int n = childTree.children.size();
					for (int i = 0; i < n; i++) {
						this.children.add(childTree.children.get(i));
					}
				}
				else {
					// no children for this but t has children; just set pointer
					this.children = childTree.children;
				}
			}
		}
		else { // t is not empty and might have children
			if ( children==null ) {
				children = createChildrenList(); // create children list on demand
			}
			children.add(t);
		}
	}

	/** Add all elements of kids list as children of this node */
	public void addChildren(List kids) {
		for (int i = 0; i < kids.size(); i++) {
			Tree t = (Tree) kids.get(i);
			addChild(t);
		}
	}

	/** Replace child i with t.
	 *  NOTE(review): List.set requires slot i to already exist; calling
	 *  this on a node with fewer than i+1 children throws
	 *  IndexOutOfBoundsException.
	 */
	public void setChild(int i, BaseTree t) {
		if ( children==null ) {
			children = createChildrenList();
		}
		children.set(i, t);
	}

	/** Remove and return child i; null if this node has no children. */
	public BaseTree deleteChild(int i) {
		if ( children==null ) {
			return null;
		}
		return (BaseTree)children.remove(i);
	}

	/** Override in a subclass to change the impl of children list */
	protected List createChildrenList() {
		return new ArrayList();
	}

	/** A "nil" node has no payload; base implementation is never nil. */
	public boolean isNil() {
		return false;
	}

	/** Recursively walk this tree, dup'ing nodes until you have copy of
	 *  this tree.  This method should work for all subclasses as long
	 *  as they override dupNode().
	 */
	public Tree dupTree() {
		Tree newTree = this.dupNode();
		for (int i = 0; children!=null && i < children.size(); i++) {
			Tree t = (Tree) children.get(i);
			Tree newSubTree = t.dupTree();
			newTree.addChild(newSubTree);
		}
		return newTree;
	}

	/** Print out a whole tree not just a node, in LISP-ish form:
	 *  (root child1 child2 ...).  Nil roots print their children flat.
	 */
    public String toStringTree() {
		if ( children==null || children.size()==0 ) {
			return this.toString();
		}
		StringBuffer buf = new StringBuffer();
		if ( !isNil() ) {
			buf.append("(");
			buf.append(this.toString());
			buf.append(' ');
		}
		for (int i = 0; children!=null && i < children.size(); i++) {
			BaseTree t = (BaseTree) children.get(i);
			if ( i>0 ) {
				buf.append(' ');
			}
			buf.append(t.toStringTree());
		}
		if ( !isNil() ) {
			buf.append(")");
		}
		return buf.toString();
	}

	/** No line info at this level; subclasses with token payloads override. */
    public int getLine() {
		return 0;
	}

	/** No column info at this level; subclasses with token payloads override. */
	public int getCharPositionInLine() {
		return 0;
	}

	/** Override to say how a node (not a tree) should look as text */
	public abstract String toString();
}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/BaseTreeAdaptor.java b/runtime/Java/src/org/antlr/runtime/tree/BaseTreeAdaptor.java
new file mode 100644
index 0000000..4432ef4
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/BaseTreeAdaptor.java
@@ -0,0 +1,190 @@
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.Map;
+import java.util.HashMap;
+
+public abstract class BaseTreeAdaptor implements TreeAdaptor {
+	/** System.identityHashCode() is not always unique due to GC; we have to
+	 *  track ourselves.  That's ok, it's only for debugging, though it's
+	 *  expensive: we have to create a hashtable with all tree nodes in it.
+	 */
+	protected Map treeToUniqueIDMap;
+	protected int uniqueNodeID = 1;
+
+	public Object nil() {
+		return create(null);
+	}
+
+	public boolean isNil(Object tree) {
+		return ((Tree)tree).isNil();
+	}
+
+	public Object dupTree(Object tree) {
+		return ((Tree)tree).dupTree();
+	}
+
+	/** Add a child to the tree t.  If child is a flat tree (a list), make all
+	 *  in list children of t.  Warning: if t has no children, but child does
+	 *  and child isNil then you can decide it is ok to move children to t via
+	 *  t.children = child.children; i.e., without copying the array.  Just
+	 *  make sure that this is consistent with have the user will build
+	 *  ASTs.
+	 */
+	public void addChild(Object t, Object child) {
+		if ( t!=null && child!=null ) {
+			((Tree)t).addChild((Tree)child);
+		}
+	}
+
+	/** If oldRoot is a nil root, just copy or move the children to newRoot.
+	 *  If not a nil root, make oldRoot a child of newRoot.
+	 *
+	 *    old=^(nil a b c), new=r yields ^(r a b c)
+	 *    old=^(a b c), new=r yields ^(r ^(a b c))
+	 *
+	 *  If newRoot is a nil-rooted single child tree, use the single
+	 *  child as the new root node.
+	 *
+	 *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+	 *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+	 *
+	 *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
+	 *
+	 *    old=null, new=r yields r
+	 *    old=null, new=^(nil r) yields ^(nil r)
+	 *
+	 *  Return newRoot.  Throw an exception if newRoot is not a
+	 *  simple node or nil root with a single child node--it must be a root
+	 *  node.  If newRoot is ^(nil x) return x as newRoot.
+	 *
+	 *  Be advised that it's ok for newRoot to point at oldRoot's
+	 *  children; i.e., you don't have to copy the list.  We are
+	 *  constructing these nodes so we should have this control for
+	 *  efficiency.
+	 */
+	public Object becomeRoot(Object newRoot, Object oldRoot) {
+		Tree newRootTree = (Tree)newRoot;
+		Tree oldRootTree = (Tree)oldRoot;
+		if ( oldRoot==null ) {
+			return newRoot;
+		}
+		// handle ^(nil real-node)
+		if ( newRootTree.isNil() ) {
+			if ( newRootTree.getChildCount()>1 ) {
+				// TODO: make tree run time exceptions hierarchy
+				throw new RuntimeException("more than one node as root (TODO: make exception hierarchy)");
+			}
+			newRootTree = (Tree)newRootTree.getChild(0);
+		}
+		// add oldRoot to newRoot; addChild takes care of case where oldRoot
+		// is a flat list (i.e., nil-rooted tree).  All children of oldRoot
+		// are added to newRoot.
+		newRootTree.addChild(oldRootTree);
+		return newRootTree;
+	}
+
+	/** Transform ^(nil x) to x */
+	public Object rulePostProcessing(Object root) {
+		Tree r = (Tree)root;
+		if ( r!=null && r.isNil() && r.getChildCount()==1 ) {
+			r = (Tree)r.getChild(0);
+		}
+		return r;
+	}
+
+	public Object becomeRoot(Token newRoot, Object oldRoot) {
+		return becomeRoot(create(newRoot), oldRoot);
+	}
+
+	public Object create(int tokenType, Token fromToken) {
+		fromToken = createToken(fromToken);
+		//((ClassicToken)fromToken).setType(tokenType);
+		fromToken.setType(tokenType);
+		Tree t = (Tree)create(fromToken);
+		return t;
+	}
+
+	public Object create(int tokenType, Token fromToken, String text) {
+		fromToken = createToken(fromToken);
+		fromToken.setType(tokenType);
+		fromToken.setText(text);
+		Tree t = (Tree)create(fromToken);
+		return t;
+	}
+
+	public Object create(int tokenType, String text) {
+		Token fromToken = createToken(tokenType, text);
+		Tree t = (Tree)create(fromToken);
+		return t;
+	}
+
+	public int getType(Object t) {
+		((Tree)t).getType();
+		return 0;
+	}
+
+	public void setType(Object t, int type) {
+		throw new NoSuchMethodError("don't know enough about Tree node");
+	}
+
+	public String getText(Object t) {
+		return ((Tree)t).getText();
+	}
+
+	public void setText(Object t, String text) {
+		throw new NoSuchMethodError("don't know enough about Tree node");
+	}
+
+	public Object getChild(Object t, int i) {
+		return ((Tree)t).getChild(i);
+	}
+
+	public int getChildCount(Object t) {
+		return ((Tree)t).getChildCount();
+	}
+
+	public int getUniqueID(Object node) {
+		if ( treeToUniqueIDMap==null ) {
+			 treeToUniqueIDMap = new HashMap();
+		}
+		Integer prevID = (Integer)treeToUniqueIDMap.get(node);
+		if ( prevID!=null ) {
+			return prevID.intValue();
+		}
+		int ID = uniqueNodeID;
+		treeToUniqueIDMap.put(node, new Integer(ID));
+		uniqueNodeID++;
+		return ID;
+		// GC makes these nonunique:
+		// return System.identityHashCode(node);
+	}
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  If you care what the token payload objects' type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	public abstract Token createToken(int tokenType, String text);
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  This is a variant of createToken where the new token is derived from
+	 *  an actual real input token.  Typically this is for converting '{'
+	 *  tokens to BLOCK etc...  You'll see
+	 *
+	 *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+	 *
+	 *  If you care what the token payload objects' type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	public abstract Token createToken(Token fromToken);
+}
+
diff --git a/runtime/Java/src/org/antlr/runtime/tree/CommonTree.java b/runtime/Java/src/org/antlr/runtime/tree/CommonTree.java
new file mode 100644
index 0000000..1998d6e
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/CommonTree.java
@@ -0,0 +1,127 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+/** A tree node that is wrapper for a Token object. */
+public class CommonTree extends BaseTree {
+	/** What token indexes bracket all tokens associated with this node
+	 *  and below?
+	 */
+	public int startIndex=-1, stopIndex=-1;
+
+	/** A single token is the payload */
+	public Token token;
+
+	public CommonTree() { }
+	
+	public CommonTree(CommonTree node) {
+		super(node);
+		this.token = node.token;
+	}
+
+	public CommonTree(Token t) {
+		this.token = t;
+	}
+
+	public Token getToken() {
+		return token;
+	}
+
+	public Tree dupNode() {
+		return new CommonTree(this);
+	}
+
+	public boolean isNil() {
+		return token==null;
+	}
+
+	public int getType() {
+		if ( token==null ) {
+			return 0;
+		}
+		return token.getType();
+	}
+
+	public String getText() {
+		if ( token==null ) {
+			return null;
+		}
+		return token.getText();
+	}
+
+	public int getLine() {
+		if ( token==null || token.getLine()==0 ) {
+			if ( getChildCount()>0 ) {
+				return getChild(0).getLine();
+			}
+			return 0;
+		}
+		return token.getLine();
+	}
+
+	public int getCharPositionInLine() {
+		if ( token==null || token.getCharPositionInLine()==-1 ) {
+			if ( getChildCount()>0 ) {
+				return getChild(0).getCharPositionInLine();
+			}
+			return 0;
+		}
+		return token.getCharPositionInLine();
+	}
+
+	public int getTokenStartIndex() {
+		if ( startIndex==-1 && token!=null ) {
+			return token.getTokenIndex();
+		}
+		return startIndex;
+	}
+
+	public void setTokenStartIndex(int index) {
+		startIndex = index;
+	}
+
+	public int getTokenStopIndex() {
+		if ( stopIndex==-1 && token!=null ) {
+			return token.getTokenIndex();
+		}
+		return stopIndex;
+	}
+
+	public void setTokenStopIndex(int index) {
+		stopIndex = index;
+	}
+
+	public String toString() {
+		if ( isNil() ) {
+			return "nil";
+		}
+		return token.getText();
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/CommonTreeAdaptor.java b/runtime/Java/src/org/antlr/runtime/tree/CommonTreeAdaptor.java
new file mode 100644
index 0000000..547d005
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/CommonTreeAdaptor.java
@@ -0,0 +1,137 @@
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+
+/** A TreeAdaptor that works with any Tree implementation.  It provides
+ *  really just factory methods; all the work is done by BaseTreeAdaptor.
+ *  If you would like to have different tokens created than ClassicToken
+ *  objects, you need to override this and then set the parser tree adaptor to
+ *  use your subclass.
+ *
+ *  To get your parser to build nodes of a different type, override
+ *  create(Token).
+ */
+public class CommonTreeAdaptor extends BaseTreeAdaptor {
+	/** Duplicate a node.  This is part of the factory;
+	 *	override if you want another kind of node to be built.
+	 *
+	 *  I could use reflection to prevent having to override this
+	 *  but reflection is slow.
+	 */
+	public Object dupNode(Object t) {
+		if ( t==null ) {
+			return null;
+		}
+		return ((Tree)t).dupNode();
+	}
+
+	public Object create(Token payload) {
+		return new CommonTree(payload);
+	}
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  If you care what the token payload objects' type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	public Token createToken(int tokenType, String text) {
+		return new CommonToken(tokenType, text);
+	}
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  This is a variant of createToken where the new token is derived from
+	 *  an actual real input token.  Typically this is for converting '{'
+	 *  tokens to BLOCK etc...  You'll see
+	 *
+	 *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+	 *
+	 *  If you care what the token payload objects' type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	public Token createToken(Token fromToken) {
+		return new CommonToken(fromToken);
+	}
+
+	/** Track start/stop token for subtree root created for a rule.
+	 *  Only works with Tree nodes.  For rules that match nothing,
+	 *  seems like this will yield start=i and stop=i-1 in a nil node.
+	 *  Might be useful info so I'll not force to be i..i.
+	 */
+	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
+		if ( t==null ) {
+			return;
+		}
+		int start = 0;
+		int stop = 0;
+		if ( startToken!=null ) {
+			start = startToken.getTokenIndex();
+		}
+		if ( stopToken!=null ) {
+			stop = stopToken.getTokenIndex();
+		}
+		((Tree)t).setTokenStartIndex(start);
+		((Tree)t).setTokenStopIndex(stop);
+	}
+
+	public int getTokenStartIndex(Object t) {
+		if ( t==null ) {
+			return -1;
+		}
+		return ((Tree)t).getTokenStartIndex();
+	}
+
+	public int getTokenStopIndex(Object t) {
+		if ( t==null ) {
+			return -1;
+		}
+		return ((Tree)t).getTokenStopIndex();
+	}
+
+	public String getText(Object t) {
+		if ( t==null ) {
+			return null;
+		}
+		return ((Tree)t).getText();
+	}
+
+    public int getType(Object t) {
+		if ( t==null ) {
+			return Token.INVALID_TOKEN_TYPE;
+		}
+		return ((Tree)t).getType();
+	}
+
+	/** What is the Token associated with this node?  If
+	 *  you are not using CommonTree, then you must
+	 *  override this in your own adaptor.
+	 */
+	public Token getToken(Object t) {
+		if ( t instanceof CommonTree ) {
+			return ((CommonTree)t).getToken();
+		}
+		return null; // no idea what to do
+	}
+
+	public Object getChild(Object t, int i) {
+		if ( t==null ) {
+			return null;
+		}
+        return ((Tree)t).getChild(i);
+    }
+
+    public int getChildCount(Object t) {
+		if ( t==null ) {
+			return 0;
+		}
+        return ((Tree)t).getChildCount();
+    }
+
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/CommonTreeNodeStream.java b/runtime/Java/src/org/antlr/runtime/tree/CommonTreeNodeStream.java
new file mode 100644
index 0000000..a645b16
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/CommonTreeNodeStream.java
@@ -0,0 +1,560 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+
+import java.util.*;
+
+/** A buffered stream of tree nodes.  Nodes can be from a tree of ANY kind.
+ *
+ *  This node stream sucks all nodes out of the tree specified in
+ *  the constructor during construction and makes pointers into
+ *  the tree using an array of Object pointers. The stream necessarily
+ *  includes pointers to DOWN and UP and EOF nodes.
+ *
+ *  This stream knows how to mark/release for backtracking.
+ *
+ *  This stream is most suitable for tree interpreters that need to
+ *  jump around a lot or for tree parsers requiring speed (at cost of memory).
+ *  There is some duplicated functionality here with UnBufferedTreeNodeStream
+ *  but just in bookkeeping, not tree walking etc...
+ *
+ *  @see UnBufferedTreeNodeStream
+ */
+public class CommonTreeNodeStream implements TreeNodeStream {
+	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
+	public static final int INITIAL_CALL_STACK_SIZE = 10;
+
+	protected class StreamIterator implements Iterator {
+		int i = 0;
+		public boolean hasNext() {
+			return i<nodes.size();
+		}
+
+		public Object next() {
+			int current = i;
+			i++;
+			if ( current < nodes.size() ) {
+				return nodes.get(current);
+			}
+			return eof;
+		}
+
+		public void remove() {
+			throw new RuntimeException("cannot remove nodes from stream");
+		}
+	}
+
+	// all these navigation nodes are shared and hence they
+	// cannot contain any line/column info
+
+	protected Object down;
+	protected Object up;
+	protected Object eof;
+
+	/** The complete mapping from stream index to tree node.
+	 *  This buffer includes pointers to DOWN, UP, and EOF nodes.
+	 *  It is built upon ctor invocation.  The elements are type
+	 *  Object as we don't what the trees look like.
+	 *
+	 *  Load upon first need of the buffer so we can set token types
+	 *  of interest for reverseIndexing.  Slows us down a wee bit to
+	 *  do all of the if p==-1 testing everywhere though.
+	 */
+	protected List nodes;
+
+	/** Pull nodes from which tree? */
+	protected Object root;
+
+	/** IF this tree (root) was created from a token stream, track it. */
+	protected TokenStream tokens;
+
+	/** What tree adaptor was used to build these trees */
+	TreeAdaptor adaptor;
+
+	/** Reuse same DOWN, UP navigation nodes unless this is true */
+	protected boolean uniqueNavigationNodes = false;
+
+	/** The index into the nodes list of the current node (next node
+	 *  to consume).  If -1, nodes array not filled yet.
+	 */
+	protected int p = -1;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	/** Stack of indexes used for push/pop calls */
+	protected int[] calls;
+
+	/** Stack pointer for stack of indexes; -1 indicates empty.  Points
+	 *  at next location to push a value.
+	 */
+	protected int _sp = -1;
+
+	/** During fillBuffer(), we can make a reverse index from a set
+	 *  of token types of interest to the list of indexes into the
+	 *  node stream.  This lets us convert a node pointer to a
+	 *  stream index semi-efficiently for a list of interesting
+	 *  nodes such as function definition nodes (you'll want to seek
+	 *  to their bodies for an interpreter).  Also useful for doing
+	 *  dynamic searches; i.e., go find me all PLUS nodes.
+	 */
+	protected Map tokenTypeToStreamIndexesMap;
+
+	/** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
+	 *  occurs for all token types.
+	 */
+	public static final Set INDEX_ALL = new HashSet();
+
+	/** A set of token types user would like to index for faster lookup.
+	 *  If this is INDEX_ALL, then all token types are tracked.  If null,
+	 *  then none are indexed.
+	 */
+	protected Set tokenTypesToReverseIndex = null;
+
+	public CommonTreeNodeStream(Object tree) {
+		this(new CommonTreeAdaptor(), tree);
+	}
+
+	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree) {
+		this(adaptor, tree, DEFAULT_INITIAL_BUFFER_SIZE);
+	}
+
+	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree, int initialBufferSize) {
+		this.root = tree;
+		this.adaptor = adaptor;
+		nodes = new ArrayList(initialBufferSize);
+		down = adaptor.create(Token.DOWN, "DOWN");
+		up = adaptor.create(Token.UP, "UP");
+		eof = adaptor.create(Token.EOF, "EOF");
+	}
+
+	/** Walk tree with depth-first-search and fill nodes buffer.
+	 *  Don't do DOWN, UP nodes if its a list (t is isNil).
+	 */
+	protected void fillBuffer() {
+		fillBuffer(root);
+		//System.out.println("revIndex="+tokenTypeToStreamIndexesMap);
+		p = 0; // buffer of nodes intialized now
+	}
+
+	protected void fillBuffer(Object t) {
+		boolean nil = adaptor.isNil(t);
+		if ( !nil ) {
+			nodes.add(t); // add this node
+			fillReverseIndex(t, nodes.size()-1);
+		}
+		// add DOWN node if t has children
+		int n = adaptor.getChildCount(t);
+		if ( !nil && n>0 ) {
+			addNavigationNode(Token.DOWN);
+		}
+		// and now add all its children
+		for (int c=0; c<n; c++) {
+			Object child = adaptor.getChild(t,c);
+			fillBuffer(child);
+		}
+		// add UP node if t has children
+		if ( !nil && n>0 ) {
+			addNavigationNode(Token.UP);
+		}
+	}
+
+	/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
+	 *  You can override this method to alter how indexing occurs.  The
+	 *  default is to create a
+	 *
+	 *    Map<Integer token type,ArrayList<Integer stream index>>
+	 *
+	 *  This data structure allows you to find all nodes with type INT in order.
+	 *
+	 *  If you really need to find a node of type, say, FUNC quickly then perhaps
+	 *
+	 *    Map<Integertoken type,Map<Object tree node,Integer stream index>>
+	 *
+	 *  would be better for you.  The interior maps map a tree node to
+	 *  the index so you don't have to search linearly for a specific node.
+	 *
+	 *  If you change this method, you will likely need to change
+	 *  getNodeIndex(), which extracts information.
+	 */
+	protected void fillReverseIndex(Object node, int streamIndex) {
+		//System.out.println("revIndex "+node+"@"+streamIndex);
+		if ( tokenTypesToReverseIndex==null ) {
+			return; // no indexing if this is empty (nothing of interest)
+		}
+		if ( tokenTypeToStreamIndexesMap==null ) {
+			tokenTypeToStreamIndexesMap = new HashMap(); // first indexing op
+		}
+		int tokenType = adaptor.getType(node);
+		Integer tokenTypeI = new Integer(tokenType);
+		if ( !(tokenTypesToReverseIndex==INDEX_ALL ||
+			   tokenTypesToReverseIndex.contains(tokenTypeI)) )
+		{
+			return; // tokenType not of interest
+		}
+		Integer streamIndexI = new Integer(streamIndex);
+		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+		if ( indexes==null ) {
+			indexes = new ArrayList(); // no list yet for this token type
+			indexes.add(streamIndexI); // not there yet, add
+			tokenTypeToStreamIndexesMap.put(tokenTypeI, indexes);
+		}
+		else {
+			if ( !indexes.contains(streamIndexI) ) {
+				indexes.add(streamIndexI); // not there yet, add
+			}
+		}
+	}
+
+	/** Track the indicated token type in the reverse index.  Call this
+	 *  repeatedly for each type or use variant with Set argument to
+	 *  set all at once.
+	 * @param tokenType
+	 */
+	public void reverseIndex(int tokenType) {
+		if ( tokenTypesToReverseIndex==null ) {
+			tokenTypesToReverseIndex = new HashSet();
+		}
+		else if ( tokenTypesToReverseIndex==INDEX_ALL ) {
+			return;
+		}
+		tokenTypesToReverseIndex.add(new Integer(tokenType));
+	}
+
+	/** Track the indicated token types in the reverse index. Set
+	 *  to INDEX_ALL to track all token types.
+	 */
+	public void reverseIndex(Set tokenTypes) {
+		tokenTypesToReverseIndex = tokenTypes;
+	}
+
+	/** Given a node pointer, return its index into the node stream.
+	 *  This is not its Token stream index.  If there is no reverse map
+	 *  from node to stream index or the map does not contain entries
+	 *  for node's token type, a linear search of entire stream is used.
+	 *
+	 *  Return -1 if exact node pointer not in stream.
+	 */
+	public int getNodeIndex(Object node) {
+		//System.out.println("get "+node);
+		if ( tokenTypeToStreamIndexesMap==null ) {
+			return getNodeIndexLinearly(node);
+		}
+		int tokenType = adaptor.getType(node);
+		Integer tokenTypeI = new Integer(tokenType);
+		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+		if ( indexes==null ) {
+			//System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
+			return getNodeIndexLinearly(node);
+		}
+		for (int i = 0; i < indexes.size(); i++) {
+			Integer streamIndexI = (Integer)indexes.get(i);
+			Object n = get(streamIndexI.intValue());
+			if ( n==node ) {
+				//System.out.println("found in index; stream index = "+streamIndexI);
+				return streamIndexI.intValue(); // found it!
+			}
+		}
+		return -1;
+	}
+
+	protected int getNodeIndexLinearly(Object node) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		for (int i = 0; i < nodes.size(); i++) {
+			Object t = (Object) nodes.get(i);
+			if ( t==node ) {
+				return i;
+			}
+		}
+		return -1;
+	}
+
+	/** As we flatten the tree, we use UP, DOWN nodes to represent
+	 *  the tree structure.  When debugging we need unique nodes
+	 *  so instantiate new ones when uniqueNavigationNodes is true.
+	 */
+	protected void addNavigationNode(final int ttype) {
+		Object navNode = null;
+		if ( ttype==Token.DOWN ) {
+			if ( hasUniqueNavigationNodes() ) {
+				navNode = adaptor.create(Token.DOWN, "DOWN");
+			}
+			else {
+				navNode = down;
+			}
+		}
+		else {
+			if ( hasUniqueNavigationNodes() ) {
+				navNode = adaptor.create(Token.UP, "UP");
+			}
+			else {
+				navNode = up;
+			}
+		}
+		nodes.add(navNode);
+	}
+
+	public Object get(int i) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		return nodes.get(i);
+	}
+
+	public Object LT(int k) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		if ( k==0 ) {
+			return null;
+		}
+		if ( k<0 ) {
+			return LB(-k);
+		}
+		//System.out.print("LT(p="+p+","+k+")=");
+		if ( (p+k-1) >= nodes.size() ) {
+			return eof;
+		}
+		return nodes.get(p+k-1);
+	}
+
+/*
+	public Object getLastTreeNode() {
+		int i = index();
+		if ( i>=size() ) {
+			i--; // if at EOF, have to start one back
+		}
+		System.out.println("start last node: "+i+" size=="+nodes.size());
+		while ( i>=0 &&
+			(adaptor.getType(get(i))==Token.EOF ||
+			 adaptor.getType(get(i))==Token.UP ||
+			 adaptor.getType(get(i))==Token.DOWN) )
+		{
+			i--;
+		}
+		System.out.println("stop at node: "+i+" "+nodes.get(i));
+		return nodes.get(i);
+	}
+*/
+	
+	/** Look backwards k nodes */
+	protected Object LB(int k) {
+		if ( k==0 ) {
+			return null;
+		}
+		if ( (p-k)<0 ) {
+			return null;
+		}
+		return nodes.get(p-k);
+	}
+
+	public Object getTreeSource() {
+		return root;
+	}
+
+	public TokenStream getTokenStream() {
+		return tokens;
+	}
+
+	public void setTokenStream(TokenStream tokens) {
+		this.tokens = tokens;
+	}
+
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+
+	public boolean hasUniqueNavigationNodes() {
+		return uniqueNavigationNodes;
+	}
+
+	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
+		this.uniqueNavigationNodes = uniqueNavigationNodes;
+	}
+
+	public void consume() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		p++;
+	}
+
+	public int LA(int i) {
+		return adaptor.getType(LT(i));
+	}
+
+	public int mark() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		lastMarker = index();
+		return lastMarker;
+	}
+
+	public void release(int marker) {
+		// no resources to release
+	}
+
+	public int index() {
+		return p;
+	}
+
+	public void rewind(int marker) {
+		seek(marker);
+	}
+
+	public void rewind() {
+		seek(lastMarker);
+	}
+
+	public void seek(int index) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		p = index;
+	}
+
+	/** Make stream jump to a new location, saving old location.
+	 *  Switch back with pop().  I manage dyanmic array manually
+	 *  to avoid creating Integer objects all over the place.
+	 */
+	public void push(int index) {
+		if ( calls==null ) {
+			calls = new int[INITIAL_CALL_STACK_SIZE];
+		}
+		else if ( (_sp+1)>=calls.length ) {
+			int[] newStack = new int[calls.length*2];
+			System.arraycopy(calls, 0, newStack, 0, calls.length);
+			calls = newStack;
+		}
+		calls[++_sp] = p; // save current index
+		seek(index);
+	}
+
+	/** Seek back to previous index saved during last push() call.
+	 *  Return top of stack (return index).
+	 */
+	public int pop() {
+		int ret = calls[_sp--];
+		seek(ret);
+		return ret;
+	}
+
+	public int size() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		return nodes.size();
+	}
+
+	public Iterator iterator() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		return new StreamIterator();
+	}
+
+	/** Used for testing, just return the token type stream */
+	public String toString() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		StringBuffer buf = new StringBuffer();
+		for (int i = 0; i < nodes.size(); i++) {
+			Object t = (Object) nodes.get(i);
+			buf.append(" ");
+			buf.append(adaptor.getType(t));
+		}
+		return buf.toString();
+	}
+
+	public String toString(Object start, Object stop) {
+		if ( start==null || stop==null ) {
+			return null;
+		}
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		System.out.println("stop: "+stop);
+		if ( start instanceof CommonTree )
+			System.out.print("toString: "+((CommonTree)start).getToken()+", ");
+		else
+			System.out.println(start);
+		if ( stop instanceof CommonTree )
+			System.out.println(((CommonTree)stop).getToken());
+		else
+			System.out.println(stop);
+		// if we have the token stream, use that to dump text in order
+		if ( tokens!=null ) {
+			int beginTokenIndex = adaptor.getTokenStartIndex(start);
+			int endTokenIndex = adaptor.getTokenStopIndex(stop);
+			// if it's a tree, use start/stop index from start node
+			// else use token range from start/stop nodes
+			if ( adaptor.getType(stop)==Token.UP ) {
+				endTokenIndex = adaptor.getTokenStopIndex(start);
+			}
+			else if ( adaptor.getType(stop)==Token.EOF ) {
+				endTokenIndex = size()-2; // don't use EOF
+			}
+			return tokens.toString(beginTokenIndex, endTokenIndex);
+		}
+		// walk nodes looking for start
+		Object t = null;
+		int i = 0;
+		for (; i < nodes.size(); i++) {
+			t = nodes.get(i);
+			if ( t==start ) {
+				break;
+			}
+		}
+		// now walk until we see stop, filling string buffer with text
+		 StringBuffer buf = new StringBuffer();
+		t = nodes.get(i);
+		while ( t!=stop ) {
+			String text = adaptor.getText(t);
+			if ( text==null ) {
+				text = " "+String.valueOf(adaptor.getType(t));
+			}
+			buf.append(text);
+			i++;
+			t = nodes.get(i);
+		}
+		// include stop node too
+		String text = adaptor.getText(stop);
+		if ( text==null ) {
+			text = " "+String.valueOf(adaptor.getType(stop));
+		}
+		buf.append(text);
+		return buf.toString();
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/DOTTreeGenerator.java b/runtime/Java/src/org/antlr/runtime/tree/DOTTreeGenerator.java
new file mode 100644
index 0000000..f6f18d1
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/DOTTreeGenerator.java
@@ -0,0 +1,205 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.stringtemplate.StringTemplate;
+
+import java.util.HashMap;
+
+/** A utility class to generate DOT diagrams (graphviz) from
+ *  arbitrary trees.  You can pass in your own templates and
+ *  can pass in any kind of tree or use Tree interface method.
+ *  I wanted this separator so that you don't have to include
+ *  ST just to use the org.antlr.runtime.tree.* package.
+ *  This is a set of non-static methods so you can subclass
+ *  to override.  For example, here is an invocation:
+ *
+ *      CharStream input = new ANTLRInputStream(System.in);
+ *      TLexer lex = new TLexer(input);
+ *      CommonTokenStream tokens = new CommonTokenStream(lex);
+ *      TParser parser = new TParser(tokens);
+ *      TParser.e_return r = parser.e();
+ *      Tree t = (Tree)r.tree;
+ *      System.out.println(t.toStringTree());
+ *      DOTTreeGenerator gen = new DOTTreeGenerator();
+ *      StringTemplate st = gen.toDOT(t);
+ *      System.out.println(st);
+ */
+public class DOTTreeGenerator {
+
+	public static StringTemplate _treeST =
+		new StringTemplate(
+			"digraph {\n" +
+			"  ordering=out;\n" +
+			"  ranksep=.4;\n" +
+			"  node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
+			"        width=.25, height=.25];\n" +
+			"  edge [arrowsize=.5]\n" +
+			"  $nodes$\n" +
+			"  $edges$\n" +
+			"}\n");
+
+	public static StringTemplate _nodeST =
+			new StringTemplate("$name$ [label=\"$text$\"];\n");
+
+	public static StringTemplate _edgeST =
+			new StringTemplate("$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n");
+
+	/** Track node to number mapping so we can get proper node name back */
+	HashMap nodeToNumberMap = new HashMap();
+
+	/** Track node number so we can get unique node names */
+	int nodeNumber = 0;
+
+	public StringTemplate toDOT(Object tree,
+								TreeAdaptor adaptor,
+								StringTemplate _treeST,
+								StringTemplate _edgeST)
+	{
+		StringTemplate treeST = _treeST.getInstanceOf();
+		nodeNumber = 0;
+		toDOTDefineNodes(tree, adaptor, treeST);
+		nodeNumber = 0;
+		toDOTDefineEdges(tree, adaptor, treeST);
+		/*
+		if ( adaptor.getChildCount(tree)==0 ) {
+            // single node, don't do edge.
+            treeST.setAttribute("nodes", adaptor.getText(tree));
+        }
+        */
+		return treeST;
+	}
+
+	public StringTemplate toDOT(Object tree,
+								TreeAdaptor adaptor)
+	{
+		return toDOT(tree, adaptor, _treeST, _edgeST);
+	}
+
+	/** Generate DOT (graphviz) for a whole tree not just a node.
+	 *  For example, 3+4*5 should generate:
+	 *
+	 * digraph {
+	 *   node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
+	 *         width=.4, height=.2];
+	 *   edge [arrowsize=.7]
+	 *   "+"->3
+	 *   "+"->"*"
+	 *   "*"->4
+	 *   "*"->5
+	 * }
+	 *
+	 * Return the ST not a string in case people want to alter.
+	 *
+	 * Takes a Tree interface object.
+	 */
+	public StringTemplate toDOT(Tree tree) {
+		return toDOT(tree, new CommonTreeAdaptor());
+	}
+
+	protected void toDOTDefineNodes(Object tree,
+									TreeAdaptor adaptor,
+									StringTemplate treeST)
+	{
+		if ( tree==null ) {
+			return;
+		}
+		int n = adaptor.getChildCount(tree);
+		if ( n==0 ) {
+			// must have already dumped as child from previous
+			// invocation; do nothing
+			return;
+		}
+
+		// define parent node
+		StringTemplate parentNodeST = getNodeST(adaptor, tree);
+		treeST.setAttribute("nodes", parentNodeST);
+
+		// for each child, do a "<unique-name> [label=text]" node def
+		for (int i = 0; i < n; i++) {
+			Object child = adaptor.getChild(tree, i);
+			StringTemplate nodeST = getNodeST(adaptor, child);
+			treeST.setAttribute("nodes", nodeST);
+			toDOTDefineNodes(child, adaptor, treeST);
+		}
+	}
+
+	protected void toDOTDefineEdges(Object tree,
+									TreeAdaptor adaptor,
+									StringTemplate treeST)
+	{
+		if ( tree==null ) {
+			return;
+		}
+		int n = adaptor.getChildCount(tree);
+		if ( n==0 ) {
+			// must have already dumped as child from previous
+			// invocation; do nothing
+			return;
+		}
+
+		String parentName = "n"+getNodeNumber(tree);
+
+		// for each child, do a parent -> child edge using unique node names
+		String parentText = adaptor.getText(tree);
+		for (int i = 0; i < n; i++) {
+			Object child = adaptor.getChild(tree, i);
+			String childText = adaptor.getText(child);
+			String childName = "n"+getNodeNumber(child);
+			StringTemplate edgeST = _edgeST.getInstanceOf();
+			edgeST.setAttribute("parent", parentName);
+			edgeST.setAttribute("child", childName);
+			edgeST.setAttribute("parentText", parentText);
+			edgeST.setAttribute("childText", childText);
+			treeST.setAttribute("edges", edgeST);
+			toDOTDefineEdges(child, adaptor, treeST);
+		}
+	}
+
+	protected StringTemplate getNodeST(TreeAdaptor adaptor, Object t) {
+		String text = adaptor.getText(t);
+		StringTemplate nodeST = _nodeST.getInstanceOf();
+		String uniqueName = "n"+getNodeNumber(t);
+		nodeST.setAttribute("name", uniqueName);
+		if (text!=null) text = text.replaceAll("\"", "\\\\\"");
+		nodeST.setAttribute("text", text);
+		return nodeST;
+	}
+
+	protected int getNodeNumber(Object t) {
+		Integer nI = (Integer)nodeToNumberMap.get(t);
+		if ( nI!=null ) {
+			return nI.intValue();
+		}
+		else {
+			nodeToNumberMap.put(t, new Integer(nodeNumber));
+			nodeNumber++;
+			return nodeNumber-1;
+		}
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java b/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
new file mode 100644
index 0000000..0aaa6e9
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
@@ -0,0 +1,54 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+/** A generic doubly-linked tree implementation with no payload.
+ *  You must subclass to actually have any user data.
+ *  TODO: do we really need/want this?
+ */
+public abstract class DoubleLinkTree extends BaseTree {
+	protected DoubleLinkTree parent;
+
+	public DoubleLinkTree getParent() {
+		return parent;
+	}
+
+	public void setParent(DoubleLinkTree t) {
+		parent = t;
+	}
+
+	public void addChild(BaseTree t) {
+		super.addChild(t);
+		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
+	}
+
+	public void setChild(int i, BaseTree t) {
+		super.setChild(i, t);
+		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/ParseTree.java b/runtime/Java/src/org/antlr/runtime/tree/ParseTree.java
new file mode 100644
index 0000000..e5baedb
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/ParseTree.java
@@ -0,0 +1,79 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+/** A record of the rules used to match a token sequence.  The tokens
+ *  end up as the leaves of this tree and rule nodes are the interior nodes.
+ *  This really adds no functionality, it is just an alias for CommonTree
+ *  that is more meaningful (specific) and holds a String to display for a node.
+ */
+public class ParseTree extends BaseTree {
+	public Object payload;
+	public ParseTree(Object label) {
+		this.payload = label;
+	}
+
+	public Tree dupNode() {
+		return null;
+	}
+
+	public int getType() {
+		return 0;
+	}
+
+	public String getText() {
+		return toString();
+	}
+
+	public int getTokenStartIndex() {
+		return 0;
+	}
+
+	public void setTokenStartIndex(int index) {
+	}
+
+	public int getTokenStopIndex() {
+		return 0;
+	}
+
+	public void setTokenStopIndex(int index) {
+	}
+
+	public String toString() {
+		if ( payload instanceof Token ) {
+			Token t = (Token)payload;
+			if ( t.getType() == Token.EOF ) {
+				return "<EOF>";
+			}
+			return t.getText();
+		}
+		return payload.toString();
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteCardinalityException.java b/runtime/Java/src/org/antlr/runtime/tree/RewriteCardinalityException.java
new file mode 100644
index 0000000..2abe04f
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/RewriteCardinalityException.java
@@ -0,0 +1,47 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
/** Base class for all exceptions thrown during AST rewrite construction.
 *  Signals that the cardinality of two or more elements in a subrule
 *  differ: (ID INT)+ where |ID|!=|INT|
 */
public class RewriteCardinalityException extends RuntimeException {
	// which token/rule element the offending stream tracks; may be null
	public String elementDescription;

	public RewriteCardinalityException(String elementDescription) {
		this.elementDescription = elementDescription;
	}

	/** Use the element description as the message; null when none given. */
	public String getMessage() {
		return elementDescription;
	}
}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEarlyExitException.java b/runtime/Java/src/org/antlr/runtime/tree/RewriteEarlyExitException.java
new file mode 100644
index 0000000..c2bc29b
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/RewriteEarlyExitException.java
@@ -0,0 +1,39 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+/** No elements within a (...)+ in a rewrite rule */
+public class RewriteEarlyExitException extends RewriteCardinalityException {
+	public RewriteEarlyExitException() {
+		super(null);
+	}
+	public RewriteEarlyExitException(String elementDescription) {
+		super(elementDescription);
+	}
+
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
new file mode 100644
index 0000000..815b4e6
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
@@ -0,0 +1,35 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
+public class RewriteEmptyStreamException extends RewriteCardinalityException {
+	public RewriteEmptyStreamException(String elementDescription) {
+		super(elementDescription);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleElementStream.java b/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleElementStream.java
new file mode 100644
index 0000000..b8799c7
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleElementStream.java
@@ -0,0 +1,211 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.CommonToken;
+
+import java.util.List;
+import java.util.ArrayList;
+
+/** A generic list of elements tracked in an alternative to be used in
+ *  a -> rewrite rule.  We need to subclass to fill in the next() method,
+ *  which returns either an AST node wrapped around a token payload or
+ *  an existing subtree.
+ *
+ *  Once you start next()ing, do not try to add more elements.  It will
+ *  break the cursor tracking I believe.
+ *
+ *  @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
+ *  @see org.antlr.runtime.tree.RewriteRuleTokenStream
+ *
+ *  TODO: add mechanism to detect/puke on modification after reading from stream
+ */
+public abstract class RewriteRuleElementStream {
+	/** Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
+	 *  which bumps it to 1 meaning no more elements.
+	 */
+	protected int cursor = 0;
+
+	/** Track single elements w/o creating a list.  Upon 2nd add, alloc list */
+	protected Object singleElement;
+
+	/** The list of tokens or subtrees we are tracking */
+	protected List elements;
+
+	/** Once a node / subtree has been used in a stream, it must be dup'd
+	 *  from then on.  Streams are reset after subrules so that the streams
+	 *  can be reused in future subrules.  So, reset must set a dirty bit.
+	 *  If dirty, then next() always returns a dup.
+	 *
+	 *  I wanted to use "naughty bit" here, but couldn't think of a way
+	 *  to use "naughty".
+	 */
+	protected boolean dirty = false;
+
+	/** The element or stream description; usually has name of the token or
+	 *  rule reference that this list tracks.  Can include rulename too, but
+	 *  the exception would track that info.
+	 */
+	protected String elementDescription;
+	protected TreeAdaptor adaptor;
+
+	public RewriteRuleElementStream(TreeAdaptor adaptor, String elementDescription) {
+		this.elementDescription = elementDescription;
+		this.adaptor = adaptor;
+	}
+
+	/** Create a stream with one element */
+	public RewriteRuleElementStream(TreeAdaptor adaptor,
+									String elementDescription,
+									Object oneElement)
+	{
+		this(adaptor, elementDescription);
+		add(oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleElementStream(TreeAdaptor adaptor,
+									String elementDescription,
+									List elements)
+	{
+		this(adaptor, elementDescription);
+		this.singleElement = null;
+		this.elements = elements;
+	}
+
+	/** Reset the condition of this stream so that it appears we have
+	 *  not consumed any of its elements.  Elements themselves are untouched.
+	 *  Once we reset the stream, any future use will need duplicates.  Set
+	 *  the dirty bit.
+	 */
+	public void reset() {
+		cursor = 0;
+		dirty = true;
+	}
+
+	public void add(Object el) {
+		//System.out.println("add '"+elementDescription+"' is "+el);
+		if ( el==null ) {
+			return;
+		}
+		if ( elements!=null ) { // if in list, just add
+			elements.add(el);
+			return;
+		}
+		if ( singleElement == null ) { // no elements yet, track w/o list
+			singleElement = el;
+			return;
+		}
+		// adding 2nd element, move to list
+		elements = new ArrayList(5);
+		elements.add(singleElement);
+		singleElement = null;
+		elements.add(el);
+	}
+
+	/** Return the next element in the stream.  If out of elements, throw
+	 *  an exception unless size()==1.  If size is 1, then return elements[0].
+	 *  Return a duplicate node/subtree if stream is out of elements and
+	 *  size==1.  If we've already used the element, dup (dirty bit set).
+	 */
+	public Object next() {
+		int n = size();
+		if ( dirty || (cursor>=n && n==1) ) {
+			// if out of elements and size is 1, dup
+			Object el = _next();
+			return dup(el);
+		}
+		// test size above then fetch
+		Object el = _next();
+		return el;
+	}
+
+	/** do the work of getting the next element, making sure that it's
+	 *  a tree node or subtree.  Deal with the optimization of single-
+	 *  element list versus list of size > 1.  Throw an exception
+	 *  if the stream is empty or we're out of elements and size>1.
+	 *  protected so you can override in a subclass if necessary.
+	 */
+	protected Object _next() {
+		int n = size();
+		if ( n ==0 ) {
+			throw new RewriteEmptyStreamException(elementDescription);
+		}
+		if ( cursor>= n) { // out of elements?
+			if ( n ==1 ) {  // if size is 1, it's ok; return and we'll dup
+				return toTree(singleElement);
+			}
+			// out of elements and size was not 1, so we can't dup
+			throw new RewriteCardinalityException(elementDescription);
+		}
+		// we have elements
+		if ( singleElement!=null ) {
+			cursor++; // move cursor even for single element list
+			return toTree(singleElement);
+		}
+		// must have more than one in list, pull from elements
+		Object o = toTree(elements.get(cursor));
+		cursor++;
+		return o;
+	}
+
+	/** When constructing trees, sometimes we need to dup a token or AST
+	 * 	subtree.  Dup'ing a token means just creating another AST node
+	 *  around it.  For trees, you must call the adaptor.dupTree() unless
+	 *  the element is for a tree root; then it must be a node dup.
+	 */
+	protected abstract Object dup(Object el);
+
+	/** Ensure stream emits trees; tokens must be converted to AST nodes.
+	 *  AST nodes can be passed through unmolested.
+	 */
+	protected Object toTree(Object el) {
+		return el;
+	}
+
+	public boolean hasNext() {
+		 return (singleElement != null && cursor < 1) ||
+			   (elements!=null && cursor < elements.size());
+	}
+
+	public int size() {
+		int n = 0;
+		if ( singleElement != null ) {
+			n = 1;
+		}
+		if ( elements!=null ) {
+			return elements.size();
+		}
+		return n;
+	}
+
+	public String getDescription() {
+		return elementDescription;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java b/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
new file mode 100644
index 0000000..793f925
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
@@ -0,0 +1,83 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import java.util.List;
+
+public class RewriteRuleSubtreeStream extends RewriteRuleElementStream {
+
+	public RewriteRuleSubtreeStream(TreeAdaptor adaptor, String elementDescription) {
+		super(adaptor, elementDescription);
+	}
+
+	/** Create a stream with one element */
+	public RewriteRuleSubtreeStream(TreeAdaptor adaptor,
+									String elementDescription,
+									Object oneElement)
+	{
+		super(adaptor, elementDescription, oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleSubtreeStream(TreeAdaptor adaptor,
+									String elementDescription,
+									List elements)
+	{
+		super(adaptor, elementDescription, elements);
+	}
+
+	/** Treat next element as a single node even if it's a subtree.
+	 *  This is used instead of next() when the result has to be a
+	 *  tree root node.  Also prevents us from duplicating recently-added
+	 *  children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
+	 *  must dup the type node, but ID has been added.
+	 *
+	 *  Referencing a rule result twice is ok; dup entire tree as
+	 *  we can't be adding trees as root; e.g., expr expr.
+	 *
+	 *  Hideous code duplication here with super.next().  Can't think of
+	 *  a proper way to refactor.  This needs to always call dup node
+	 *  and super.next() doesn't know which to call: dup node or dup tree.
+	 */
+	public Object nextNode() {
+		int n = size();
+		if ( dirty || (cursor>=n && n==1) ) {
+			// if out of elements and size is 1, dup (at most a single node
+			// since this is for making root nodes).
+			Object el = _next();
+			return adaptor.dupNode(el);
+		}
+		// test size above then fetch
+		Object el = _next();
+		return el;
+	}
+
+	protected Object dup(Object el) {
+		return adaptor.dupTree(el);
+	}
+}
\ No newline at end of file
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java b/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
new file mode 100644
index 0000000..4e6e843
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
@@ -0,0 +1,67 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.List;
+
+public class RewriteRuleTokenStream extends RewriteRuleElementStream {
+
+	public RewriteRuleTokenStream(TreeAdaptor adaptor, String elementDescription) {
+		super(adaptor, elementDescription);
+	}
+
+	/** Create a stream with one element */
+	public RewriteRuleTokenStream(TreeAdaptor adaptor,
+								  String elementDescription,
+								  Object oneElement)
+	{
+		super(adaptor, elementDescription, oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleTokenStream(TreeAdaptor adaptor,
+								  String elementDescription,
+								  List elements)
+	{
+		super(adaptor, elementDescription, elements);
+	}
+
+	public Object next() {
+		return _next();
+	}
+
+	protected Object toTree(Object el) {
+		return adaptor.create((Token)el);
+	}
+
+	protected Object dup(Object el) {
+		throw new UnsupportedOperationException("dup can't be called for a token stream.");
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/Tree.java b/runtime/Java/src/org/antlr/runtime/tree/Tree.java
new file mode 100644
index 0000000..2794ea6
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/Tree.java
@@ -0,0 +1,64 @@
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+/** What does a tree look like?  ANTLR has a number of support classes
+ *  such as CommonTreeNodeStream that work on these kinds of trees.  You
+ *  don't have to make your trees implement this interface, but if you do,
+ *  you'll be able to use more support code.
+ *
+ *  NOTE: When constructing trees, ANTLR can build any kind of tree; it can
+ *  even use Token objects as trees if you add a child list to your tokens.
+ *
+ *  This is a tree node without any payload; just navigation and factory stuff.
+ */
+public interface Tree {
+	public static final Tree INVALID_NODE = new CommonTree(Token.INVALID_TOKEN);
+
+	Tree getChild(int i);
+
+	int getChildCount();
+
+	/** Add t as a child to this node.  If t is null, do nothing.  If t
+	 *  is nil, add all children of t to this' children.
+	 * @param t
+	 */
+	void addChild(Tree t);
+
+	/** Indicates the node is a nil node but may still have children, meaning
+	 *  the tree is a flat list.
+	 */
+	boolean isNil();
+
+	/**  What is the smallest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStartIndex();
+
+	void setTokenStartIndex(int index);
+
+	/**  What is the largest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStopIndex();
+
+	void setTokenStopIndex(int index);
+
+	Tree dupTree();
+
+	Tree dupNode();
+
+	/** Return a token type; needed for tree parsing */
+	int getType();
+
+	String getText();
+
+	/** In case we don't have a token payload, what is the line for errors? */
+	int getLine();
+
+	int getCharPositionInLine();
+
+	String toStringTree();
+
+	String toString();
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeAdaptor.java b/runtime/Java/src/org/antlr/runtime/tree/TreeAdaptor.java
new file mode 100644
index 0000000..ff847f0
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreeAdaptor.java
@@ -0,0 +1,212 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+/** How to create and navigate trees.  Rather than have a separate factory
+ *  and adaptor, I've merged them.  Makes sense to encapsulate.
+ *
+ *  This takes the place of the tree construction code generated in the
+ *  generated code in 2.x and the ASTFactory.
+ *
+ *  I do not need to know the type of a tree at all so they are all
+ *  generic Objects.  This may increase the amount of typecasting needed. :(
+ */
public interface TreeAdaptor {
	// C o n s t r u c t i o n

	/** Create a tree node from Token object; for CommonTree type trees,
	 *  then the token just becomes the payload.  This is the most
	 *  common create call.
	 */
	public Object create(Token payload);

	/** Duplicate tree recursively, using dupNode() for each node */
	public Object dupTree(Object tree);

	/** Duplicate a single tree node */
	public Object dupNode(Object treeNode);

	/** Return a nil node (an empty but non-null node) that can hold
	 *  a list of elements as the children.  If you want a flat tree (a list)
	 *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
	 */
	public Object nil();

	/** Is tree considered a nil node used to make lists of child nodes? */
	public boolean isNil(Object tree);

	/** Add a child to the tree t.  If child is a flat tree (a list), make all
	 *  in list children of t.  Warning: if t has no children, but child does
	 *  and child isNil then you can decide it is ok to move children to t via
	 *  t.children = child.children; i.e., without copying the array.  Just
	 *  make sure that this is consistent with how the user will build
	 *  ASTs.  Do nothing if t or child is null.
	 */
	public void addChild(Object t, Object child);

	/** If oldRoot is a nil root, just copy or move the children to newRoot.
	 *  If not a nil root, make oldRoot a child of newRoot.
	 *
	 *    old=^(nil a b c), new=r yields ^(r a b c)
	 *    old=^(a b c), new=r yields ^(r ^(a b c))
	 *
	 *  If newRoot is a nil-rooted single child tree, use the single
	 *  child as the new root node.
	 *
	 *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
	 *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
	 *
	 *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
	 *
	 *    old=null, new=r yields r
	 *    old=null, new=^(nil r) yields ^(nil r)
	 *
	 *  Return newRoot.  Throw an exception if newRoot is not a
	 *  simple node or nil root with a single child node--it must be a root
	 *  node.  If newRoot is ^(nil x) return x as newRoot.
	 *
	 *  Be advised that it's ok for newRoot to point at oldRoot's
	 *  children; i.e., you don't have to copy the list.  We are
	 *  constructing these nodes so we should have this control for
	 *  efficiency.
	 */
	public Object becomeRoot(Object newRoot, Object oldRoot);

	/** Given the root of the subtree created for this rule, post process
	 *  it to do any simplifications or whatever you want.  A required
	 *  behavior is to convert ^(nil singleSubtree) to singleSubtree
	 *  as the setting of start/stop indexes relies on a single non-nil root
	 *  for non-flat trees.
	 *
	 *  Flat trees such as for lists like "idlist : ID+ ;" are left alone
	 *  unless there is only one ID.  For a list, the start/stop indexes
	 *  are set in the nil node.
	 *
	 *  This method is executed after all rule tree construction and right
	 *  before setTokenBoundaries().
	 */
	public Object rulePostProcessing(Object root);

	/** For identifying trees.
	 *
	 *  How to identify nodes so we can say "add node to a prior node"?
	 *  Even becomeRoot is an issue.  Use System.identityHashCode(node)
	 *  usually.
	 */
	public int getUniqueID(Object node);


	// R e w r i t e  R u l e s

	/** Create a node for newRoot and make it the root of oldRoot.
	 *  If oldRoot is a nil root, just copy or move the children to newRoot.
	 *  If not a nil root, make oldRoot a child of newRoot.
	 *
	 *  Return node created for newRoot.
	 *
	 *  Be advised: when debugging ASTs, the DebugTreeAdaptor manually
	 *  calls create(Token child) and then plain becomeRoot(node, node)
	 *  because it needs to trap calls to create, but it can't since it
	 *  delegates to, rather than inherits from, the TreeAdaptor.
	 */
	public Object becomeRoot(Token newRoot, Object oldRoot);

	/** Create a new node derived from a token, with a new token type.
	 *  This is invoked from an imaginary node ref on right side of a
	 *  rewrite rule as IMAG[$tokenLabel].
	 *
	 *  This should invoke createToken(Token).
	 */
	public Object create(int tokenType, Token fromToken);

	/** Same as create(tokenType,fromToken) except set the text too.
	 *  This is invoked from an imaginary node ref on right side of a
	 *  rewrite rule as IMAG[$tokenLabel, "IMAG"].
	 *
	 *  This should invoke createToken(Token).
	 */
	public Object create(int tokenType, Token fromToken, String text);

	/** Create a new node derived from a token, with a new token type.
	 *  This is invoked from an imaginary node ref on right side of a
	 *  rewrite rule as IMAG["IMAG"].
	 *
	 *  This should invoke createToken(int,String).
	 */
	public Object create(int tokenType, String text);


	// C o n t e n t

	/** For tree parsing, I need to know the token type of a node */
	public int getType(Object t);

	/** Node constructors can set the type of a node */
	public void setType(Object t, int type);

	/** Return the text of node t. */
	public String getText(Object t);

	/** Node constructors can set the text of a node */
	public void setText(Object t, String text);

	/** Return the token object from which this node was created.
	 *  Currently used only for printing an error message.
	 *  The error display routine in BaseRecognizer needs to
	 *  display where in the input the error occurred. If your
	 *  tree implementation does not store information that can
	 *  lead you to the token, you can create a token filled with
	 *  the appropriate information and pass that back.  See
	 *  BaseRecognizer.getErrorMessage().
	 */
	public Token getToken(Object t);

	/** Where are the bounds in the input token stream for this node and
	 *  all children?  Each rule that creates AST nodes will call this
	 *  method right before returning.  Flat trees (i.e., lists) will
	 *  still usually have a nil root node just to hold the children list.
	 *  That node would contain the start/stop indexes then.
	 */
	public void setTokenBoundaries(Object t, Token startToken, Token stopToken);

	/** Get the token start index for this subtree; return -1 if no such index */
	public int getTokenStartIndex(Object t);

	/** Get the token stop index for this subtree; return -1 if no such index */
	public int getTokenStopIndex(Object t);


	// N a v i g a t i o n  /  T r e e  P a r s i n g

	/** Get a child 0..n-1 node */
	public Object getChild(Object t, int i);

	/** How many children?  If 0, then this is a leaf node */
	public int getChildCount(Object t);
}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeNodeStream.java b/runtime/Java/src/org/antlr/runtime/tree/TreeNodeStream.java
new file mode 100644
index 0000000..d945682
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreeNodeStream.java
@@ -0,0 +1,87 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.IntStream;
+import org.antlr.runtime.TokenStream;
+
+/** A stream of tree nodes, accessing nodes from a tree of some kind */
public interface TreeNodeStream extends IntStream {
	/** Get a tree node at an absolute index i; 0..n-1.
	 *  If you don't want to buffer up nodes, then this method makes no
	 *  sense for you.
	 */
	public Object get(int i);

	/** Get tree node at current input pointer + i ahead where i=1 is next node.
	 *  i<0 indicates nodes in the past.  So LT(-1) is previous node, but
	 *  implementations are not required to provide results for k < -1.
	 *  LT(0) is undefined.  For i>=n, return null.
	 *  Return null for LT(0) and any index that results in an absolute address
	 *  that is negative.
	 *
	 *  This is analogous to the LT() method of the TokenStream, but this
	 *  returns a tree node instead of a token.  Makes code gen identical
	 *  for both parser and tree grammars. :)
	 */
	public Object LT(int k);

	/** Where is this stream pulling nodes from?  This is not the name, but
	 *  the object that provides node objects.
	 */
	public Object getTreeSource();

	/** If the tree associated with this stream was created from a TokenStream,
	 *  you can specify it here.  Used to do rule $text attribute in tree
	 *  parser.  Optional unless you use tree parser rule text attribute
	 *  or output=template and rewrite=true options.
	 */
	public TokenStream getTokenStream();

	/** What adaptor can tell me how to interpret/navigate nodes and
	 *  trees.  E.g., get text of a node.
	 */
	public TreeAdaptor getTreeAdaptor();

	/** As we flatten the tree, we use UP, DOWN nodes to represent
	 *  the tree structure.  When debugging we need unique nodes
	 *  so we have to instantiate new ones.  When doing normal tree
	 *  parsing, it's slow and a waste of memory to create unique
	 *  navigation nodes.  Default should be false.
	 */
	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes);

	/** Return the text of all nodes from start to stop, inclusive.
	 *  If the stream does not buffer all the nodes then it can still
	 *  walk recursively from start until stop.  You can always return
	 *  null or "" too, but users should not access $ruleLabel.text in
	 *  an action of course in that case.
	 */
	public String toString(Object start, Object stop);
}
+
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeParser.java b/runtime/Java/src/org/antlr/runtime/tree/TreeParser.java
new file mode 100644
index 0000000..0fad3c3
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreeParser.java
@@ -0,0 +1,135 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.*;
+
+/** A parser for a stream of tree nodes.  "tree grammars" result in a subclass
+ *  of this.  All the error reporting and recovery is shared with Parser via
+ *  the BaseRecognizer superclass.
+*/
+public class TreeParser extends BaseRecognizer {
+	public static final int DOWN = Token.DOWN;
+	public static final int UP = Token.UP;
+
+	protected TreeNodeStream input;
+
+	public TreeParser(TreeNodeStream input) {
+		setTreeNodeStream(input);
+	}
+
+	public void reset() {
+		super.reset(); // reset all recognizer state variables
+		if ( input!=null ) {
+			input.seek(0); // rewind the input
+		}
+	}
+
+	/** Set the input stream */
+	public void setTreeNodeStream(TreeNodeStream input) {
+		this.input = input;
+	}
+
+	public TreeNodeStream getTreeNodeStream() {
+		return input;
+	}
+
+	/** Match '.' in tree parser has special meaning.  Skip node or
+	 *  entire tree if node has children.  If children, scan until
+	 *  corresponding UP node.
+	 */
+	public void matchAny(IntStream ignore) { // ignore stream, copy of this.input
+		errorRecovery = false;
+		failed = false;
+		Object look = input.LT(1);
+		if ( input.getTreeAdaptor().getChildCount(look)==0 ) {
+			input.consume(); // not subtree, consume 1 node and return
+			return;
+		}
+		// current node is a subtree, skip to corresponding UP.
+		// must count nesting level to get right UP
+		int level=0;
+		int tokenType = input.getTreeAdaptor().getType(look);
+		while ( tokenType!=Token.EOF && !(tokenType==UP && level==0) ) {
+			input.consume();
+			look = input.LT(1);
+			tokenType = input.getTreeAdaptor().getType(look);
+			if ( tokenType == DOWN ) {
+				level++;
+			}
+			else if ( tokenType == UP ) {
+				level--;
+			}
+		}
+		input.consume(); // consume UP
+	}
+
+	/** We have DOWN/UP nodes in the stream that have no line info; override.
+	 *  plus we want to alter the exception type.
+	 */
+	protected void mismatch(IntStream input, int ttype, BitSet follow)
+		throws RecognitionException
+	{
+		MismatchedTreeNodeException mte =
+			new MismatchedTreeNodeException(ttype, (TreeNodeStream)input);
+		recoverFromMismatchedToken(input, mte, ttype, follow);
+	}
+
+	/** Prefix error message with the grammar name because message is
+	 *  always intended for the programmer because the parser built
+	 *  the input tree not the user.
+	 */
+	public String getErrorHeader(RecognitionException e) {
+		return getGrammarFileName()+": node from "+
+			   (e.approximateLineInfo?"after ":"")+"line "+e.line+":"+e.charPositionInLine;
+	}
+
+	/** Tree parsers parse nodes they usually have a token object as
+	 *  payload. Set the exception token and do the default behavior.
+	 */
+	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
+		if ( this instanceof TreeParser ) {
+			TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor();
+			e.token = adaptor.getToken(e.node);
+			if ( e.token==null ) { // could be an UP/DOWN node
+				e.token = new CommonToken(adaptor.getType(e.node),
+										  adaptor.getText(e.node));
+			}
+		}
+		return super.getErrorMessage(e, tokenNames);
+	}
+
+	public void traceIn(String ruleName, int ruleIndex)  {
+		super.traceIn(ruleName, ruleIndex, input.LT(1));
+	}
+
+	public void traceOut(String ruleName, int ruleIndex)  {
+		super.traceOut(ruleName, ruleIndex, input.LT(1));
+	}
+
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreePatternLexer.java b/runtime/Java/src/org/antlr/runtime/tree/TreePatternLexer.java
new file mode 100644
index 0000000..a23149c
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreePatternLexer.java
@@ -0,0 +1,135 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
public class TreePatternLexer {
	public static final int EOF = -1;
	public static final int BEGIN = 1;   // '('
	public static final int END = 2;     // ')'
	public static final int ID = 3;      // identifier / token name
	public static final int ARG = 4;     // bracketed [text] argument
	public static final int PERCENT = 5; // '%'
	public static final int COLON = 6;   // ':'
	public static final int DOT = 7;     // '.' wildcard

	/** The tree pattern to lex like "(A B C)" */
	protected String pattern;

	/** Index into input string */
	protected int p = -1;

	/** Current char */
	protected int c;

	/** How long is the pattern in char? */
	protected int n;

	/** Set when token type is ID or ARG (name mimics Java's StreamTokenizer) */
	public StringBuffer sval = new StringBuffer();

	/** True after a lexical error: illegal char or unterminated [...] arg. */
	public boolean error = false;

	public TreePatternLexer(String pattern) {
		this.pattern = pattern;
		this.n = pattern.length();
		consume(); // prime the first character
	}

	/** Return the next token type; sval holds the text for ID and ARG.
	 *  Returns EOF at end of pattern or on error (check the error flag).
	 */
	public int nextToken() {
		sval.setLength(0); // reset, but reuse buffer
		while ( c != EOF ) {
			if ( c==' ' || c=='\n' || c=='\r' || c=='\t' ) {
				consume();
				continue;
			}
			if ( (c>='a' && c<='z') || (c>='A' && c<='Z') || c=='_' ) {
				sval.append((char)c);
				consume();
				while ( (c>='a' && c<='z') || (c>='A' && c<='Z') ||
						(c>='0' && c<='9') || c=='_' )
				{
					sval.append((char)c);
					consume();
				}
				return ID;
			}
			if ( c=='(' ) {
				consume();
				return BEGIN;
			}
			if ( c==')' ) {
				consume();
				return END;
			}
			if ( c=='%' ) {
				consume();
				return PERCENT;
			}
			if ( c==':' ) {
				consume();
				return COLON;
			}
			if ( c=='.' ) {
				consume();
				return DOT;
			}
			if ( c=='[' ) { // grab [x] as a string, returning x
				consume();
				while ( c!=']' && c!=EOF ) {
					if ( c=='\\' ) {
						consume();
						if ( c==EOF ) { // dangling backslash at end of pattern
							break;
						}
						if ( c!=']' ) {
							sval.append('\\'); // keep '\' unless it escapes ']'
						}
						sval.append((char)c);
					}
					else {
						sval.append((char)c);
					}
					consume();
				}
				if ( c==EOF ) {
					// BUGFIX: an unterminated [...] previously looped forever
					// (c stays EOF, which never equals ']'), appending \uFFFF
					// chars until out-of-memory.  Report an error instead.
					error = true;
					return EOF;
				}
				consume(); // skip closing ']'
				return ARG;
			}
			consume();
			error = true;
			return EOF;
		}
		return EOF;
	}

	/** Advance to the next character of the pattern, or EOF past the end. */
	protected void consume() {
		p++;
		if ( p>=n ) {
			c = EOF;
		}
		else {
			c = pattern.charAt(p);
		}
	}
}
\ No newline at end of file
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreePatternParser.java b/runtime/Java/src/org/antlr/runtime/tree/TreePatternParser.java
new file mode 100644
index 0000000..9298f89
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreePatternParser.java
@@ -0,0 +1,156 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.CommonToken;
+
+public class TreePatternParser {
+	protected TreePatternLexer tokenizer;
+	protected int ttype;
+	protected TreeWizard wizard;
+	protected TreeAdaptor adaptor;
+
+	public TreePatternParser(TreePatternLexer tokenizer, TreeWizard wizard, TreeAdaptor adaptor) {
+		this.tokenizer = tokenizer;
+		this.wizard = wizard;
+		this.adaptor = adaptor;
+		ttype = tokenizer.nextToken(); // kickstart
+	}
+
+	public Object pattern() {
+		if ( ttype==TreePatternLexer.BEGIN ) {
+			return parseTree();
+		}
+		else if ( ttype==TreePatternLexer.ID ) {
+			Object node = parseNode();
+			if ( ttype==TreePatternLexer.EOF ) {
+				return node;
+			}
+			return null; // extra junk on end
+		}
+		return null;
+	}
+
+	public Object parseTree() {
+		if ( ttype != TreePatternLexer.BEGIN ) {
+			System.out.println("no BEGIN");
+			return null;
+		}
+		ttype = tokenizer.nextToken();
+		Object root = parseNode();
+		if ( root==null ) {
+			return null;
+		}
+		while ( ttype==TreePatternLexer.BEGIN ||
+				ttype==TreePatternLexer.ID ||
+				ttype==TreePatternLexer.PERCENT ||
+				ttype==TreePatternLexer.DOT )
+		{
+			if ( ttype==TreePatternLexer.BEGIN ) {
+				Object subtree = parseTree();
+				adaptor.addChild(root, subtree);
+			}
+			else {
+				Object child = parseNode();
+				if ( child==null ) {
+					return null;
+				}
+				adaptor.addChild(root, child);
+			}
+		}
+		if ( ttype != TreePatternLexer.END ) {
+			System.out.println("no END");
+			return null;
+		}
+		ttype = tokenizer.nextToken();
+		return root;
+	}
+
+	public Object parseNode() {
+		// "%label:" prefix
+		String label = null;
+		if ( ttype == TreePatternLexer.PERCENT ) {
+			ttype = tokenizer.nextToken();
+			if ( ttype != TreePatternLexer.ID ) {
+				return null;
+			}
+			label = tokenizer.sval.toString();
+			ttype = tokenizer.nextToken();
+			if ( ttype != TreePatternLexer.COLON ) {
+				return null;
+			}
+			ttype = tokenizer.nextToken(); // move to ID following colon
+		}
+
+		// Wildcard?
+		if ( ttype == TreePatternLexer.DOT ) {
+			ttype = tokenizer.nextToken();
+			Token wildcardPayload = new CommonToken(0, ".");
+			TreeWizard.TreePattern node =
+				new TreeWizard.WildcardTreePattern(wildcardPayload);
+			if ( label!=null ) {
+				node.label = label;
+			}
+			return node;
+		}
+
+		// "ID" or "ID[arg]"
+		if ( ttype != TreePatternLexer.ID ) {
+			return null;
+		}
+		String tokenName = tokenizer.sval.toString();
+		ttype = tokenizer.nextToken();
+		if ( tokenName.equals("nil") ) {
+			return adaptor.nil();
+		}
+		String text = tokenName;
+		// check for arg
+		String arg = null;
+		if ( ttype == TreePatternLexer.ARG ) {
+			arg = tokenizer.sval.toString();
+			text = arg;
+			ttype = tokenizer.nextToken();
+		}
+		
+		// create node
+		int treeNodeType = (Integer)wizard.getTokenType(tokenName);
+		if ( treeNodeType==Token.INVALID_TOKEN_TYPE ) {
+			return null;
+		}
+		Object node;
+		node = adaptor.create(treeNodeType, text);
+		if ( label!=null && node.getClass()==TreeWizard.TreePattern.class ) {
+			((TreeWizard.TreePattern)node).label = label;
+		}
+		if ( arg!=null && node.getClass()==TreeWizard.TreePattern.class ) {
+			((TreeWizard.TreePattern)node).hasTextArg = true;
+		}
+		return node;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java b/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java
new file mode 100644
index 0000000..eca2c59
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java
@@ -0,0 +1,40 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RuleReturnScope;
+
+/** This is identical to the ParserRuleReturnScope except that
+ *  the start property is a tree nodes not Token object
+ *  when you are parsing trees.  To be generic the tree node types
+ *  have to be Object.
+ */
public class TreeRuleReturnScope extends RuleReturnScope {
	/** First node or root node of tree matched for this rule.
	 *  Declared as Object so any tree node implementation can be stored
	 *  (see the class comment above).
	 */
	public Object start;
}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeWizard.java b/runtime/Java/src/org/antlr/runtime/tree/TreeWizard.java
new file mode 100644
index 0000000..162beb6
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/TreeWizard.java
@@ -0,0 +1,409 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.Map;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+
+/** Build and navigate trees with this object.  Must know about the names
+ *  of tokens so you have to pass in a map or array of token names (from which
+ *  this class can build the map).  I.e., Token DECL means nothing unless the
+ *  class can translate it to a token type.
+ *
+ *  In order to create nodes and navigate, this class needs a TreeAdaptor.
+ *
+ *  This class can build a token type -> node index for repeated use or for
+ *  iterating over the various nodes with a particular type.
+ *
+ *  This class works in conjunction with the TreeAdaptor rather than moving
+ *  all this functionality into the adaptor.  An adaptor helps build and
+ *  navigate trees using methods.  This class helps you do it with string
+ *  patterns like "(A B C)".  You can create a tree from that pattern or
+ *  match subtrees against it.
+ */
+public class TreeWizard {
+	/** How to create and navigate nodes of any tree node type. */
+	protected TreeAdaptor adaptor;
+	/** Maps token name (String) -> token type (Integer); may be null. */
+	protected Map tokenNameToTypeMap;
+
+	/** Callback invoked on each matching node during visit(); when matching
+	 *  by pattern, labels maps pattern labels to the matched nodes.
+	 */
+	public interface ContextVisitor {
+		// TODO: should this be called visit or something else?
+		public void visit(Object t, Object parent, int childIndex, Map labels);
+	}
+
+	/** Convenience base class for visitors that only need the node itself. */
+	public static abstract class Visitor implements ContextVisitor {
+		public void visit(Object t, Object parent, int childIndex, Map labels) {
+			visit(t);
+		}
+		public abstract void visit(Object t);
+	}
+
+	/** When using %label:TOKENNAME in a tree for parse(), we must
+	 *  track the label.
+	 */
+	public static class TreePattern extends CommonTree {
+		public String label;
+		public boolean hasTextArg;
+		public TreePattern(Token payload) {
+			super(payload);
+		}
+		public String toString() {
+			if ( label!=null ) {
+				return "%"+label+":"+super.toString();
+			}
+			else {
+				return super.toString();
+			}
+		}
+	}
+
+	/** Pattern node built for the '.' (dot) wildcard; matches any one node. */
+	public static class WildcardTreePattern extends TreePattern {
+		public WildcardTreePattern(Token payload) {
+			super(payload);
+		}
+	}
+
+	/** This adaptor creates TreePattern objects for use during scan() */
+	public static class TreePatternTreeAdaptor extends CommonTreeAdaptor {
+		public Object create(Token payload) {
+			return new TreePattern(payload);
+		}
+	}
+
+	/** Create a wizard with no token name mapping; getTokenType() returns
+	 *  INVALID_TOKEN_TYPE for every name until a map is supplied.
+	 */
+	public TreeWizard(TreeAdaptor adaptor) {
+		this.adaptor = adaptor;
+	}
+
+	public TreeWizard(TreeAdaptor adaptor, Map tokenNameToTypeMap) {
+		this.adaptor = adaptor;
+		this.tokenNameToTypeMap = tokenNameToTypeMap;
+	}
+
+	public TreeWizard(TreeAdaptor adaptor, String[] tokenNames) {
+		this.adaptor = adaptor;
+		this.tokenNameToTypeMap = computeTokenTypes(tokenNames);
+	}
+
+	/** NOTE(review): leaves adaptor null, so create() and the tree-walking
+	 *  methods will NPE if called on an instance built this way -- confirm
+	 *  callers only use getTokenType() with it.
+	 */
+	public TreeWizard(String[] tokenNames) {
+		this(null, tokenNames);
+	}
+
+	/** Compute a Map<String, Integer> that is an inverted index of
+	 *  tokenNames (which maps int token types to names).
+	 */
+	public Map computeTokenTypes(String[] tokenNames) {
+		Map m = new HashMap();
+		for (int ttype = Token.MIN_TOKEN_TYPE; ttype < tokenNames.length; ttype++) {
+			String name = tokenNames[ttype];
+			m.put(name, new Integer(ttype));
+		}
+		return m;
+	}
+
+	/** Using the map of token names to token types, return the type.
+	 *  Returns Token.INVALID_TOKEN_TYPE when the map is null or the
+	 *  name is unknown.
+	 */
+	public int getTokenType(String tokenName) {
+	 	if ( tokenNameToTypeMap==null ) {
+			 return Token.INVALID_TOKEN_TYPE;
+		 }
+		Integer ttypeI = (Integer)tokenNameToTypeMap.get(tokenName);
+		if ( ttypeI!=null ) {
+			return ttypeI.intValue();
+		}
+		return Token.INVALID_TOKEN_TYPE;
+	}
+
+	/** Walk the entire tree and make a node name to nodes mapping.
+	 *  For now, use recursion but later nonrecursive version may be
+	 *  more efficient.  Returns Map<Integer, List> where the List is
+	 *  of your AST node type.  The Integer is the token type of the node.
+	 *
+	 *  TODO: save this index so that find and visit are faster
+	 */
+	public Map index(Object t) {
+		Map m = new HashMap();
+		_index(t, m);
+		return m;
+	}
+
+	/** Do the work for index */
+	protected void _index(Object t, Map m) {
+		if ( t==null ) {
+			return;
+		}
+		int ttype = adaptor.getType(t);
+		// NOTE(review): relies on autoboxing of the int ttype (Java 5+)
+		// to match the Integer keys stored by the put() below -- confirm
+		// target JDK level.
+		List elements = (List)m.get(ttype);
+		if ( elements==null ) {
+			elements = new ArrayList();
+			m.put(new Integer(ttype), elements);
+		}
+		elements.add(t);
+		int n = adaptor.getChildCount(t);
+		for (int i=0; i<n; i++) {
+			Object child = adaptor.getChild(t, i);
+			_index(child, m);
+		}
+	}
+
+	/** Return a List of tree nodes with token type ttype */
+	public List find(Object t, int ttype) {
+		final List nodes = new ArrayList();
+		visit(t, ttype, new TreeWizard.Visitor() {
+			public void visit(Object t) {
+				nodes.add(t);
+			}
+		});
+		return nodes;
+	}
+
+	/** Return a List of subtrees matching pattern.  Returns null for
+	 *  invalid patterns (nil-rooted or wildcard-rooted).
+	 */
+	public List find(Object t, String pattern) {
+		final List subtrees = new ArrayList();
+		// Create a TreePattern from the pattern
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser =
+			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+		final TreePattern tpattern = (TreePattern)parser.pattern();
+		// don't allow invalid patterns
+		if ( tpattern==null ||
+			 tpattern.isNil() ||
+			 tpattern.getClass()==WildcardTreePattern.class )
+		{
+			return null;
+		}
+		int rootTokenType = tpattern.getType();
+		visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
+			public void visit(Object t, Object parent, int childIndex, Map labels) {
+				if ( _parse(t, tpattern, null) ) {
+					subtrees.add(t);
+				}
+			}
+		});
+		return subtrees;
+	}
+
+	/** TODO: not yet implemented; always returns null. */
+	public Object findFirst(Object t, int ttype) {
+		return null;
+	}
+
+	/** TODO: not yet implemented; always returns null. */
+	public Object findFirst(Object t, String pattern) {
+		return null;
+	}
+
+	/** Visit every ttype node in t, invoking the visitor.  This is a quicker
+	 *  version of the general visit(t, pattern) method.  The labels arg
+	 *  of the visitor action method is never set (it's null) since using
+	 *  a token type rather than a pattern doesn't let us set a label.
+	 */
+	public void visit(Object t, int ttype, ContextVisitor visitor) {
+		_visit(t, null, 0, ttype, visitor);
+	}
+
+	/** Do the recursive work for visit */
+	protected void _visit(Object t, Object parent, int childIndex, int ttype, ContextVisitor visitor) {
+		if ( t==null ) {
+			return;
+		}
+		if ( adaptor.getType(t)==ttype ) {
+			visitor.visit(t, parent, childIndex, null);
+		}
+		int n = adaptor.getChildCount(t);
+		for (int i=0; i<n; i++) {
+			Object child = adaptor.getChild(t, i);
+			_visit(child, t, i, ttype, visitor);
+		}
+	}
+
+	/** For all subtrees that match the pattern, execute the visit action.
+	 *  The implementation uses the root node of the pattern in combination
+	 *  with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
+	 *  Patterns with wildcard roots are also not allowed.
+	 */
+	public void visit(Object t, final String pattern, final ContextVisitor visitor) {
+		// Create a TreePattern from the pattern
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser =
+			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+		final TreePattern tpattern = (TreePattern)parser.pattern();
+		// don't allow invalid patterns
+		if ( tpattern==null ||
+			 tpattern.isNil() ||
+			 tpattern.getClass()==WildcardTreePattern.class )
+		{
+			return;
+		}
+		final Map labels = new HashMap(); // reused for each _parse
+		int rootTokenType = tpattern.getType();
+		visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
+			public void visit(Object t, Object parent, int childIndex, Map unusedlabels) {
+				// the unusedlabels arg is null as visit on token type doesn't set.
+				labels.clear();
+				if ( _parse(t, tpattern, labels) ) {
+					visitor.visit(t, parent, childIndex, labels);
+				}
+			}
+		});
+	}
+
+	/** Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
+	 *  on the various nodes and '.' (dot) as the node/subtree wildcard,
+	 *  return true if the pattern matches and fill the labels Map with
+	 *  the labels pointing at the appropriate nodes.  Return false if
+	 *  the pattern is malformed or the tree does not match.
+	 *
+	 *  If a node specifies a text arg in pattern, then that must match
+	 *  for that node in t.
+	 *
+	 *  TODO: what's a better way to indicate bad pattern? Exceptions are a hassle
+	 */
+	public boolean parse(Object t, String pattern, Map labels) {
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser =
+			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+		TreePattern tpattern = (TreePattern)parser.pattern();
+		/*
+		System.out.println("t="+((Tree)t).toStringTree());
+		System.out.println("scant="+tpattern.toStringTree());
+		*/
+		boolean matched = _parse(t, tpattern, labels);
+		return matched;
+	}
+
+	/** Same as parse(t, pattern, labels) but without label tracking. */
+	public boolean parse(Object t, String pattern) {
+		return parse(t, pattern, null);
+	}
+
+	/** Do the work for parse. Check to see if the t2 pattern fits the
+	 *  structure and token types in t1.  Check text if the pattern has
+	 *  text arguments on nodes.  Fill labels map with pointers to nodes
+	 *  in tree matched against nodes in pattern with labels.
+	 */
+	protected boolean _parse(Object t1, TreePattern t2, Map labels) {
+		// make sure both are non-null
+		if ( t1==null || t2==null ) {
+			return false;
+		}
+		// check roots (wildcard matches anything)
+		if ( t2.getClass() != WildcardTreePattern.class ) {
+			if ( adaptor.getType(t1) != t2.getType() ) {
+				return false;
+			}
+			if ( t2.hasTextArg && !adaptor.getText(t1).equals(t2.getText()) ) {
+				return false;
+			}
+		}
+		if ( t2.label!=null && labels!=null ) {
+			// map label in pattern to node in t1
+			labels.put(t2.label, t1);
+		}
+		// check children
+		int n1 = adaptor.getChildCount(t1);
+		int n2 = t2.getChildCount();
+		if ( n1 != n2 ) {
+			return false;
+		}
+		for (int i=0; i<n1; i++) {
+			Object child1 = adaptor.getChild(t1, i);
+			TreePattern child2 = (TreePattern)t2.getChild(i);
+			if ( !_parse(child1, child2, labels) ) {
+				return false;
+			}
+		}
+		return true;
+	}
+
+	/** Create a tree or node from the indicated tree pattern that closely
+	 *  follows ANTLR tree grammar tree element syntax:
+	 *
+	 * 		(root child1 ... child2).
+	 *
+	 *  You can also just pass in a node: ID
+	 *
+	 *  Any node can have a text argument: ID[foo]
+	 *  (notice there are no quotes around foo--it's clear it's a string).
+	 *
+	 *  nil is a special name meaning "give me a nil node".  Useful for
+	 *  making lists: (nil A B C) is a list of A B C.
+ 	 */
+	public Object create(String pattern) {
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser = new TreePatternParser(tokenizer, this, adaptor);
+		Object t = parser.pattern();
+		return t;
+	}
+
+	/** Compare t1 and t2; return true if token types/text, structure match exactly.
+	 *  The trees are examined in their entirety so that (A B) does not match
+	 *  (A B C) nor (A (B C)).
+	 // TODO: allow them to pass in a comparator
+	 *  TODO: have a version that is nonstatic so it can use instance adaptor
+	 *
+	 *  I cannot rely on the tree node's equals() implementation as I make
+	 *  no constraints at all on the node types nor interface etc...
+	 */
+	public static boolean equals(Object t1, Object t2, TreeAdaptor adaptor) {
+		return _equals(t1, t2, adaptor);
+	}
+
+	/** Compare type, structure, and text of two trees, assuming adaptor in
+	 *  this instance of a TreeWizard.
+	 *  NOTE: this is a two-argument overload, not an override of
+	 *  Object.equals(Object).
+	 */
+	public boolean equals(Object t1, Object t2) {
+		return _equals(t1, t2, adaptor);
+	}
+
+	/** Recursive worker for both equals() variants. */
+	protected static boolean _equals(Object t1, Object t2, TreeAdaptor adaptor) {
+		// make sure both are non-null
+		if ( t1==null || t2==null ) {
+			return false;
+		}
+		// check roots
+		if ( adaptor.getType(t1) != adaptor.getType(t2) ) {
+			return false;
+		}
+		if ( !adaptor.getText(t1).equals(adaptor.getText(t2)) ) {
+			return false;
+		}
+		// check children
+		int n1 = adaptor.getChildCount(t1);
+		int n2 = adaptor.getChildCount(t2);
+		if ( n1 != n2 ) {
+			return false;
+		}
+		for (int i=0; i<n1; i++) {
+			Object child1 = adaptor.getChild(t1, i);
+			Object child2 = adaptor.getChild(t2, i);
+			if ( !_equals(child1, child2, adaptor) ) {
+				return false;
+			}
+		}
+		return true;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/UnBufferedTreeNodeStream.java b/runtime/Java/src/org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
new file mode 100644
index 0000000..a4aff58
--- /dev/null
+++ b/runtime/Java/src/org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
@@ -0,0 +1,561 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Stack;
+
+/** A stream of tree nodes, accessing nodes from a tree of ANY kind.
+ *  No new nodes should be created in tree during the walk.  A small buffer
+ *  of tokens is kept to efficiently and easily handle LT(i) calls, though
+ *  the lookahead mechanism is fairly complicated.
+ *
+ *  For tree rewriting during tree parsing, this must also be able
+ *  to replace a set of children without "losing its place".
+ *  That part is not yet implemented.  Will permit a rule to return
+ *  a different tree and have it stitched into the output tree probably.
+ *
+ *  @see CommonTreeNodeStream
+ */
+public class UnBufferedTreeNodeStream implements TreeNodeStream {
+	public static final int INITIAL_LOOKAHEAD_BUFFER_SIZE = 5;
+
+	/** Reuse same DOWN, UP navigation nodes unless this is true */
+	protected boolean uniqueNavigationNodes = false;
+
+	/** Pull nodes from which tree? */
+	protected Object root;
+
+	/** IF this tree (root) was created from a token stream, track it. */
+	protected TokenStream tokens;
+
+	/** What tree adaptor was used to build these trees */
+	TreeAdaptor adaptor;
+
+	/** As we walk down the nodes, we must track parent nodes so we know
+	 *  where to go after walking the last child of a node.  When visiting
+	 *  a child, push current node and current index.
+	 */
+	protected Stack nodeStack = new Stack();
+
+	/** Track which child index you are visiting for each node we push.
+	 *  TODO: pretty inefficient...use int[] when you have time
+	 */
+	protected Stack indexStack = new Stack();
+
+	/** Which node are we currently visiting? */
+	protected Object currentNode;
+
+	/** Which node did we visit last?  Used for LT(-1) calls. */
+	protected Object previousNode;
+
+	/** Which child are we currently visiting?  If -1 we have not visited
+	 *  this node yet; next consume() request will set currentIndex to 0.
+	 */
+	protected int currentChildIndex;
+
+	/** What node index did we just consume?  i=0..n-1 for n node trees.
+	 *  IntStream.next is hence 1 + this value.  Size will be same.
+	 */
+	protected int absoluteNodeIndex;
+
+	/** Buffer tree node stream for use with LT(i).  This list grows
+	 *  to fit new lookahead depths, but consume() wraps like a circular
+	 *  buffer.
+	 */
+	protected Object[] lookahead = new Object[INITIAL_LOOKAHEAD_BUFFER_SIZE];
+
+	/** lookahead[head] is the first symbol of lookahead, LT(1). */
+	protected int head;
+
+	/** Add new lookahead at lookahead[tail].  tail wraps around at the
+	 *  end of the lookahead buffer so tail could be less than head.
+	  */
+	protected int tail;
+
+	/** When walking ahead with cyclic DFA or for syntactic predicates,
+	  *  we need to record the state of the tree node stream.  This
+	 *  class wraps up the current state of the UnBufferedTreeNodeStream.
+	 *  Calling mark() will push another of these on the markers stack.
+	 */
+	protected class TreeWalkState {
+		int currentChildIndex;
+		int absoluteNodeIndex;
+		Object currentNode;
+		Object previousNode;
+		/** Record state of the nodeStack */
+		int nodeStackSize;
+		/** Record state of the indexStack */
+		int indexStackSize;
+		Object[] lookahead;
+	}
+
+	/** Calls to mark() may be nested so we have to track a stack of
+	 *  them.  The marker is an index into this stack.
+	 *  This is a List<TreeWalkState>.  Indexed from 1..markDepth.
+	 *  A null is kept @ index 0.  Create upon first call to mark().
+	 */
+	protected List markers;
+
+	/** tracks how deep mark() calls are nested */
+	protected int markDepth = 0;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	// navigation nodes
+
+	protected Object down;
+	protected Object up;
+	protected Object eof;
+
+	/** Walk tree with a default CommonTreeAdaptor. */
+	public UnBufferedTreeNodeStream(Object tree) {
+		this(new CommonTreeAdaptor(), tree);
+	}
+
+	public UnBufferedTreeNodeStream(TreeAdaptor adaptor, Object tree) {
+		this.root = tree;
+		this.adaptor = adaptor;
+		reset();
+		down = adaptor.create(Token.DOWN, "DOWN");
+		up = adaptor.create(Token.UP, "UP");
+		eof = adaptor.create(Token.EOF, "EOF");
+	}
+
+	/** Reposition walk to root; empties the lookahead buffer. */
+	public void reset() {
+		currentNode = root;
+		previousNode = null;
+		currentChildIndex = -1;
+		absoluteNodeIndex = -1;
+		head = tail = 0;
+	}
+
+	// Satisfy TreeNodeStream
+
+	public Object get(int i) {
+		throw new UnsupportedOperationException("stream is unbuffered");
+	}
+
+	/** Get tree node at current input pointer + i ahead where i=1 is next node.
+	 *  i<0 indicates nodes in the past.  So -1 is previous node and -2 is
+	 *  two nodes ago. LT(0) is undefined.  For i>=n, return null.
+	 *  Return null for LT(0) and any index that results in an absolute address
+	 *  that is negative.
+	 *
+	 *  This is analogous to the LT() method of the TokenStream, but this
+	 *  returns a tree node instead of a token.  Makes code gen identical
+	 *  for both parser and tree grammars. :)
+	 */
+	public Object LT(int k) {
+		//System.out.println("LT("+k+"); head="+head+", tail="+tail);
+		if ( k==-1 ) {
+			return previousNode;
+		}
+		if ( k<0 ) {
+			throw new IllegalArgumentException("tree node streams cannot look backwards more than 1 node");
+		}
+		if ( k==0 ) {
+			return Tree.INVALID_NODE;
+		}
+		fill(k);
+		return lookahead[(head+k-1)%lookahead.length];
+	}
+
+	/** Where is this stream pulling nodes from?  This is not the name, but
+	 *  the object that provides node objects.
+	 */
+	public Object getTreeSource() {
+		return root;
+	}
+
+	public TokenStream getTokenStream() {
+		return tokens;
+	}
+
+	public void setTokenStream(TokenStream tokens) {
+		this.tokens = tokens;
+	}
+
+	/** Make sure we have at least k symbols in lookahead buffer */
+	protected void fill(int k) {
+		int n = getLookaheadSize();
+		//System.out.println("we have "+n+" nodes; need "+(k-n));
+		for (int i=1; i<=k-n; i++) {
+			next(); // get at least k-depth lookahead nodes
+		}
+	}
+
+	/** Add a node to the lookahead buffer.  Add at lookahead[tail].
+	 *  If tail+1 == head, then we must create a bigger buffer
+	 *  and copy all the nodes over plus reset head, tail.  After
+	 *  this method, LT(1) will be lookahead[0].
+	 */
+	protected void addLookahead(Object node) {
+		//System.out.println("addLookahead head="+head+", tail="+tail);
+		lookahead[tail] = node;
+		tail = (tail+1)%lookahead.length;
+		if ( tail==head ) {
+			// buffer overflow: tail caught up with head
+			// allocate a buffer 2x as big
+			Object[] bigger = new Object[2*lookahead.length];
+			// copy head to end of buffer to beginning of bigger buffer
+			int remainderHeadToEnd = lookahead.length-head;
+			System.arraycopy(lookahead, head, bigger, 0, remainderHeadToEnd);
+			// copy 0..tail to after that
+			System.arraycopy(lookahead, 0, bigger, remainderHeadToEnd, tail);
+			lookahead = bigger; // reset to bigger buffer
+			head = 0;
+			tail += remainderHeadToEnd;
+		}
+	}
+
+	// Satisfy IntStream interface
+
+	public void consume() {
+		/*
+		System.out.println("consume: currentNode="+currentNode.getType()+
+						   " childIndex="+currentChildIndex+
+						   " nodeIndex="+absoluteNodeIndex);
+						   */
+		// make sure there is something in lookahead buf, which might call next()
+		fill(1);
+		absoluteNodeIndex++;
+		previousNode = lookahead[head]; // track previous node before moving on
+		head = (head+1) % lookahead.length;
+	}
+
+	public int LA(int i) {
+		Object t = LT(i);
+		if ( t==null ) {
+			return Token.INVALID_TOKEN_TYPE;
+		}
+		return adaptor.getType(t);
+	}
+
+	/** Record the current state of the tree walk which includes
+	 *  the current node and stack state as well as the lookahead
+	 *  buffer.
+	 */
+	public int mark() {
+		if ( markers==null ) {
+			markers = new ArrayList();
+			markers.add(null); // depth 0 means no backtracking, leave blank
+		}
+		markDepth++;
+		TreeWalkState state = null;
+		if ( markDepth>=markers.size() ) {
+			state = new TreeWalkState();
+			markers.add(state);
+		}
+		else {
+			state = (TreeWalkState)markers.get(markDepth);
+		}
+		state.absoluteNodeIndex = absoluteNodeIndex;
+		state.currentChildIndex = currentChildIndex;
+		state.currentNode = currentNode;
+		state.previousNode = previousNode;
+		state.nodeStackSize = nodeStack.size();
+		state.indexStackSize = indexStack.size();
+		// take snapshot of lookahead buffer
+		int n = getLookaheadSize();
+		int i=0;
+		state.lookahead = new Object[n];
+		for (int k=1; k<=n; k++,i++) {
+			state.lookahead[i] = LT(k);
+		}
+		lastMarker = markDepth;
+		return markDepth;
+	}
+
+	public void release(int marker) {
+		// unwind any other markers made after marker and release marker
+		markDepth = marker;
+		// release this marker
+		markDepth--;
+	}
+
+	/** Rewind the current state of the tree walk to the state it
+	 *  was in when mark() was called and it returned marker.  Also,
+	 *  wipe out the lookahead which will force reloading a few nodes
+	 *  but it is better than making a copy of the lookahead buffer
+	 *  upon mark().
+	 */
+	public void rewind(int marker) {
+		if ( markers==null ) {
+			return;
+		}
+		TreeWalkState state = (TreeWalkState)markers.get(marker);
+		absoluteNodeIndex = state.absoluteNodeIndex;
+		currentChildIndex = state.currentChildIndex;
+		currentNode = state.currentNode;
+		previousNode = state.previousNode;
+		// drop node and index stacks back to old size
+		nodeStack.setSize(state.nodeStackSize);
+		indexStack.setSize(state.indexStackSize);
+		head = tail = 0; // wipe lookahead buffer and then refill from snapshot
+		for (; tail<state.lookahead.length; tail++) {
+			lookahead[tail] = state.lookahead[tail];
+		}
+		release(marker);
+	}
+
+	public void rewind() {
+		rewind(lastMarker);
+	}
+
+	/** consume() ahead until we hit index.  Can't just jump ahead--must
+	 *  spit out the navigation nodes.
+	 */
+	public void seek(int index) {
+		if ( index<this.index() ) {
+			throw new IllegalArgumentException("can't seek backwards in node stream");
+		}
+		// seek forward, consume until we hit index
+		while ( this.index()<index ) {
+			consume();
+		}
+	}
+
+	public int index() {
+		return absoluteNodeIndex+1;
+	}
+
+	/** Expensive to compute; recursively walk tree to find size;
+	 *  include navigation nodes and EOF.  Reuse functionality
+	 *  in CommonTreeNodeStream as we only really use this
+	 *  for testing.
+	 */
+	public int size() {
+		CommonTreeNodeStream s = new CommonTreeNodeStream(root);
+		return s.size();
+	}
+
+	/** Return the next node found during a depth-first walk of root.
+	 *  Also, add these nodes and DOWN/UP imaginary nodes into the lookahead
+	 *  buffer as a side-effect.  Normally side-effects are bad, but because
+	 *  we can emit many tokens for every next() call, it's pretty hard to
+	 *  use a single return value for that.  We must add these tokens to
+	 *  the lookahead buffer.
+	 *
+	 *  This does *not* return the DOWN/UP nodes; those are only returned
+	 *  by the LT() method.
+	 *
+	 *  Ugh.  This mechanism is much more complicated than a recursive
+	 *  solution, but it's the only way to provide nodes on-demand instead
+	 *  of walking once completely through and buffering up the nodes. :(
+	 */
+	public Object next() {
+		// already walked entire tree; nothing to return
+		if ( currentNode==null ) {
+			addLookahead(eof);
+			// this is infinite stream returning EOF at end forever
+			// so don't throw NoSuchElementException
+			return null;
+		}
+
+		// initial condition (first time method is called)
+		if ( currentChildIndex==-1 ) {
+			return handleRootNode();
+		}
+
+		// index is in the child list?
+		if ( currentChildIndex<adaptor.getChildCount(currentNode) ) {
+			return visitChild(currentChildIndex);
+		}
+
+		// hit end of child list, return to parent node or its parent ...
+		walkBackToMostRecentNodeWithUnvisitedChildren();
+		if ( currentNode!=null ) {
+			return visitChild(currentChildIndex);
+		}
+
+		return null;
+	}
+
+	/** Handle the very first next() call: emit the root (or, for a nil
+	 *  root, its first child) and prime the child cursor.
+	 */
+	protected Object handleRootNode() {
+		Object node;
+		node = currentNode;
+		// point to first child in prep for subsequent next()
+		currentChildIndex = 0;
+		if ( adaptor.isNil(node) ) {
+			// don't count this root nil node
+			node = visitChild(currentChildIndex);
+		}
+		else {
+			addLookahead(node);
+			if ( adaptor.getChildCount(currentNode)==0 ) {
+				// single node case
+				currentNode = null; // say we're done
+			}
+		}
+		return node;
+	}
+
+	/** Descend into child, emitting a DOWN navigation node before the
+	 *  first child of a non-nil node; pushes parent state on the stacks.
+	 */
+	protected Object visitChild(int child) {
+		Object node = null;
+		// save state
+		nodeStack.push(currentNode);
+		indexStack.push(new Integer(child));
+		if ( child==0 && !adaptor.isNil(currentNode) ) {
+			addNavigationNode(Token.DOWN);
+		}
+		// visit child
+		currentNode = adaptor.getChild(currentNode,child);
+		currentChildIndex = 0;
+		node = currentNode;  // record node to return
+		addLookahead(node);
+		walkBackToMostRecentNodeWithUnvisitedChildren();
+		return node;
+	}
+
+	/** As we flatten the tree, we use UP, DOWN nodes to represent
+	 *  the tree structure.  When debugging we need unique nodes
+	 *  so instantiate new ones when uniqueNavigationNodes is true.
+	 */
+	protected void addNavigationNode(final int ttype) {
+		Object navNode = null;
+		if ( ttype==Token.DOWN ) {
+			if ( hasUniqueNavigationNodes() ) {
+				navNode = adaptor.create(Token.DOWN, "DOWN");
+			}
+			else {
+				navNode = down;
+			}
+		}
+		else {
+			if ( hasUniqueNavigationNodes() ) {
+				navNode = adaptor.create(Token.UP, "UP");
+			}
+			else {
+				navNode = up;
+			}
+		}
+		addLookahead(navNode);
+	}
+
+	/** Walk upwards looking for a node with more children to walk. */
+	protected void walkBackToMostRecentNodeWithUnvisitedChildren() {
+		while ( currentNode!=null &&
+				currentChildIndex>=adaptor.getChildCount(currentNode) )
+		{
+			currentNode = nodeStack.pop();
+			if ( currentNode==null ) { // hit the root?
+				return;
+			}
+			currentChildIndex = ((Integer)indexStack.pop()).intValue();
+			currentChildIndex++; // move to next child
+			if ( currentChildIndex>=adaptor.getChildCount(currentNode) ) {
+				if ( !adaptor.isNil(currentNode) ) {
+					addNavigationNode(Token.UP);
+				}
+				if ( currentNode==root ) { // we done yet?
+					currentNode = null;
+				}
+			}
+		}
+	}
+
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+
+	public boolean hasUniqueNavigationNodes() {
+		return uniqueNavigationNodes;
+	}
+
+	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
+		this.uniqueNavigationNodes = uniqueNavigationNodes;
+	}
+
+	/** Print out the entire tree including DOWN/UP nodes.  Uses
+	 *  a recursive walk.  Mostly useful for testing as it yields
+	 *  the token types not text.
+	 */
+	public String toString() {
+		return toString(root, null);
+	}
+
+	/** Number of nodes currently buffered between head and tail. */
+	protected int getLookaheadSize() {
+		return tail<head?(lookahead.length-head+tail):(tail-head);
+	}
+
+	public String toString(Object start, Object stop) {
+		if ( start==null ) {
+			return null;
+		}
+		// if we have the token stream, use that to dump text in order
+		if ( tokens!=null ) {
+			// don't trust stop node as it's often an UP node etc...
+			// walk backwards until you find a non-UP, non-DOWN node
+			// and ask for its token index.
+			int beginTokenIndex = adaptor.getTokenStartIndex(start);
+			// NOTE(review): this initial value is dead (always overwritten
+			// by the if/else below) and may NPE when stop==null -- confirm.
+			int endTokenIndex = adaptor.getTokenStopIndex(stop);
+			if ( stop!=null && adaptor.getType(stop)==Token.UP ) {
+				endTokenIndex = adaptor.getTokenStopIndex(start);
+			}
+			else {
+				// NOTE(review): size() counts tree nodes, not tokens;
+				// presumably an approximation for the stream end -- verify.
+				endTokenIndex = size()-1;
+			}
+			return tokens.toString(beginTokenIndex, endTokenIndex);
+		}
+		StringBuffer buf = new StringBuffer();
+		toStringWork(start, stop, buf);
+		return buf.toString();
+	}
+
+	/** Recursive helper for toString(): append node text (or " "+type
+	 *  when text is null) plus DOWN/UP markers around child lists.
+	 */
+	protected void toStringWork(Object p, Object stop, StringBuffer buf) {
+		if ( !adaptor.isNil(p) ) {
+			String text = adaptor.getText(p);
+			if ( text==null ) {
+				text = " "+String.valueOf(adaptor.getType(p));
+			}
+			buf.append(text); // ask the node to go to string
+		}
+		if ( p==stop ) {
+			return;
+		}
+		int n = adaptor.getChildCount(p);
+		if ( n>0 && !adaptor.isNil(p) ) {
+			buf.append(" ");
+			buf.append(Token.DOWN);
+		}
+		for (int c=0; c<n; c++) {
+			Object child = adaptor.getChild(p,c);
+			toStringWork(child, stop, buf);
+		}
+		if ( n>0 && !adaptor.isNil(p) ) {
+			buf.append(" ");
+			buf.append(Token.UP);
+		}
+	}
+}
+
diff --git a/src/org/antlr/Tool.java b/src/org/antlr/Tool.java
new file mode 100644
index 0000000..399c86d
--- /dev/null
+++ b/src/org/antlr/Tool.java
@@ -0,0 +1,551 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr;
+
+import org.antlr.tool.*;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.analysis.*;
+import org.antlr.runtime.misc.Stats;
+
+import java.io.*;
+import java.util.*;
+
+/** The main ANTLR entry point.  Read a grammar and generate a parser. */
+public class Tool {
+	public static final String VERSION = "3.0.1";
+
+	// Sentinel meaning "no -o option was given"; tested in
+	// getOutputDirectory() to decide whether an output dir was set.
+	public static final String UNINITIALIZED_DIR = "<unset-dir>";
+
+    // Input parameters / option
+
+    protected List grammarFileNames = new ArrayList(); // of String; filled by processArgs()
+	protected boolean generate_NFA_dot = false; // -nfa
+	protected boolean generate_DFA_dot = false; // -dfa
+	protected String outputDirectory = UNINITIALIZED_DIR; // -o / -fo
+	protected String libDirectory = "."; // -lib
+	protected boolean debug = false; // -debug
+	protected boolean trace = false; // -trace
+	protected boolean profile = false; // -profile
+	protected boolean report = false; // -report
+	protected boolean printGrammar = false; // -print
+	protected boolean depend = false; // -depend
+	protected boolean forceAllFilesToOutputDir = false; // -fo
+
+	// the internal options are for my use on the command line during dev
+
+	public static boolean internalOption_PrintGrammarTree = false; // -Xgrtree
+	public static boolean internalOption_PrintDFA = false; // -Xdfa
+	public static boolean internalOption_ShowNFConfigsInDFA = false; // -Xdfaverbose
+	public static boolean internalOption_watchNFAConversion = false; // -Xwatchconversion
+
+    /** Command-line entry point: print the banner, parse the arguments,
+	 *  and process every grammar file named on the command line.
+	 */
+    public static void main(String[] args) {
+		ErrorManager.info("ANTLR Parser Generator  Version " +
+						  VERSION + " (August 13, 2007)  1989-2007");
+		Tool antlr = new Tool(args);
+		antlr.process();
+		// NOTE(review): exits 0 unconditionally, even when ErrorManager
+		// reported errors -- build scripts cannot detect failure.  TODO
+		// confirm whether a nonzero exit on error would break callers.
+		System.exit(0);
+	}
+
+	/** Create an unconfigured tool; set options via setters or processArgs(). */
+	public Tool() {
+	}
+
+	/** Create a tool configured from the given command-line arguments. */
+	public Tool(String[] args) {
+		processArgs(args);
+	}
+
+	/** Parse command-line arguments into tool configuration.  Arguments
+	 *  not starting with '-' are collected as grammar file names; options
+	 *  taking a value (-o, -fo, -lib, -message-format, -Xm, -Xmaxdfaedges,
+	 *  -Xconversiontimeout) consume the following argument.  Unknown
+	 *  dash-options are silently ignored (historical behavior).
+	 */
+	public void processArgs(String[] args) {
+		if ( args==null || args.length==0 ) {
+			help();
+			return;
+		}
+		for (int i = 0; i < args.length; i++) {
+			if (args[i].equals("-o") || args[i].equals("-fo")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing output directory with -fo/-o option; ignoring");
+				}
+				else {
+					if ( args[i].equals("-fo") ) { // force output into dir
+						forceAllFilesToOutputDir = true;
+					}
+					i++;
+					outputDirectory = args[i];
+					// strip one trailing separator so later path math is uniform
+					if ( outputDirectory.endsWith("/") ||
+						 outputDirectory.endsWith("\\") )
+					{
+						outputDirectory =
+							outputDirectory.substring(0,outputDirectory.length()-1);
+					}
+					File outDir = new File(outputDirectory);
+					if( outDir.exists() && !outDir.isDirectory() ) {
+						ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE,outputDirectory);
+						// FIX: the old code reset libDirectory here (copy/paste
+						// from the -lib handler below); it is the OUTPUT dir
+						// that is unusable, so fall back to the current dir.
+						outputDirectory = ".";
+					}
+				}
+			}
+			else if (args[i].equals("-lib")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing library directory with -lib option; ignoring");
+				}
+				else {
+					i++;
+					libDirectory = args[i];
+					if ( libDirectory.endsWith("/") ||
+						 libDirectory.endsWith("\\") )
+					{
+						libDirectory =
+							libDirectory.substring(0,libDirectory.length()-1);
+					}
+					File outDir = new File(libDirectory);
+					if( !outDir.exists() ) {
+						ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND,libDirectory);
+						libDirectory = ".";
+					}
+				}
+			}
+			else if (args[i].equals("-nfa")) {
+				generate_NFA_dot=true;
+			}
+			else if (args[i].equals("-dfa")) {
+				generate_DFA_dot=true;
+			}
+			else if (args[i].equals("-debug")) {
+				debug=true;
+			}
+			else if (args[i].equals("-trace")) {
+				trace=true;
+			}
+			else if (args[i].equals("-report")) {
+				report=true;
+			}
+			else if (args[i].equals("-profile")) {
+				profile=true;
+			}
+			else if (args[i].equals("-print")) {
+				printGrammar = true;
+			}
+			else if (args[i].equals("-depend")) {
+				depend=true;
+			}
+			else if (args[i].equals("-message-format")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing output format with -message-format option; using default");
+				}
+				else {
+					i++;
+					ErrorManager.setFormat(args[i]);
+				}
+			}
+			else if (args[i].equals("-Xgrtree")) {
+				internalOption_PrintGrammarTree=true; // print grammar tree
+			}
+			else if (args[i].equals("-Xdfa")) {
+				internalOption_PrintDFA=true;
+			}
+			else if (args[i].equals("-Xnoprune")) {
+				DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES=false;
+			}
+			else if (args[i].equals("-Xnocollapse")) {
+				DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES=false;
+			}
+			else if (args[i].equals("-Xdbgconversion")) {
+				NFAToDFAConverter.debug = true;
+			}
+			else if (args[i].equals("-Xmultithreaded")) {
+				NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
+			}
+			else if (args[i].equals("-Xnomergestopstates")) {
+				DFAOptimizer.MERGE_STOP_STATES = false;
+			}
+			else if (args[i].equals("-Xdfaverbose")) {
+				internalOption_ShowNFConfigsInDFA = true;
+			}
+			else if (args[i].equals("-Xwatchconversion")) {
+				internalOption_watchNFAConversion = true;
+			}
+			else if (args[i].equals("-XdbgST")) {
+				CodeGenerator.EMIT_TEMPLATE_DELIMITERS = true;
+			}
+			else if (args[i].equals("-Xnoinlinedfa")) {
+				CodeGenerator.GEN_ACYCLIC_DFA_INLINE = false;
+			}
+			else if (args[i].equals("-Xm")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing max recursion with -Xm option; ignoring");
+				}
+				else {
+					i++;
+					// FIX: a non-numeric value used to crash the tool with an
+					// uncaught NumberFormatException; report and ignore instead.
+					try {
+						NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK =
+							Integer.parseInt(args[i]);
+					}
+					catch (NumberFormatException nfe) {
+						System.err.println("-Xm value is not an integer; ignoring: "+args[i]);
+					}
+				}
+			}
+			else if (args[i].equals("-Xmaxdfaedges")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
+				}
+				else {
+					i++;
+					try {
+						DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
+					}
+					catch (NumberFormatException nfe) {
+						System.err.println("-Xmaxdfaedges value is not an integer; ignoring: "+args[i]);
+					}
+				}
+			}
+			else if (args[i].equals("-Xconversiontimeout")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
+				}
+				else {
+					i++;
+					try {
+						DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
+					}
+					catch (NumberFormatException nfe) {
+						System.err.println("-Xconversiontimeout value is not an integer; ignoring: "+args[i]);
+					}
+				}
+			}
+			else if (args[i].equals("-Xnfastates")) {
+				DecisionProbe.verbose=true;
+			}
+			else if (args[i].equals("-X")) {
+				Xhelp();
+			}
+            else {
+                if (args[i].charAt(0) != '-') {
+                    // Must be the grammar file
+                    grammarFileNames.add(args[i]);
+                }
+            }
+        }
+    }
+
+    /*
+    protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
+        // check for invalid command line args
+        for (int a = 0; a < args.length; a++) {
+            if (!cmdLineArgValid.member(a)) {
+                System.err.println("invalid command-line argument: " + args[a] + "; ignored");
+            }
+        }
+    }
+    */
+
+    /** Process each grammar file: either emit build dependencies (-depend)
+	 *  or build/analyze the grammar and honor -print/-nfa/-dfa/-report/
+	 *  -profile.  For a combined grammar, the implicitly generated lexer
+	 *  grammar is written out and processed as well.  I/O and internal
+	 *  errors are routed through ErrorManager rather than thrown.
+	 */
+    public void process()  {
+		int numFiles = grammarFileNames.size();
+		for (int i = 0; i < numFiles; i++) {
+			String grammarFileName = (String) grammarFileNames.get(i);
+			// echo the name when handling several files, but keep -depend
+			// stdout clean (presumably consumed by build tools)
+			if ( numFiles > 1 && !depend ) {
+			    System.out.println(grammarFileName);
+			}
+			try {
+				if ( depend ) {
+					BuildDependencyGenerator dep =
+						new BuildDependencyGenerator(this, grammarFileName);
+					List outputFiles = dep.getGeneratedFileList();
+					List dependents = dep.getDependenciesFileList();
+					//System.out.println("output: "+outputFiles);
+					//System.out.println("dependents: "+dependents);
+					System.out.println(dep.getDependencies());
+					continue;
+				}
+				Grammar grammar = getGrammar(grammarFileName);
+				processGrammar(grammar);
+
+				if ( printGrammar ) {
+					grammar.printGrammar(System.out);
+				}
+
+				if ( generate_NFA_dot ) {
+					generateNFAs(grammar);
+				}
+				if ( generate_DFA_dot ) {
+					generateDFAs(grammar);
+				}
+				if ( report ) {
+					// FIX: local renamed from "report", which shadowed the
+					// boolean field of the same name tested just above
+					GrammarReport grammarReport = new GrammarReport(grammar);
+					System.out.println(grammarReport.toString());
+					// print out a backtracking report too (that is not encoded into log)
+					System.out.println(grammarReport.getBacktrackingReport());
+					// same for aborted NFA->DFA conversions
+					System.out.println(grammarReport.getEarlyTerminationReport());
+				}
+				if ( profile ) {
+					GrammarReport grammarReport = new GrammarReport(grammar);
+					Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
+									  grammarReport.toNotifyString());
+				}
+
+				// now handle the lexer if one was created for a merged spec
+				String lexerGrammarStr = grammar.getLexerGrammar();
+				if ( grammar.type==Grammar.COMBINED && lexerGrammarStr!=null ) {
+					String lexerGrammarFileName =
+						grammar.getImplicitlyGeneratedLexerFileName();
+					Writer w = getOutputFile(grammar,lexerGrammarFileName);
+					try {
+						w.write(lexerGrammarStr);
+					}
+					finally {
+						w.close(); // FIX: don't leak the handle if write() throws
+					}
+					StringReader sr = new StringReader(lexerGrammarStr);
+					Grammar lexerGrammar = new Grammar();
+					lexerGrammar.setTool(this);
+					File lexerGrammarFullFile =
+						new File(getFileDirectory(lexerGrammarFileName),lexerGrammarFileName);
+					lexerGrammar.setFileName(lexerGrammarFullFile.toString());
+					lexerGrammar.importTokenVocabulary(grammar);
+					lexerGrammar.setGrammarContent(sr);
+					sr.close();
+					processGrammar(lexerGrammar);
+				}
+			}
+			catch (IOException e) {
+				ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
+								   grammarFileName);
+			}
+			catch (Exception e) {
+				ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
+			}
+		}
+    }
+
+	public Grammar getGrammar(String grammarFileName)
+		throws IOException, antlr.TokenStreamException, antlr.RecognitionException
+	{
+		//StringTemplate.setLintMode(true);
+		FileReader fr = null;
+		fr = new FileReader(grammarFileName);
+		BufferedReader br = new BufferedReader(fr);
+		Grammar grammar = new Grammar(this,grammarFileName,br);
+		grammar.setWatchNFAConversion(internalOption_watchNFAConversion);
+		br.close();
+		fr.close();
+		return grammar;
+	}
+
+	/** Run code generation for one grammar.  The target comes from the
+	 *  grammar's "language" option; if that option resolves to null this
+	 *  method silently does nothing (no generator is attached).
+	 */
+	protected void processGrammar(Grammar grammar)
+	{
+		String language = (String)grammar.getOption("language");
+		if ( language!=null ) {
+			CodeGenerator generator = new CodeGenerator(this, grammar, language);
+			grammar.setCodeGenerator(generator);
+			// propagate command-line flags into the generator
+			generator.setDebug(debug);
+			generator.setProfile(profile);
+			generator.setTrace(trace);
+			generator.genRecognizer();
+		}
+	}
+
+	/** Write a DOT file (<grammar>_dec-<n>.dot) for every decision DFA. */
+	protected void generateDFAs(Grammar g) {
+		int numDecisions = g.getNumberOfDecisions();
+		for (int decision=1; decision<=numDecisions; decision++) {
+			DFA lookaheadDFA = g.getLookaheadDFA(decision);
+			if ( lookaheadDFA==null ) {
+				continue; // not there for some reason, ignore
+			}
+			String dotFileName = g.name+"_dec-"+decision;
+			try {
+				DOTGenerator dotGenerator = new DOTGenerator(g);
+				writeDOTFile(g, dotFileName, dotGenerator.getDOT(lookaheadDFA.startState));
+			}
+			catch(IOException ioe) {
+				ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
+								   dotFileName,
+								   ioe);
+			}
+		}
+	}
+
+	/** Write a DOT file per rule showing that rule's NFA start state. */
+	protected void generateNFAs(Grammar g) {
+		DOTGenerator dotGenerator = new DOTGenerator(g);
+		Iterator ruleItr = g.getRules().iterator();
+		while ( ruleItr.hasNext() ) {
+			Rule rule = (Rule) ruleItr.next();
+			String ruleName = rule.name;
+			try {
+				String dot = dotGenerator.getDOT(g.getRuleStartState(ruleName));
+				writeDOTFile(g, ruleName, dot);
+			}
+			catch (IOException ioe) {
+				ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
+			}
+		}
+	}
+
+	/** Write DOT text to <name>.dot in the proper output directory.
+	 *  FIX: the writer is now closed in a finally block; the old code
+	 *  leaked it when write() threw.
+	 */
+	protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
+		Writer fw = getOutputFile(g, name+".dot");
+		try {
+			fw.write(dot);
+		}
+		finally {
+			fw.close();
+		}
+	}
+
+	/** Print usage for the core command-line options to stderr. */
+	private static void help() {
+        System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
+		System.err.println("  -o outputDir          specify output directory where all output is generated");
+		System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
+		System.err.println("  -lib dir              specify location of token files");
+		System.err.println("  -depend               generate file dependencies");
+		System.err.println("  -report               print out a report about the grammar(s) processed");
+		System.err.println("  -print                print out the grammar without actions");
+		System.err.println("  -debug                generate a parser that emits debugging events");
+		System.err.println("  -profile              generate a parser that computes profiling information");
+		System.err.println("  -nfa                  generate an NFA for each rule");
+		System.err.println("  -dfa                  generate a DFA for each decision point");
+		System.err.println("  -message-format name  specify output style for messages");
+		System.err.println("  -X                    display extended argument list");
+    }
+
+	/** Print usage for the extended/internal -X options to stderr. */
+	private static void Xhelp() {
+		System.err.println("  -Xgrtree               print the grammar AST");
+		System.err.println("  -Xdfa                  print DFA as text ");
+		System.err.println("  -Xnoprune              test lookahead against EBNF block exit branches");
+		System.err.println("  -Xnocollapse           collapse incident edges into DFA states");
+		System.err.println("  -Xdbgconversion        dump lots of info during NFA conversion");
+		System.err.println("  -Xmultithreaded        run the analysis in 2 threads");
+		System.err.println("  -Xnomergestopstates    do not merge stop states");
+		System.err.println("  -Xdfaverbose           generate DFA states in DOT with NFA configs");
+		System.err.println("  -Xwatchconversion      print a message for each NFA before converting");
+		System.err.println("  -XdbgST                put tags at start/stop of all templates in output");
+		System.err.println("  -Xm m                  max number of rule invocations during conversion");
+		System.err.println("  -Xmaxdfaedges m        max \"comfortable\" number of edges for single DFA state");
+		System.err.println("  -Xconversiontimeout t  set NFA conversion timeout for each decision");
+		System.err.println("  -Xnoinlinedfa          make all DFA with tables; no inline prediction with IFs");
+		System.err.println("  -Xnfastates            for nondeterminisms, list NFA states for each path");
+    }
+
+	/** Set the output directory programmatically (equivalent of -o).
+	 *  NOTE(review): unlike the -o handling in processArgs(), this does
+	 *  not strip a trailing path separator or validate the directory.
+	 */
+	public void setOutputDirectory(String outputDirectory) {
+		this.outputDirectory = outputDirectory;
+	}
+
+    /** This method is used by all code generators to create new output
+     *  files. If the outputDir set by -o is not present it will be created.
+	 *  The final filename is sensitive to the output directory and
+	 *  the directory where the grammar file was found.  If -o is /tmp
+	 *  and the original grammar file was foo/t.g then output files
+	 *  go in /tmp/foo.
+	 *
+	 *  The output dir -o spec takes precedence if it's absolute.
+	 *  E.g., if the grammar file dir is absolute the output dir is given
+	 *  precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
+	 *  output (assuming t.g holds T.java).
+	 *
+	 *  If no -o is specified, then just write to the directory where the
+	 *  grammar file was found.
+	 *
+	 *  If outputDirectory==null then write a String.
+     */
+    public Writer getOutputFile(Grammar g, String fileName) throws IOException {
+		if ( outputDirectory==null ) {
+			// in-memory mode: caller extracts the text from the StringWriter
+			return new StringWriter();
+		}
+		// output directory is a function of where the grammar file lives
+		// for subdir/T.g, you get subdir here.  Well, depends on -o etc...
+		File outputDir = getOutputDirectory(g.getFileName());
+		File outputFile = new File(outputDir, fileName);
+
+		if( !outputDir.exists() ) {
+			outputDir.mkdirs();
+		}
+		// NOTE(review): FileWriter uses the platform default charset, so
+		// generated-file encoding varies by platform -- confirm intended.
+        FileWriter fw = new FileWriter(outputFile);
+		return new BufferedWriter(fw);
+    }
+
+	/** Which directory should a generated file for this grammar go in?
+	 *  Combines the -o/-fo settings with the directory the grammar file
+	 *  was found in; see getOutputFile() for the full precedence rules.
+	 *  FIX: the sentinel is now tested with equals() instead of the
+	 *  fragile String identity compare (!=), and the old unconditional
+	 *  "new File(outputDirectory)" initializer -- a dead store, since
+	 *  every branch reassigned it -- is gone.
+	 */
+	public File getOutputDirectory(String fileNameWithPath) {
+		File outputDir;
+		String fileDirectory = getFileDirectory(fileNameWithPath);
+		if ( !UNINITIALIZED_DIR.equals(outputDirectory) ) {
+			// -o /tmp /var/lib/t.g => /tmp/T.java
+			// -o subdir/output /usr/lib/t.g => subdir/output/T.java
+			// -o . /usr/lib/t.g => ./T.java
+			if ( fileDirectory!=null &&
+				 (new File(fileDirectory).isAbsolute() ||
+				  fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
+				  forceAllFilesToOutputDir
+				)
+			{
+				// somebody set the dir, it takes precedence; write new file there
+				outputDir = new File(outputDirectory);
+			}
+			else {
+				// -o /tmp subdir/t.g => /tmp/subdir/t.g
+				if ( fileDirectory!=null ) {
+					outputDir = new File(outputDirectory, fileDirectory);
+				}
+				else {
+					outputDir = new File(outputDirectory);
+				}
+			}
+		}
+		else {
+			// they didn't specify a -o dir so just write to location
+			// where grammar is, absolute or relative
+			String dir = ".";
+			if ( fileDirectory!=null ) {
+				dir = fileDirectory;
+			}
+			outputDir = new File(dir);
+		}
+		return outputDir;
+	}
+
+	/** Open a file in the -lib dir.  For now, it's just .tokens files */
+	public BufferedReader getLibraryFile(String fileName) throws IOException {
+		// build <libDirectory>/<fileName> and hand back a buffered reader
+		String fullName = libDirectory+File.separator+fileName;
+		return new BufferedReader(new FileReader(fullName));
+	}
+
+	/** Directory searched for .tokens vocab files; set via -lib, default ".". */
+	public String getLibraryDirectory() {
+		return libDirectory;
+	}
+
+	/** Return the directory containing the grammar file for this grammar.
+	 *  normally this is a relative path from current directory.  People will
+	 *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
+	 *  "grammars/foo.g3" etc...  This method returns "grammars".
+	 */
+	public String getFileDirectory(String fileName) {
+		// File.getParent() is null when there is no directory component
+		return new File(fileName).getParent();
+	}
+
+	/** If the tool needs to panic/exit, how do we do that?
+	 *  Throws Error so it propagates past ordinary catch(Exception) blocks.
+	 */
+	public void panic() {
+		throw new Error("ANTLR panic");
+	}
+
+	/** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss */
+	public static String getCurrentTimeStamp() {
+		GregorianCalendar now = new java.util.GregorianCalendar();
+		int year   = now.get(Calendar.YEAR);
+		int month  = now.get(Calendar.MONTH)+1; // zero-based for months
+		int day    = now.get(Calendar.DAY_OF_MONTH);
+		int hour   = now.get(Calendar.HOUR_OF_DAY);
+		int minute = now.get(Calendar.MINUTE);
+		int second = now.get(Calendar.SECOND);
+		// build incrementally, zero-padding each two-digit field
+		StringBuffer ts = new StringBuffer();
+		ts.append(year);
+		ts.append('-');
+		if ( month<10 ) ts.append('0');
+		ts.append(month);
+		ts.append('-');
+		if ( day<10 ) ts.append('0');
+		ts.append(day);
+		ts.append(' ');
+		if ( hour<10 ) ts.append('0');
+		ts.append(hour);
+		ts.append(':');
+		if ( minute<10 ) ts.append('0');
+		ts.append(minute);
+		ts.append(':');
+		if ( second<10 ) ts.append('0');
+		ts.append(second);
+		return ts.toString();
+	}
+
+}
diff --git a/src/org/antlr/analysis/DFA.java b/src/org/antlr/analysis/DFA.java
new file mode 100644
index 0000000..0fd09dd
--- /dev/null
+++ b/src/org/antlr/analysis/DFA.java
@@ -0,0 +1,1106 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.IntStream;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.*;
+
+import java.util.*;
+
+/** A DFA (converted from a grammar's NFA).
+ *  DFAs are used as prediction machine for alternative blocks in all kinds
+ *  of recognizers (lexers, parsers, tree walkers).
+ */
+public class DFA {
+	// reachability codes used while computing whether each state can
+	// reach an accept state (see comment on the cyclic field below)
+	public static final int REACHABLE_UNKNOWN = -2;
+	public static final int REACHABLE_BUSY = -1; // in process of computing
+	public static final int REACHABLE_NO = 0;
+	public static final int REACHABLE_YES = 1;
+
+	/** Prevent explosion of DFA states during conversion. The max number
+	 *  of states per alt in a single decision's DFA.
+	 */
+	public static final int MAX_STATES_PER_ALT_IN_DFA = 450;
+
+	/** Set to 0 to not terminate early */
+	public static int MAX_TIME_PER_DFA_CREATION = 1*1000; // ms; -Xconversiontimeout
+
+	/** How many edges can each DFA state have before a "special" state
+	 *  is created that uses IF expressions instead of a table?
+	 */
+	public static int MAX_STATE_TRANSITIONS_FOR_TABLE = 65534; // -Xmaxdfaedges
+
+	/** What's the start state for this DFA? */
+    public DFAState startState;
+
+	/** This DFA is being built for which decision? */
+	public int decisionNumber = 0;
+
+    /** From what NFAState did we create the DFA? */
+    public NFAState decisionNFAStartState;
+
+	/** The printable grammar fragment associated with this DFA */
+	public String description;
+
+	/** A set of all uniquely-numbered DFA states.  Maps hash of DFAState
+     *  to the actual DFAState object.  We use this to detect
+     *  existing DFA states.  Map<DFAState,DFAState>.  Use Map so
+	 *  we can get old state back (Set only allows you to see if it's there).
+	 *  Not used during fixed k lookahead as it's a waste to fill it with
+	 *  a dup of states array.
+     */
+    protected Map uniqueStates = new HashMap();
+
+	/** Maps the state number to the actual DFAState.  Use a Vector as it
+	 *  grows automatically when I set the ith element.  This contains all
+	 *  states, but the states are not unique.  s3 might be same as s1 so
+	 *  s3 -> s1 in this table.  This is how cycles occur.  If fixed k,
+	 *  then these states will all be unique as states[i] always points
+	 *  at state i when no cycles exist.
+	 *
+	 *  This is managed in parallel with uniqueStates and simply provides
+	 *  a way to go from state number to DFAState rather than via a
+	 *  hash lookup.
+	 */
+	protected Vector states = new Vector();
+
+	/** Unique state numbers */
+	protected int stateCounter = 0;
+
+	/** count only new states not states that were rejected as already present */
+	protected int numberOfStates = 0;
+
+	/** User specified max fixed lookahead.  If 0, nothing specified.  -1
+	 *  implies we have not looked at the options table yet to set k.
+	 */
+	protected int user_k = -1;
+
+	/** While building the DFA, track max lookahead depth if not cyclic */
+	protected int max_k = -1;
+
+    /** Is this DFA reduced?  I.e., can all states lead to an accept state? */
+    protected boolean reduced = true;
+
+    /** Are there any loops in this DFA?
+	 *  Computed by doesStateReachAcceptState()
+	 */
+    protected boolean cyclic = false;
+
+    /** Each alt in an NFA derived from a grammar must have a DFA state that
+     *  predicts it lest the parser not know what to do.  Nondeterminisms can
+     *  lead to this situation (assuming no semantic predicates can resolve
+     *  the problem) and when for some reason, I cannot compute the lookahead
+     *  (which might arise from an error in the algorithm or from
+     *  left-recursion etc...).  This list starts out with all alts contained
+     *  and then in method doesStateReachAcceptState() I remove the alts I
+     *  know to be uniquely predicted.
+     */
+    protected List unreachableAlts;
+
+	// number of alternatives in the decision this DFA predicts
+	protected int nAlts = 0;
+
+	/** We only want one accept state per predicted alt; track here */
+	protected DFAState[] altToAcceptState;
+
+	/** Track whether an alt discovers recursion for each alt during
+	 *  NFA to DFA conversion; >1 alt with recursion implies nonregular.
+	 */
+	protected IntSet recursiveAltSet = new IntervalSet();
+
+	/** Which NFA are we converting (well, which piece of the NFA)? */
+    public NFA nfa;
+
+	protected NFAToDFAConverter nfaConverter; // performs the NFA->DFA conversion
+
+	/** This probe tells you a lot about a decision and is useful even
+	 *  when there is no error such as when a syntactic nondeterminism
+	 *  is solved via semantic predicates.  Perhaps a GUI would want
+	 *  the ability to show that.
+	 */
+	public DecisionProbe probe = new DecisionProbe(this);
+
+	/** Track absolute time of the conversion so we can have a failsafe:
+	 *  if it takes too long, then terminate.  Assume bugs are in the
+	 *  analysis engine.
+	 */
+	protected long conversionStartTime;
+
+	/** Map an edge transition table to a unique set number; ordered so
+	 *  we can push into the output template as an ordered list of sets
+	 *  and then ref them from within the transition[][] table.  Like this
+	 *  for C# target:
+	 *     public static readonly DFA30_transition0 =
+	 *     	new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...};
+	 *         public static readonly DFA30_transition1 =
+	 *     	new short[] { 21 };
+	 *      public static readonly short[][] DFA30_transition = {
+	 *     	  DFA30_transition0,
+	 *     	  DFA30_transition0,
+	 *     	  DFA30_transition1,
+	 *     	  ...
+	 *      };
+	 */
+	public Map edgeTransitionClassMap = new LinkedHashMap();
+
+	/** The unique edge transition class number; every time we see a new
+	 *  set of edges emanating from a state, we number it so we can reuse
+	 *  if it's ever seen again for another state.  For Java grammar,
+	 *  some of the big edge transition tables are seen about 57 times.
+	 */
+	protected int edgeTransitionClass =0;
+
+	/* This DFA can be converted to a transition[state][char] table and
+	 * the following tables are filled by createStateTables upon request.
+	 * These are injected into the templates for code generation.
+	 * See March 25, 2006 entry for description:
+	 *   http://www.antlr.org/blog/antlr3/codegen.tml
+	 * Often using Vector as can't set ith position in a List and have
+	 * it extend list size; bizarre.
+	 */
+
+	/** List of special DFAState objects */
+	public List specialStates;
+	/** List of ST for special states. */
+	public List specialStateSTs;
+	public Vector accept;
+	public Vector eot;
+	public Vector eof;
+	public Vector min;
+	public Vector max;
+	public Vector special;
+	public Vector transition;
+	/** just the Vector<Integer> indicating which unique edge table is at
+	 *  position i.
+	 */
+	public Vector transitionEdgeTables; // not used by java yet
+	protected int uniqueCompressedSpecialStateNum = 0;
+
+	/** Build the DFA for one decision: convert the NFA rooted at
+	 *  decisionStartState, verify the result (issuing warnings for
+	 *  nondeterminism, aborted, or overflowed analysis via the probe),
+	 *  then renumber states to a contiguous 0..n-1 range.  Dumps the
+	 *  DFA when Tool.internalOption_PrintDFA is set.
+	 */
+	public DFA(int decisionNumber, NFAState decisionStartState) {
+		this.decisionNumber = decisionNumber;
+        this.decisionNFAStartState = decisionStartState;
+        nfa = decisionStartState.nfa;
+        nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+        //setOptions( nfa.grammar.getDecisionOptions(getDecisionNumber()) );
+        initAltRelatedInfo();
+
+		//long start = System.currentTimeMillis();
+        nfaConverter = new NFAToDFAConverter(this);
+		nfaConverter.convert();
+
+		// figure out if there are problems with decision
+		verify();
+
+		if ( !probe.isDeterministic() ||
+			 probe.analysisAborted() ||
+			 probe.analysisOverflowed() )
+		{
+			probe.issueWarnings();
+		}
+
+		// must be after verify as it computes cyclic, needed by this routine
+		// should be after warnings because early termination or something
+		// will not allow the reset to operate properly in some cases.
+		resetStateNumbersToBeContiguous();
+
+		//long stop = System.currentTimeMillis();
+		//System.out.println("verify cost: "+(int)(stop-start)+" ms");
+
+		if ( Tool.internalOption_PrintDFA ) {
+			System.out.println("DFA d="+decisionNumber);
+			FASerializer serializer = new FASerializer(nfa.grammar);
+			String result = serializer.serialize(startState);
+			System.out.println(result);
+		}
+    }
+
+	/** Walk all states and reset their numbers to be a contiguous sequence
+	 *  of integers starting from 0.  Only cyclic DFA can have unused positions
+	 *  in states list.  State i might be identical to a previous state j and
+	 *  will result in states[i] == states[j].  We don't want to waste a state
+	 *  number on this.  Useful mostly for code generation in tables.
+	 *
+	 *  At the start of this routine, states[i].stateNumber <= i by definition.
+	 *  If states[50].stateNumber is 50 then a cycle during conversion may
+	 *  try to add state 103, but we find that an identical DFA state, named
+	 *  50, already exists, hence, states[103]==states[50] and both have
+	 *  stateNumber 50 as they point at same object.  Afterwards, the set
+	 *  of state numbers from all states should represent a contiguous range
+	 *  from 0..n-1 where n is the number of unique states.
+	 */
+	public void resetStateNumbersToBeContiguous() {
+		if ( getUserMaxLookahead()>0 ) {
+			// all numbers are unique already; no states are thrown out.
+			return;
+		}
+        /*
+        if ( decisionNumber==14 ) {
+			System.out.println("DFA :"+decisionNumber+" "+this);
+            //System.out.println("DFA start state :"+startState);
+			System.out.println("unique state numbers: ");
+			Set s = getUniqueStates().keySet();
+			for (Iterator it = s.iterator(); it.hasNext();) {
+				DFAState d = (DFAState) it.next();
+				System.out.print(d.stateNumber+" ");
+			}
+			System.out.println();
+
+			System.out.println("size="+s.size());
+			System.out.println("contiguous states: ");
+			for (Iterator it = states.iterator(); it.hasNext();) {
+				DFAState d = (DFAState) it.next();
+				if ( d!=null ) {
+                    System.out.print(d.stateNumber+" ");
+                }
+			}
+			System.out.println();
+
+			//Set a = new HashSet();
+			List a = new ArrayList();
+			System.out.println("unique set from states table: ");
+			for (int i = 0; i <= getMaxStateNumber(); i++) {
+				DFAState d = getState(i);
+                if ( d==null ) {
+                    continue;
+                }
+                boolean found=false;
+				for (int j=0; j<a.size(); j++) {
+					DFAState old = (DFAState)a.get(j);
+					if ( old.equals(d) ) {
+						if ( old.stateNumber!=d.stateNumber ) {
+							System.out.println("WHAT! state["+i+"]="+d+" prev in list as "+old);
+						}
+						found=true;
+					}
+				}
+				if ( !found ) {
+					a.add(d);
+				}
+			}
+			for (Iterator it = a.iterator(); it.hasNext();) {
+				DFAState d = (DFAState) it.next();
+                if ( d!=null ) {
+                    System.out.print(d.stateNumber+" ");
+                }
+            }
+			System.out.println();
+			System.out.println("size="+a.size());
+
+			if ( a.equals(s) ) {
+				System.out.println("both sets same");
+			}
+			else {
+				System.out.println("sets NOT same");
+			}
+			System.out.println("stateCounter="+stateCounter);
+		}
+        */
+
+        // walk list of DFAState objects by state number,
+		// setting state numbers to 0..n-1
+		int snum=0;
+		for (int i = 0; i <= getMaxStateNumber(); i++) {
+			DFAState s = getState(i);
+            // some states are unused after creation most commonly due to cycles
+            // or conflict resolution.
+            if ( s==null ) {
+                continue;
+            }
+			// state i is mapped to DFAState with state number set to i originally
+			// so if it's less than i, then we renumbered it already; that
+			// happens when states have been merged or cycles occurred I think.
+			// states[50] will point to DFAState with s50 in it but
+			// states[103] might also point at this same DFAState.  Since
+			// 50 < 103 then it's already been renumbered as it points downwards.
+			boolean alreadyRenumbered = s.stateNumber<i;
+			if ( !alreadyRenumbered ) {
+				// state i is a valid state, reset it's state number
+				s.stateNumber = snum; // rewrite state numbers to be 0..n-1
+				snum++;
+			}
+		}
+        /*
+        if ( decisionNumber==14 ) {
+			//System.out.println("max state num: "+maxStateNumber);
+			System.out.println("after renum, DFA :"+decisionNumber+" "+this);
+			System.out.println("uniq states.size="+uniqueStates.size());
+
+			Set a = new HashSet();
+			System.out.println("after renumber; unique set from states table: ");
+			for (int i = 0; i <= getMaxStateNumber(); i++) {
+				DFAState d = getState(i);
+				a.add(d);
+			}
+			for (Iterator it = a.iterator(); it.hasNext();) {
+				DFAState d = (DFAState) it.next();
+				if ( d!=null ) {
+                    System.out.print(d.stateNumber+" ");
+                }
+			}
+			System.out.println();
+			System.out.println("size="+a.size());
+		}
+        */
+        // sanity check: the number of states we renumbered must match the
+        // unique-state count, or the states table and uniqueStates map
+        // have diverged somewhere during conversion.
+        if ( snum!=getNumberOfStates() ) {
+			ErrorManager.internalError("DFA "+decisionNumber+": "+
+				decisionNFAStartState.getDescription()+" num unique states "+getNumberOfStates()+
+				"!= num renumbered states "+snum);
+		}
+	}
+
+	// JAVA-SPECIFIC Accessors!!!!!  It is so impossible to get arrays
+	// or even consistently formatted strings acceptable to java that
+	// I am forced to build the individual char elements here
+
+	// Each accessor returns the corresponding DFA table run-length encoded
+	// as Java char-escape strings; see getRunLengthEncoding().
+	public List getJavaCompressedAccept() { return getRunLengthEncoding(accept); }
+	public List getJavaCompressedEOT() { return getRunLengthEncoding(eot); }
+	public List getJavaCompressedEOF() { return getRunLengthEncoding(eof); }
+	public List getJavaCompressedMin() { return getRunLengthEncoding(min); }
+	public List getJavaCompressedMax() { return getRunLengthEncoding(max); }
+	public List getJavaCompressedSpecial() { return getRunLengthEncoding(special); }
+	// The transition table is two-dimensional: encode each state's row
+	// separately and return the list of encoded rows (null if no table).
+	public List getJavaCompressedTransition() {
+		if ( transition==null || transition.size()==0 ) {
+			return null;
+		}
+		List encoded = new ArrayList(transition.size());
+		// walk Vector<Vector<FormattedInteger>> which is the transition[][] table
+		for (int i = 0; i < transition.size(); i++) {
+			Vector transitionsForState = (Vector) transition.elementAt(i);
+			encoded.add(getRunLengthEncoding(transitionsForState));
+		}
+		return encoded;
+	}
+
+	/** Compress the incoming data list so that runs of same number are
+	 *  encoded as number,value pair sequences.  3 -1 -1 -1 28 is encoded
+	 *  as 1 3 3 -1 1 28.  I am pretty sure this is the lossless compression
+	 *  that GIF files use.  Transition tables are heavily compressed by
+	 *  this technique.  I got the idea from JFlex http://jflex.de/
+	 *
+	 *  Return List<String> where each string is either \xyz for 8bit char
+	 *  and \uFFFF for 16bit.  Hideous and specific to Java, but it is the
+	 *  only target bad enough to need it.
+	 */
+	public List getRunLengthEncoding(List data) {
+		if ( data==null || data.size()==0 ) {
+			// for states with no transitions we want an empty string ""
+			// to hold its place in the transitions array.
+			List empty = new ArrayList();
+			empty.add("");
+			return empty;
+		}
+		int size = Math.max(2,data.size()/2);
+		List encoded = new ArrayList(size); // guess at size
+		// scan values looking for runs
+		int i = 0;
+		Integer emptyValue = Utils.integer(-1);
+		while ( i < data.size() ) {
+			Integer I = (Integer)data.get(i);
+			if ( I==null ) {
+				I = emptyValue;
+			}
+			// count how many v there are?
+			int n = 0;
+			for (int j = i; j < data.size(); j++) {
+				Integer v = (Integer)data.get(j);
+				if ( v==null ) {
+					v = emptyValue;
+				}
+				if ( I.equals(v) ) {
+					n++;
+				}
+				else {
+					break;
+				}
+			}
+			encoded.add(encodeIntAsCharEscape((char)n));
+			encoded.add(encodeIntAsCharEscape((char)I.intValue()));
+			i+=n;
+		}
+		return encoded;
+	}
+
+	/** Fill the accept, eot, eof, min, max, special and transition tables
+	 *  for every state in this DFA, then ask the code generator to emit
+	 *  code for each "special" (predicate-dependent or oversized) state.
+	 */
+	public void createStateTables(CodeGenerator generator) {
+		//System.out.println("createTables:\n"+this);
+
+		description = getNFADecisionStartState().getDescription();
+		description =
+			generator.target.getTargetStringLiteralFromString(description);
+
+		// create all the tables; each is sized and pre-filled so entries
+		// can be assigned by state number below
+		special = new Vector(this.getNumberOfStates()); // Vector<short>
+		special.setSize(this.getNumberOfStates());
+		specialStates = new ArrayList();				// List<DFAState>
+		specialStateSTs = new ArrayList();				// List<ST>
+		accept = new Vector(this.getNumberOfStates()); // Vector<int>
+		accept.setSize(this.getNumberOfStates());
+		eot = new Vector(this.getNumberOfStates()); // Vector<int>
+		eot.setSize(this.getNumberOfStates());
+		eof = new Vector(this.getNumberOfStates()); // Vector<int>
+		eof.setSize(this.getNumberOfStates());
+		min = new Vector(this.getNumberOfStates()); // Vector<int>
+		min.setSize(this.getNumberOfStates());
+		max = new Vector(this.getNumberOfStates()); // Vector<int>
+		max.setSize(this.getNumberOfStates());
+		transition = new Vector(this.getNumberOfStates()); // Vector<Vector<int>>
+		transition.setSize(this.getNumberOfStates());
+		transitionEdgeTables = new Vector(this.getNumberOfStates()); // Vector<Vector<int>>
+		transitionEdgeTables.setSize(this.getNumberOfStates());
+
+		// for each state in the DFA, fill relevant tables.
+		// fixed-k DFA keep all states; otherwise only unique states matter
+		Iterator it = null;
+		if ( getUserMaxLookahead()>0 ) {
+			it = states.iterator();
+		}
+		else {
+			it = getUniqueStates().values().iterator();
+		}
+		while ( it.hasNext() ) {
+			DFAState s = (DFAState)it.next();
+			if ( s==null ) {
+				// ignore null states; some acyclic DFA see this condition
+				// when inlining DFA (due to lack of exit branch pruning?)
+				continue;
+			}
+			if ( s.isAcceptState() ) {
+				// can't compute min,max,special,transition on accepts
+				accept.set(s.stateNumber,
+						   Utils.integer(s.getUniquelyPredictedAlt()));
+			}
+			else {
+				createMinMaxTables(s);
+				createTransitionTableEntryForState(s);
+				createSpecialTable(s);
+				createEOTAndEOFTables(s);
+			}
+		}
+
+		// now that we have computed list of specialStates, gen code for 'em
+		for (int i = 0; i < specialStates.size(); i++) {
+			DFAState ss = (DFAState) specialStates.get(i);
+			StringTemplate stateST =
+				generator.generateSpecialState(ss);
+			specialStateSTs.add(stateST);
+		}
+
+		// check that the tables are not messed up by encode/decode
+		/*
+		testEncodeDecode(min);
+		testEncodeDecode(max);
+		testEncodeDecode(accept);
+		testEncodeDecode(special);
+		System.out.println("min="+min);
+		System.out.println("max="+max);
+		System.out.println("eot="+eot);
+		System.out.println("eof="+eof);
+		System.out.println("accept="+accept);
+		System.out.println("special="+special);
+		System.out.println("transition="+transition);
+		*/
+	}
+
+	/*
+	private void testEncodeDecode(List data) {
+		System.out.println("data="+data);
+		List encoded = getRunLengthEncoding(data);
+		StringBuffer buf = new StringBuffer();
+		for (int i = 0; i < encoded.size(); i++) {
+			String I = (String)encoded.get(i);
+			int v = 0;
+			if ( I.startsWith("\\u") ) {
+				v = Integer.parseInt(I.substring(2,I.length()), 16);
+			}
+			else {
+				v = Integer.parseInt(I.substring(1,I.length()), 8);
+			}
+			buf.append((char)v);
+		}
+		String encodedS = buf.toString();
+		short[] decoded = org.antlr.runtime.DFA.unpackEncodedString(encodedS);
+		//System.out.println("decoded:");
+		for (int i = 0; i < decoded.length; i++) {
+			short x = decoded[i];
+			if ( x!=((Integer)data.get(i)).intValue() ) {
+				System.err.println("problem with encoding");
+			}
+			//System.out.print(", "+x);
+		}
+		//System.out.println();
+	}
+	*/
+
+	/** Record in the min/max tables the smallest and largest character
+	 *  labels found on state s's outgoing edges.  Set labels contribute
+	 *  their extrema; labels below Label.MIN_CHAR_VALUE (e.g. EOF) are
+	 *  excluded from the min computation.
+	 */
+	protected void createMinMaxTables(DFAState s) {
+		// start with sentinels outside the valid range so any real label wins
+		int smin = Label.MAX_CHAR_VALUE + 1;
+		int smax = Label.MIN_ATOM_VALUE - 1;
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = (Transition) s.transition(j);
+			Label label = edge.label;
+			if ( label.isAtom() ) {
+				if ( label.getAtom()>=Label.MIN_CHAR_VALUE ) {
+					if ( label.getAtom()<smin ) {
+						smin = label.getAtom();
+					}
+					if ( label.getAtom()>smax ) {
+						smax = label.getAtom();
+					}
+				}
+			}
+			else if ( label.isSet() ) {
+				IntervalSet labels = (IntervalSet)label.getSet();
+				int lmin = labels.getMinElement();
+				// if valid char (don't do EOF) and less than current min
+				if ( lmin<smin && lmin>=Label.MIN_CHAR_VALUE ) {
+					smin = labels.getMinElement();
+				}
+				if ( labels.getMaxElement()>smax ) {
+					smax = labels.getMaxElement();
+				}
+			}
+		}
+
+		if ( smax<0 ) {
+			// must be predicates or pure EOT transition; just zero out min, max
+			smin = Label.MIN_CHAR_VALUE;
+			smax = Label.MIN_CHAR_VALUE;
+		}
+
+		min.set(s.stateNumber, Utils.integer((char)smin));
+		max.set(s.stateNumber, Utils.integer((char)smax));
+
+		// at this point min/max must describe a sane char range
+		if ( smax<0 || smin>Label.MAX_CHAR_VALUE || smin<0 ) {
+			ErrorManager.internalError("messed up: min="+min+", max="+max);
+		}
+	}
+
+	/** Build the transition row for state s: a Vector indexed by
+	 *  (char - min[s]) that maps each input char to a target state number.
+	 *  Identical rows are shared across states: each distinct row gets an
+	 *  equivalence-class number recorded in transitionEdgeTables.
+	 *  Requires createMinMaxTables(s) to have run first.
+	 */
+	protected void createTransitionTableEntryForState(DFAState s) {
+		/*
+		System.out.println("createTransitionTableEntryForState s"+s.stateNumber+
+			" dec "+s.dfa.decisionNumber+" cyclic="+s.dfa.isCyclic());
+			*/
+		int smax = ((Integer)max.get(s.stateNumber)).intValue();
+		int smin = ((Integer)min.get(s.stateNumber)).intValue();
+
+		// one slot per char in [smin..smax]; unset slots stay null
+		Vector stateTransitions = new Vector(smax-smin+1);
+		stateTransitions.setSize(smax-smin+1);
+		transition.set(s.stateNumber, stateTransitions);
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = (Transition) s.transition(j);
+			Label label = edge.label;
+			if ( label.isAtom() && label.getAtom()>=Label.MIN_CHAR_VALUE ) {
+				int labelIndex = label.getAtom()-smin; // offset from 0
+				stateTransitions.set(labelIndex,
+									 Utils.integer(edge.target.stateNumber));
+			}
+			else if ( label.isSet() ) {
+				IntervalSet labels = (IntervalSet)label.getSet();
+				int[] atoms = labels.toArray();
+				for (int a = 0; a < atoms.length; a++) {
+					// set the transition if the label is valid (don't do EOF)
+					if ( atoms[a]>=Label.MIN_CHAR_VALUE ) {
+						int labelIndex = atoms[a]-smin; // offset from 0
+						stateTransitions.set(labelIndex,
+											 Utils.integer(edge.target.stateNumber));
+					}
+				}
+			}
+		}
+		// track unique state transition tables so we can reuse
+		Integer edgeClass = (Integer)edgeTransitionClassMap.get(stateTransitions);
+		if ( edgeClass!=null ) {
+			//System.out.println("we've seen this array before; size="+stateTransitions.size());
+			transitionEdgeTables.set(s.stateNumber, edgeClass);
+		}
+		else {
+			/*
+			if ( stateTransitions.size()>255 ) {
+				System.out.println("edge edgeTable "+stateTransitions.size()+" s"+s.stateNumber+": "+Utils.integer(edgeTransitionClass));
+			}
+			else {
+				System.out.println("stateTransitions="+stateTransitions);
+			}
+			*/
+			// first time we've seen this row: assign it the next class number
+			edgeClass = Utils.integer(edgeTransitionClass);
+			transitionEdgeTables.set(s.stateNumber, edgeClass);
+			edgeTransitionClassMap.put(stateTransitions, edgeClass);
+			edgeTransitionClass++;
+		}
+	}
+
+	/** Set up the EOT and EOF tables; we cannot put -1 min/max values so
+	 *  we need another way to test that in the DFA transition function.
+	 *  An EOT or EOF edge out of s records its target state number in the
+	 *  corresponding table, whether the edge label is a lone atom or a set
+	 *  that happens to contain EOT/EOF.
+	 */
+	protected void createEOTAndEOFTables(DFAState s) {
+		int numEdges = s.getNumberOfTransitions();
+		for (int j = 0; j < numEdges; j++) {
+			Transition edge = (Transition) s.transition(j);
+			Label label = edge.label;
+			Integer targetStateNum = Utils.integer(edge.target.stateNumber);
+			if ( label.isAtom() ) {
+				int atom = label.getAtom();
+				if ( atom==Label.EOT ) {
+					// eot[s] points to accept state
+					eot.set(s.stateNumber, targetStateNum);
+				}
+				else if ( atom==Label.EOF ) {
+					// eof[s] points to accept state
+					eof.set(s.stateNumber, targetStateNum);
+				}
+			}
+			else if ( label.isSet() ) {
+				// a set label may bury EOT/EOF among ordinary atoms; scan it
+				int[] atoms = ((IntervalSet)label.getSet()).toArray();
+				for (int a = 0; a < atoms.length; a++) {
+					if ( atoms[a]==Label.EOT ) {
+						eot.set(s.stateNumber, targetStateNum);
+					}
+					else if ( atoms[a]==Label.EOF ) {
+						eof.set(s.stateNumber, targetStateNum);
+					}
+				}
+			}
+		}
+	}
+
+	/** Mark state s "special" when any edge carries a semantic predicate
+	 *  (or leads to gated predicates) or when its char range is too wide
+	 *  for a table (> MAX_STATE_TRANSITIONS_FOR_TABLE).  Special states are
+	 *  handled by generated code instead of the transition table and are
+	 *  numbered 0..n-1 in the special table; non-special states get -1.
+	 */
+	protected void createSpecialTable(DFAState s) {
+		// number all special states from 0...n-1 instead of their usual numbers
+		boolean hasSemPred = false;
+
+		// TODO this code is very similar to canGenerateSwitch.  Refactor to share
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = (Transition) s.transition(j);
+			Label label = edge.label;
+			// can't do a switch if the edges have preds or are going to
+			// require gated predicates
+			if ( label.isSemanticPredicate() ||
+				 ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null)
+			{
+				hasSemPred = true;
+				break;
+			}
+		}
+		// if has pred or too big for table, make it special
+		int smax = ((Integer)max.get(s.stateNumber)).intValue();
+		int smin = ((Integer)min.get(s.stateNumber)).intValue();
+		if ( hasSemPred || smax-smin>MAX_STATE_TRANSITIONS_FOR_TABLE ) {
+			special.set(s.stateNumber,
+						Utils.integer(uniqueCompressedSpecialStateNum));
+			uniqueCompressedSpecialStateNum++;
+			specialStates.add(s);
+		}
+		else {
+			special.set(s.stateNumber, Utils.integer(-1)); // not special
+		}
+	}
+
+	/** Render an int (expected 0..0xFFFF) as a Java char-literal escape:
+	 *  a short octal escape for values that fit in 7 bits, otherwise a
+	 *  four-digit unicode escape.
+	 */
+	public static String encodeIntAsCharEscape(int v) {
+		if ( v>127 ) {
+			// OR-ing in 0x10000 guarantees five hex digits; dropping the
+			// leading '1' leaves a zero-padded four-digit value.
+			return "\\u"+Integer.toHexString(v|0x10000).substring(1,5);
+		}
+		return "\\"+Integer.toOctalString(v);
+	}
+
+	/** Predict which alternative this DFA chooses for the given input by
+	 *  running the grammar interpreter over it.
+	 */
+	public int predict(IntStream input) {
+		return new Interpreter(nfa.grammar, input).predict(this);
+	}
+
+	/** Add a new DFA state to this DFA if not already present, returning
+	 *  whichever instance is canonical.  To force an acyclic, fixed maximum
+	 *  depth DFA, just always return the incoming state: by never reusing
+	 *  old states, no cycles can be created.  For fixed-k lookahead we
+	 *  don't update uniqueStates; returning the incoming state indicates
+	 *  it is a new state.
+	 */
+    protected DFAState addState(DFAState d) {
+		if ( getUserMaxLookahead()>0 ) {
+			return d; // fixed-k: every state stays distinct
+		}
+		// does a DFA state exist already with everything the same
+		// except its state number?
+		DFAState existing = (DFAState)uniqueStates.get(d);
+		if ( existing==null ) {
+			// not seen before: register d as the canonical instance
+			uniqueStates.put(d,d);
+			numberOfStates++;
+			existing = d;
+		}
+		return existing;
+	}
+
+	/** Drop d from the unique-states map, decrementing the state count
+	 *  only if d was actually tracked.
+	 */
+	public void removeState(DFAState d) {
+		DFAState it = (DFAState)uniqueStates.remove(d);
+		if ( it!=null ) {
+			numberOfStates--;
+		}
+	}
+
+	// DFAState->DFAState map used by addState() to detect equivalent states.
+	public Map getUniqueStates() {
+		return uniqueStates;
+	}
+
+	/** What is the max state number ever created?  This may be beyond
+	 *  getNumberOfStates().
+	 */
+	public int getMaxStateNumber() {
+		return states.size()-1; // states is indexed by state number
+	}
+
+	// Look up the DFAState with this number; may be null for numbers
+	// abandoned during conversion.
+	public DFAState getState(int stateNumber) {
+		return (DFAState)states.get(stateNumber);
+	}
+
+	// Record d (possibly shared with another number) under stateNumber.
+	public void setState(int stateNumber, DFAState d) {
+		states.set(stateNumber, d);
+	}
+
+	/** Is the DFA reduced?  I.e., does every state have a path to an accept
+     *  state?  If not, don't delete as we need to generate an error indicating
+     *  which paths are "dead ends".  Also tracks list of alts with no accept
+     *  state in the DFA.  Must call verify() first before this makes sense.
+     */
+    public boolean isReduced() {
+        return reduced;
+    }
+
+    /** Is this DFA cyclic?  That is, are there any loops?  If not, then
+     *  the DFA is essentially an LL(k) predictor for some fixed, max k value.
+     *  We can build a series of nested IF statements to match this.  In the
+     *  presence of cycles, we need to build a general DFA and interpret it
+     *  to distinguish between alternatives.
+     *  A user-specified fixed lookahead forces the acyclic answer.
+     */
+    public boolean isCyclic() {
+        return cyclic && getUserMaxLookahead()==0;
+    }
+
+	/** Can this decision be emitted as inlined nested IFs rather than a
+	 *  table?  Only acyclic decisions that analysis handled as LL(*)
+	 *  qualify, and only when the generator enables inlining.
+	 */
+	public boolean canInlineDecision() {
+		// TODO: and ! too big
+		return CodeGenerator.GEN_ACYCLIC_DFA_INLINE &&
+			!isCyclic() &&
+		    !probe.isNonLLStarDecision();
+	}
+
+	/** Is this DFA derived from the NFA for the lexer's Tokens rule? */
+	public boolean isTokensRuleDecision() {
+		if ( nfa.grammar.type!=Grammar.LEXER ) {
+			return false;
+		}
+		// the Tokens-rule decision begins at the target of the rule start
+		// state's first transition; compare it with this DFA's NFA start
+		NFAState tokensRuleStart =
+			nfa.grammar.getRuleStartState(Grammar.ARTIFICIAL_TOKENS_RULENAME);
+		NFAState tokensDecisionStart =
+			(NFAState)tokensRuleStart.transition(0).target;
+		return getNFADecisionStartState() == tokensDecisionStart;
+	}
+
+	/** The user may specify a max, acyclic lookahead for any decision.  No
+	 *  DFA cycles are created when this value, k, is greater than 0.
+	 *  If this decision has no k lookahead specified, then try the grammar.
+	 *  The result is cached in user_k; "*" means unlimited (0).
+	 */
+	public int getUserMaxLookahead() {
+		if ( user_k>=0 ) { // cache for speed
+			return user_k;
+		}
+		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decisionNumber);
+		Object k = blockAST.getOption("k");
+		if ( k==null ) {
+			// no per-decision option; fall back to the grammar-wide value
+			user_k = nfa.grammar.getGrammarMaxLookahead();
+			return user_k;
+		}
+		if (k instanceof Integer) {
+			Integer kI = (Integer)k;
+			user_k = kI.intValue();
+		}
+		else {
+			// must be String "*"
+			if ( k.equals("*") ) {
+				user_k = 0;
+			}
+			// NOTE(review): a String other than "*" leaves user_k untouched
+			// (presumably still negative), so the value would be recomputed
+			// on every call -- confirm option validation rejects such values
+			// upstream.
+		}
+		return user_k;
+	}
+
+	/** Is auto-backtracking on for this decision?  Checks the decision's
+	 *  own "backtrack" option first, then the grammar-level option.
+	 */
+	public boolean getAutoBacktrackMode() {
+		String v =
+			(String)decisionNFAStartState.getAssociatedASTNode().getOption("backtrack");
+		if ( v==null ) {
+			v = (String)nfa.grammar.getOption("backtrack");
+		}
+		return "true".equals(v); // null-safe: missing or other values mean off
+	}
+
+	// Override/seed the cached per-decision max lookahead.
+	public void setUserMaxLookahead(int k) {
+		this.user_k = k;
+	}
+
+	/** Return k if decision is LL(k) for some k else return max int */
+	public int getMaxLookaheadDepth() {
+		if ( isCyclic() ) {
+			return Integer.MAX_VALUE; // cyclic DFA: lookahead is unbounded
+		}
+		return max_k;
+	}
+
+    /** Return a list of Integer alt numbers for which no lookahead could
+     *  be computed or for which no single DFA accept state predicts those
+     *  alts.  Must call verify() first before this makes sense.
+     */
+    public List getUnreachableAlts() {
+        return unreachableAlts;
+    }
+
+	/** Once this DFA has been built, need to verify that:
+	 *
+	 *  1. it's reduced
+	 *  2. all alts have an accept state
+	 *
+	 *  Elsewhere, in the NFA converter, we need to verify that:
+	 *
+	 *  3. alts i and j have disjoint lookahead if no sem preds
+	 *  4. if sem preds, nondeterministic alts must be sufficiently covered
+	 *
+	 *  The reachability walk sets 'reduced', 'cyclic' and prunes
+	 *  'unreachableAlts' as side effects.
+	 */
+	public void verify() {
+		if ( !probe.nonLLStarDecision ) { // avoid if non-LL(*)
+			doesStateReachAcceptState(startState);
+		}
+	}
+
+    /** figure out if this state eventually reaches an accept state and
+     *  modify the instance variable 'reduced' to indicate if we find
+     *  at least one state that cannot reach an accept state.  This implies
+     *  that the overall DFA is not reduced.  This algorithm should be
+     *  linear in the number of DFA states.
+     *
+     *  The algorithm also tracks which alternatives have no accept state,
+     *  indicating a nondeterminism.
+	 *
+	 *  Also computes whether the DFA is cyclic.  States are marked with a
+	 *  tri-state reachability flag; REACHABLE_BUSY marks states on the
+	 *  current recursion path so revisiting one means we found a cycle.
+	 *
+     *  TODO: I call getUniquelyPredicatedAlt too much; cache predicted alt
+     */
+    protected boolean doesStateReachAcceptState(DFAState d) {
+		if ( d.isAcceptState() ) {
+            // accept states have no edges emanating from them so we can return
+            d.setAcceptStateReachable(REACHABLE_YES);
+            // this alt is uniquely predicted, remove from nondeterministic list
+            int predicts = d.getUniquelyPredictedAlt();
+            unreachableAlts.remove(Utils.integer(predicts));
+            return true;
+        }
+
+        // avoid infinite loops
+        d.setAcceptStateReachable(REACHABLE_BUSY);
+
+        boolean anEdgeReachesAcceptState = false;
+        // Visit every transition, track if at least one edge reaches stop state
+		// Cannot terminate when we know this state reaches stop state since
+		// all transitions must be traversed to set status of each DFA state.
+		for (int i=0; i<d.getNumberOfTransitions(); i++) {
+            Transition t = d.transition(i);
+            DFAState edgeTarget = (DFAState)t.target;
+            int targetStatus = edgeTarget.getAcceptStateReachable();
+            if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
+                cyclic = true;
+                continue;
+            }
+            if ( targetStatus==REACHABLE_YES ) { // avoid unnecessary work
+                anEdgeReachesAcceptState = true;
+                continue;
+            }
+            if ( targetStatus==REACHABLE_NO ) {  // avoid unnecessary work
+                continue;
+            }
+			// target must be REACHABLE_UNKNOWN (i.e., unvisited)
+            if ( doesStateReachAcceptState(edgeTarget) ) {
+                anEdgeReachesAcceptState = true;
+                // have to keep looking so don't break loop
+                // must cover all states even if we find a path for this state
+            }
+        }
+        if ( anEdgeReachesAcceptState ) {
+            d.setAcceptStateReachable(REACHABLE_YES);
+        }
+        else {
+			/*
+			if ( d.getNumberOfTransitions()==0 ) {
+				probe.reportDanglingState(d);
+			}
+			*/
+            // dead end: mark it and flag the whole DFA as not reduced
+            d.setAcceptStateReachable(REACHABLE_NO);
+			reduced = false;
+        }
+        return anEdgeReachesAcceptState;
+    }
+
+    // NFA state at which this decision's analysis began.
+    public NFAState getNFADecisionStartState() {
+        return decisionNFAStartState;
+    }
+
+	// Accept state predicting this alt (alts are indexed directly).
+	public DFAState getAcceptState(int alt) {
+		return altToAcceptState[alt];
+	}
+
+	public void setAcceptState(int alt, DFAState acceptState) {
+		altToAcceptState[alt] = acceptState;
+	}
+
+	// Human-readable description set during createStateTables().
+	public String getDescription() {
+		return description;
+	}
+
+	public int getDecisionNumber() {
+        return decisionNFAStartState.getDecisionNumber();
+    }
+
+    /** What GrammarAST node (derived from the grammar) is this DFA
+     *  associated with?  It will point to the start of a block or
+     *  the loop back of a (...)+ block etc...
+     */
+    public GrammarAST getDecisionASTNode() {
+        return decisionNFAStartState.getAssociatedASTNode();
+    }
+
+    /** Is this decision greedy?  Greedy unless the block's "greedy"
+     *  option is explicitly set to "false".
+     */
+    public boolean isGreedy() {
+		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decisionNumber);
+		String v = (String)blockAST.getOption("greedy");
+		// null-safe compare: absent option or any other value means greedy
+		return !"false".equals(v);
+    }
+
+    /** Create the next DFA state, assign it the next state number, and
+     *  grow the states vector to track number -> state.
+     */
+    public DFAState newState() {
+        DFAState n = new DFAState(this);
+        n.stateNumber = stateCounter;
+        stateCounter++;
+		states.setSize(n.stateNumber+1);
+		states.set(n.stateNumber, n); // track state num to state
+        return n;
+    }
+
+	public int getNumberOfStates() {
+		if ( getUserMaxLookahead()>0 ) {
+			// if using fixed lookahead then uniqueSets not set
+			return states.size();
+		}
+		return numberOfStates;
+	}
+
+	public int getNumberOfAlts() {
+		return nAlts;
+	}
+
+	// Delegates to the analysis probe's aborted flag.
+	public boolean analysisAborted() {
+		return probe.analysisAborted();
+	}
+
+    /** Assume every alt (numbered 1..nAlts) is unreachable until the
+     *  reachability walk proves otherwise; also size the alt->accept map.
+     */
+    protected void initAltRelatedInfo() {
+        unreachableAlts = new LinkedList();
+        for (int i = 1; i <= nAlts; i++) {
+            unreachableAlts.add(Utils.integer(i));
+        }
+		altToAcceptState = new DFAState[nAlts+1];
+    }
+
+	// Serialize the DFA starting from its start state; "" if never built.
+	public String toString() {
+		FASerializer serializer = new FASerializer(nfa.grammar);
+		if ( startState==null ) {
+			return "";
+		}
+		return serializer.serialize(startState, false);
+	}
+
+	/** EOT (end of token) is a label that indicates when the DFA conversion
+	 *  algorithm would "fall off the end of a lexer rule".  It normally
+	 *  means the default clause.  So for ('a'..'z')+ you would see a DFA
+	 *  with a state that has a..z and EOT emanating from it.  a..z would
+	 *  jump to a state predicting alt 1 and EOT would jump to a state
+	 *  predicting alt 2 (the exit loop branch).  EOT implies anything other
+	 *  than a..z.  If for some reason, the set is "all char" such as with
+	 *  the wildcard '.', then EOT cannot match anything.  For example,
+	 *
+	 *     BLOCK : '{' (.)* '}'
+	 *
+	 *  consumes all char until EOF when greedy=true.  When all edges are
+	 *  combined for the DFA state after matching '}', you will find that
+	 *  it is all char.  The EOT transition has nothing to match and is
+	 *  unreachable.  The findNewDFAStatesAndAddDFATransitions() method
+	 *  must know to ignore the EOT, so we simply remove it from the
+	 *  reachable labels.  Later analysis will find that the exit branch
+	 *  is not predicted by anything.  For greedy=false, we leave only
+	 *  the EOT label indicating that the DFA should stop immediately
+	 *  and predict the exit branch. The reachable labels are often a
+	 *  set of disjoint values like: [<EOT>, 42, {0..41, 43..65534}]
+	 *  due to DFA conversion so must construct a pure set to see if
+	 *  it is same as Label.ALLCHAR.
+	 *
+	 *  Only do this for Lexers.
+	 *
+	 *  If EOT coexists with ALLCHAR:
+	 *  1. If not greedy, modify the labels parameter to be EOT
+	 *  2. If greedy, remove EOT from the labels set
+	protected boolean reachableLabelsEOTCoexistsWithAllChar(OrderedHashSet labels)
+	{
+		Label eot = new Label(Label.EOT);
+		if ( !labels.containsKey(eot) ) {
+			return false;
+		}
+		System.out.println("### contains EOT");
+		boolean containsAllChar = false;
+		IntervalSet completeVocab = new IntervalSet();
+		int n = labels.size();
+		for (int i=0; i<n; i++) {
+			Label rl = (Label)labels.get(i);
+			if ( !rl.equals(eot) ) {
+				completeVocab.addAll(rl.getSet());
+			}
+		}
+		System.out.println("completeVocab="+completeVocab);
+		if ( completeVocab.equals(Label.ALLCHAR) ) {
+			System.out.println("all char");
+			containsAllChar = true;
+		}
+		return containsAllChar;
+	}
+	 */
+}
+
diff --git a/src/org/antlr/analysis/DFAOptimizer.java b/src/org/antlr/analysis/DFAOptimizer.java
new file mode 100644
index 0000000..8d865be
--- /dev/null
+++ b/src/org/antlr/analysis/DFAOptimizer.java
@@ -0,0 +1,263 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.tool.Grammar;
+import org.antlr.misc.Utils;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/** A module to perform optimizations on DFAs.
+ *
+ *  I could more easily (and more quickly) do some optimizations (such as
+ *  PRUNE_EBNF_EXIT_BRANCHES) during DFA construction, but then it
+ *  messes up the determinism checking.  For example, it looks like
+ *  loop exit branches are unreachable if you prune exit branches
+ *  during DFA construction and before determinism checks.
+ *
+ *  In general, ANTLR's NFA->DFA->codegen pipeline seems very robust
+ *  to me which I attribute to a uniform and consistent set of data
+ *  structures.  Regardless of what I want to "say"/implement, I do so
+ *  within the confines of, for example, a DFA.  The code generator
+ *  can then just generate code--it doesn't have to do much thinking.
+ *  Putting optimizations in the code gen code really starts to make
+ *  it a spaghetti factory (uh oh, now I'm hungry!).  The pipeline is
+ *  very testable; each stage has well defined input/output pairs.
+ *
+ *  ### Optimization: PRUNE_EBNF_EXIT_BRANCHES
+ *
+ *  There is no need to test EBNF block exit branches.  Not only is it
+ *  an unneeded computation, but counter-intuitively, you actually get
+ *  better errors. You can report an error at the missing or extra
+ *  token rather than as soon as you've figured out you will fail.
+ *
+ *  Imagine optional block "( DOT CLASS )? SEMI".  ANTLR generates:
+ *
+ *  int alt=0;
+ *  if ( input.LA(1)==DOT ) {
+ *      alt=1;
+ *  }
+ *  else if ( input.LA(1)==SEMI ) {
+ *      alt=2;
+ *  }
+ *
+ *  Clearly, since Parser.match() will ultimately find the error, we
+ *  do not want to report an error nor do we want to bother testing
+ *  lookahead against what follows the (...)?  We want to generate
+ *  simply "should I enter the subrule?":
+ *
+ *  int alt=2;
+ *  if ( input.LA(1)==DOT ) {
+ *      alt=1;
+ *  }
+ *
+ *  NOTE 1. Greedy loops cannot be optimized in this way.  For example,
+ *  "(greedy=false:'x'|.)* '\n'".  You specifically need the exit branch
+ *  to tell you when to terminate the loop as the same input actually
+ *  predicts one of the alts (i.e., staying in the loop).
+ *
+ *  NOTE 2.  I do not optimize cyclic DFAs at the moment as it doesn't
+ *  seem to work. ;)  I'll have to investigate later to see what work I
+ *  can do on cyclic DFAs to make them have fewer edges.  Might have
+ *  something to do with the EOT token.
+ *
+ *  ### PRUNE_SUPERFLUOUS_EOT_EDGES
+ *
+ *  When a token is a subset of another such as the following rules, ANTLR
+ *  quietly assumes the first token to resolve the ambiguity.
+ *
+ *  EQ			: '=' ;
+ *  ASSIGNOP	: '=' | '+=' ;
+ *
+ *  It can yield states that have only a single edge on EOT to an accept
+ *  state.  This is a waste and messes up my code generation. ;)  If
+ *  Tokens rule DFA goes
+ *
+ * 		s0 -'='-> s3 -EOT-> s5 (accept)
+ *
+ *  then s5 should be pruned and s3 should be made an accept.  Do NOT do this
+ *  for keyword versus ID as the state with EOT edge emanating from it will
+ *  also have another edge.
+ *
+ *  ### Optimization: COLLAPSE_ALL_INCIDENT_EDGES
+ *
+ *  Done during DFA construction.  See method addTransition() in
+ *  NFAToDFAConverter.
+ *
+ *  ### Optimization: MERGE_STOP_STATES
+ *
+ *  Done during DFA construction.  See addDFAState() in NFAToDFAConverter.
+ */
+public class DFAOptimizer {
+	// Optimization on/off switches.  See the class comment above for what
+	// each optimization does.  The last two are actually performed during
+	// DFA construction in NFAToDFAConverter, not by this class.
+	public static boolean PRUNE_EBNF_EXIT_BRANCHES = true;
+	public static boolean PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES = true;
+	public static boolean COLLAPSE_ALL_PARALLEL_EDGES = true;
+	public static boolean MERGE_STOP_STATES = true;
+
+	/** Used by DFA state machine generator to avoid infinite recursion
+	 *  resulting from cycles in the DFA.  This is a set of int state #s.
+	 */
+	protected Set visited = new HashSet();
+
+    protected Grammar grammar;
+
+    public DFAOptimizer(Grammar grammar) {
+		this.grammar = grammar;
+    }
+
+	/** Optimize every decision DFA in the grammar, one per decision number. */
+	public void optimize() {
+		// optimize each DFA in this grammar
+		for (int decisionNumber=1;
+			 decisionNumber<=grammar.getNumberOfDecisions();
+			 decisionNumber++)
+		{
+			DFA dfa = grammar.getLookaheadDFA(decisionNumber);
+			optimize(dfa);
+		}
+	}
+
+	/** Apply whichever enabled optimizations are legal for this DFA:
+	 *  exit-branch pruning for greedy (...)? / loopback decisions that can
+	 *  be inlined, and superfluous-EOT-edge pruning for Tokens-rule
+	 *  decisions that had syntactic ambiguities.
+	 */
+	protected void optimize(DFA dfa) {
+		if ( dfa==null ) {
+			return; // nothing to do
+		}
+		/*
+		System.out.println("Optimize DFA "+dfa.decisionNFAStartState.decisionNumber+
+						   " num states="+dfa.getNumberOfStates());
+		*/
+		//long start = System.currentTimeMillis();
+		if ( PRUNE_EBNF_EXIT_BRANCHES && dfa.canInlineDecision() ) {
+			visited.clear();
+			int decisionType =
+				dfa.getNFADecisionStartState().decisionStateType;
+			// NOTE: non-greedy loops need the exit branch to know when to
+			// terminate, so only prune for greedy decisions (see NOTE 1
+			// in the class comment)
+			if ( dfa.isGreedy() &&
+				 (decisionType==NFAState.OPTIONAL_BLOCK_START ||
+				 decisionType==NFAState.LOOPBACK) )
+			{
+				optimizeExitBranches(dfa.startState);
+			}
+		}
+		// If the Tokens rule has syntactically ambiguous rules, try to prune
+		if ( PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES &&
+			 dfa.isTokensRuleDecision() &&
+			 dfa.probe.stateToSyntacticallyAmbiguousTokensRuleAltsMap.size()>0 )
+		{
+			visited.clear();
+			optimizeEOTBranches(dfa.startState);
+		}
+
+		/* ack...code gen needs this, cannot optimize
+		visited.clear();
+		unlinkUnneededStateData(dfa.startState);
+		*/
+		//long stop = System.currentTimeMillis();
+		//System.out.println("minimized in "+(int)(stop-start)+" ms");
+    }
+
+	/** Depth-first walk from d, removing every transition whose target is
+	 *  an accept state predicting the exit alt (the max alt number).
+	 *  'visited' guards against re-walking states reachable via cycles.
+	 */
+	protected void optimizeExitBranches(DFAState d) {
+		Integer sI = Utils.integer(d.stateNumber);
+		if ( visited.contains(sI) ) {
+			return; // already visited
+		}
+		visited.add(sI);
+		int nAlts = d.dfa.getNumberOfAlts();
+		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) d.transition(i);
+			DFAState edgeTarget = ((DFAState)edge.target);
+			/*
+			System.out.println(d.stateNumber+"-"+
+							   edge.label.toString(d.dfa.nfa.grammar)+"->"+
+							   edgeTarget.stateNumber);
+			*/
+			// if target is an accept state and that alt is the exit alt
+			if ( edgeTarget.isAcceptState() &&
+				edgeTarget.getUniquelyPredictedAlt()==nAlts)
+			{
+				/*
+				System.out.println("ignoring transition "+i+" to max alt "+
+					d.dfa.getNumberOfAlts());
+				*/
+				d.removeTransition(i);
+				i--; // back up one so that i++ of loop iteration stays within bounds
+			}
+			optimizeExitBranches(edgeTarget);
+		}
+	}
+
+	/** Depth-first walk from d; where a state's only edge is an EOT edge
+	 *  to an accept state, remove the edge and fold the accept into d
+	 *  itself (see PRUNE_SUPERFLUOUS_EOT_EDGES in the class comment).
+	 */
+	protected void optimizeEOTBranches(DFAState d) {
+		Integer sI = Utils.integer(d.stateNumber);
+		if ( visited.contains(sI) ) {
+			return; // already visited
+		}
+		visited.add(sI);
+		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) d.transition(i);
+			DFAState edgeTarget = ((DFAState)edge.target);
+			/*
+			System.out.println(d.stateNumber+"-"+
+							   edge.label.toString(d.dfa.nfa.grammar)+"->"+
+							   edgeTarget.stateNumber);
+			*/
+			// if only one edge coming out, it is EOT, and target is accept prune
+			if ( PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES &&
+				edgeTarget.isAcceptState() &&
+				d.getNumberOfTransitions()==1 &&
+				edge.label.isAtom() &&
+				edge.label.getAtom()==Label.EOT )
+			{
+				//System.out.println("state "+d+" can be pruned");
+				// remove the superfluous EOT edge
+				d.removeTransition(i);
+				d.setAcceptState(true); // make it an accept state
+				// force it to uniquely predict the originally predicted state
+				d.cachedUniquelyPredicatedAlt =
+					edgeTarget.getUniquelyPredictedAlt();
+				i--; // back up one so that i++ of loop iteration stays within bounds
+			}
+			optimizeEOTBranches(edgeTarget);
+		}
+	}
+
+	/** Walk DFA states, unlinking the nfa configs and whatever else I
+	 *  can to reduce memory footprint.
+	protected void unlinkUnneededStateData(DFAState d) {
+		Integer sI = Utils.integer(d.stateNumber);
+		if ( visited.contains(sI) ) {
+			return; // already visited
+		}
+		visited.add(sI);
+		d.nfaConfigurations = null;
+		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) d.transition(i);
+			DFAState edgeTarget = ((DFAState)edge.target);
+			unlinkUnneededStateData(edgeTarget);
+		}
+	}
+	 */
+
+}
diff --git a/src/org/antlr/analysis/DFAState.java b/src/org/antlr/analysis/DFAState.java
new file mode 100644
index 0000000..115f471
--- /dev/null
+++ b/src/org/antlr/analysis/DFAState.java
@@ -0,0 +1,811 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.misc.IntSet;
+import org.antlr.misc.OrderedHashSet;
+import org.antlr.misc.Utils;
+import org.antlr.tool.Grammar;
+
+import java.util.*;
+
+/** A DFA state represents a set of possible NFA configurations.
+ *  As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
+ *  to keep track of all possible states the NFA can be in after
+ *  reading each input symbol.  That is to say, after reading
+ *  input a1a2..an, the DFA is in a state that represents the
+ *  subset T of the states of the NFA that are reachable from the
+ *  NFA's start state along some path labeled a1a2..an."
+ *  In conventional NFA->DFA conversion, therefore, the subset T
+ *  would be a bitset representing the set of states the
+ *  NFA could be in.  We need to track the alt predicted by each
+ *  state as well, however.  More importantly, we need to maintain
+ *  a stack of states, tracking the closure operations as they
+ *  jump from rule to rule, emulating rule invocations (method calls).
+ *  Recall that NFAs do not normally have a stack like a pushdown-machine
+ *  so I have to add one to simulate the proper lookahead sequences for
+ *  the underlying LL grammar from which the NFA was derived.
+ *
+ *  I use a list of NFAConfiguration objects.  An NFAConfiguration
+ *  is both a state (ala normal conversion) and an NFAContext describing
+ *  the chain of rules (if any) followed to arrive at that state.  There
+ *  is also the semantic context, which is the "set" of predicates found
+ *  on the path to this configuration.
+ *
+ *  A DFA state may have multiple references to a particular state,
+ *  but with different NFAContexts (with same or different alts)
+ *  meaning that state was reached via a different set of rule invocations.
+ */
+public class DFAState extends State {
+    public static final int INITIAL_NUM_TRANSITIONS = 4;
+	public static final int PREDICTED_ALT_UNSET = NFA.INVALID_ALT_NUMBER-1;
+
+    /** We are part of what DFA?  Use this ref to get access to the
+     *  context trees for an alt.
+     */
+    public DFA dfa;
+
+    /** Track the transitions emanating from this DFA state.  The List
+     *  elements are Transition objects.
+     */
+    protected List transitions = new ArrayList(INITIAL_NUM_TRANSITIONS);
+
+	/** When doing an acyclic DFA, this is the number of lookahead symbols
+	 *  consumed to reach this state.  This value may be nonzero for most
+	 *  dfa states, but it is only a valid value if the user has specified
+	 *  a max fixed lookahead.
+	 */
+    protected int k;
+
+    /** The NFA->DFA algorithm may terminate leaving some states
+     *  without a path to an accept state, implying that upon certain
+     *  input, the decision is not deterministic--no decision about
+     *  predicting a unique alternative can be made.  Recall that an
+     *  accept state is one in which a unique alternative is predicted.
+     */
+    protected int acceptStateReachable = DFA.REACHABLE_UNKNOWN;
+
+    /** Rather than recheck every NFA configuration in a DFA state (after
+     *  resolving) in findNewDFAStatesAndAddDFATransitions just check
+     *  this boolean.  Saves a linear walk per DFA state creation.
+     *  Every little bit helps.
+     */
+    protected boolean resolvedWithPredicates = false;
+
+	/** If a closure operation finds that we tried to invoke the same
+	 *  rule too many times (stack would grow beyond a threshold), it
+	 *  marks the state has aborted and notifies the DecisionProbe.
+	 */
+	protected boolean abortedDueToRecursionOverflow = false;
+
+	/** If we detect recursion on more than one alt, decision is non-LL(*),
+	 *  but try to isolate it to only those states whose closure operations
+	 *  detect recursion.  There may be other alts that are cool:
+	 *
+	 *  a : recur '.'
+	 *    | recur ';'
+	 *    | X Y  // LL(2) decision; don't abort and use k=1 plus backtracking
+	 *    | X Z
+	 *    ;
+	 */
+	protected boolean abortedDueToMultipleRecursiveAlts = false;
+
+	/** Build up the hash code for this state as NFA configurations
+     *  are added, as its list of configurations grows monotonically.
+     */
+    protected int cachedHashCode;
+
+	protected int cachedUniquelyPredicatedAlt = PREDICTED_ALT_UNSET;
+
+    /** The set of NFA configurations (state,alt,context) for this DFA state */
+    protected Set nfaConfigurations = new HashSet();
+
+    /** Used to prevent the closure operation from looping to itself and
+     *  hence looping forever.  Sensitive to the NFA state, the alt, and
+     *  the context.  This just the nfa config set because we want to
+	 *  prevent closures only on states contributed by closure not reach
+	 *  operations.
+     */
+	protected Set closureBusy = new HashSet();
+
+	/** As this state is constructed (i.e., as NFA states are added), we
+     *  can easily check for non-epsilon transitions because the only
+     *  transition that could be a valid label is transition(0).  When we
+     *  process this node eventually, we'll have to walk all states looking
+     *  for all possible transitions.  That is of the order: size(label space)
+     *  times size(nfa states), which can be pretty damn big.  It's better
+     *  to simply track possible labels.
+     *  This is type List<Label>.
+     */
+    protected OrderedHashSet reachableLabels = new OrderedHashSet();
+
+    /** Create a DFA state owned by the given decision DFA. */
+    public DFAState(DFA dfa) {
+        this.dfa = dfa;
+    }
+
+    /** Return the ith outgoing transition (0..n-1). */
+    public Transition transition(int i) {
+        return (Transition)transitions.get(i);
+    }
+
+    public int getNumberOfTransitions() {
+        return transitions.size();
+    }
+
+    public void addTransition(Transition t) {
+        transitions.add(t);
+    }
+
+	/** Add a transition from this state to target with label.  Return
+	 *  the transition number from 0..n-1.
+	 */
+    public int addTransition(DFAState target, Label label) {
+        transitions.add( new Transition(label, target) );
+		return transitions.size()-1;
+    }
+
+    public Transition getTransition(int trans) {
+        return (Transition)transitions.get(trans);
+    }
+
+	/** Remove the transition at index trans; later indices shift down. */
+	public void removeTransition(int trans) {
+		transitions.remove(trans);
+	}
+
+    /** Add an NFA configuration to this DFA node.  Add uniquely
+     *  an NFA state/alt/syntactic&semantic context (chain of invoking state(s)
+     *  and semantic predicate contexts).
+     *
+     *  I don't see how there could be two configurations with same
+     *  state|alt|synCtx and different semantic contexts because the
+     *  semantic contexts are computed along the path to a particular state
+     *  so those two configurations would have to have the same predicate.
+     *  Nonetheless, the addition of configurations is unique on all
+     *  configuration info.  I guess I'm saying that syntactic context
+     *  implies semantic context as the latter is computed according to the
+     *  former.
+     *
+     *  As we add configurations to this DFA state, track the set of all possible
+     *  transition labels so we can simply walk it later rather than doing a
+     *  loop over all possible labels in the NFA.
+     */
+    public void addNFAConfiguration(NFAState state, NFAConfiguration c) {
+		if ( nfaConfigurations.contains(c) ) {
+            return;
+        }
+
+        nfaConfigurations.add(c);
+
+        // update hashCode; for some reason using context.hashCode() also
+        // makes the GC take like 70% of the CPU and is slow!
+        cachedHashCode += c.state + c.alt;
+
+        // update reachableLabels
+        // (only non-epsilon, non-predicate labels can be matched from here)
+        if ( state.transition(0)!=null ) {
+            Label label = state.transition(0).label;
+            if ( !(label.isEpsilon()||label.isSemanticPredicate()) ) {
+                if ( state.transition(1)==null ) {
+                    c.singleAtomTransitionEmanating = true;
+                }
+                addReachableLabel(label);
+            }
+        }
+    }
+
+	/** Convenience: build the NFAConfiguration from its parts and add it. */
+	public void addNFAConfiguration(NFAState state, int alt, NFAContext context, SemanticContext semanticContext) {
+		NFAConfiguration c = new NFAConfiguration(state.stateNumber,
+												  alt,
+												  context,
+												  semanticContext);
+		addNFAConfiguration(state, c);
+	}
+
+	/** Add label uniquely and disjointly; intersection with
+     *  another set or int/char forces breaking up the set(s).
+     *
+     *  Example, if reachable list of labels is [a..z, {k,9}, 0..9],
+     *  the disjoint list will be [{a..j,l..z}, k, 9, 0..8].
+     *
+     *  As we add NFA configurations to a DFA state, we might as well track
+     *  the set of all possible transition labels to make the DFA conversion
+     *  more efficient.  W/o the reachable labels, we'd need to check the
+     *  whole vocabulary space (could be 0..\uFFFF)!  The problem is that
+     *  labels can be sets, which may overlap with int labels or other sets.
+     *  As we need a deterministic set of transitions from any
+     *  state in the DFA, we must make the reachable labels set disjoint.
+     *  This operation amounts to finding the character classes for this
+     *  DFA state whereas with tools like flex, that need to generate a
+     *  homogeneous DFA, must compute char classes across all states.
+     *  We are going to generate DFAs with heterogeneous states so we
+     *  only care that the set of transitions out of a single state are
+     *  unique. :)
+     *
+     *  The idea for adding a new set, t, is to look for overlap with the
+     *  elements of existing list s.  Upon overlap, replace
+     *  existing set s[i] with two new disjoint sets, s[i]-t and s[i]&t.
+     *  (if s[i]-t is nil, don't add).  The remainder is t-s[i], which is
+     *  what you want to add to the set minus what was already there.  The
+     *  remainder must then be compared against the i+1..n elements in s
+     *  looking for another collision.  Each collision results in a smaller
+     *  and smaller remainder.  Stop when you run out of s elements or
+     *  remainder goes to nil.  If remainder is non nil when you run out of
+     *  s elements, then add remainder to the end.
+     *
+     *  Single element labels are treated as sets to make the code uniform.
+     */
+    protected void addReachableLabel(Label label) {
+        /*
+		System.out.println("addReachableLabel to state "+dfa.decisionNumber+"."+stateNumber+": "+label.getSet().toString(dfa.nfa.grammar));
+		System.out.println("start of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
+				"reachableLabels="+reachableLabels.toString());
+        */
+		if ( reachableLabels.contains(label) ) { // exact label present
+            return;
+        }
+        IntSet t = label.getSet();
+        IntSet remainder = t; // remainder starts out as whole set to add
+        int n = reachableLabels.size(); // only look at initial elements
+        // walk the existing list looking for the collision
+        // loop invariant: t/remainder is the part of the incoming label
+        // not yet accounted for by reachableLabels[0..i-1]
+        for (int i=0; i<n; i++) {
+            Label rl = (Label)reachableLabels.get(i);
+            /*
+            if ( label.equals(rl) ) {
+                // OPTIMIZATION:
+                // exact label already here, just return; previous addition
+                // would have made everything unique/disjoint
+                return;
+            }
+            */
+            IntSet s_i = rl.getSet();
+            IntSet intersection = s_i.and(t);
+            /*
+			System.out.println("comparing ["+i+"]: "+label.toString(dfa.nfa.grammar)+" & "+
+                    rl.toString(dfa.nfa.grammar)+"="+
+                    intersection.toString(dfa.nfa.grammar));
+            */
+			if ( intersection.isNil() ) {
+                continue;
+            }
+
+            // For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
+            // (ignoring s_i-t if nil; don't put in list)
+
+            // Replace existing s_i with intersection since we
+            // know that will always be a non nil character class
+            reachableLabels.set(i, new Label(intersection));
+
+            // Compute s_i-t to see what is in current set and not in incoming
+            IntSet existingMinusNewElements = s_i.subtract(t);
+			//System.out.println(s_i+"-"+t+"="+existingMinusNewElements);
+            if ( !existingMinusNewElements.isNil() ) {
+                // found a new character class, add to the end (doesn't affect
+                // outer loop duration due to n computation a priori.
+                Label newLabel = new Label(existingMinusNewElements);
+                reachableLabels.add(newLabel);
+            }
+
+			/*
+            System.out.println("after collision, " +
+                    "reachableLabels="+reachableLabels.toString());
+					*/
+
+            // anything left to add to the reachableLabels?
+            remainder = t.subtract(s_i);
+            if ( remainder.isNil() ) {
+                break; // nothing left to add to set.  done!
+            }
+
+            // keep matching the leftover piece against later elements
+            t = remainder;
+        }
+        if ( !remainder.isNil() ) {
+			/*
+			System.out.println("before add remainder to state "+dfa.decisionNumber+"."+stateNumber+": " +
+					"reachableLabels="+reachableLabels.toString());
+			System.out.println("remainder state "+dfa.decisionNumber+"."+stateNumber+": "+remainder.toString(dfa.nfa.grammar));
+            */
+			Label newLabel = new Label(remainder);
+            reachableLabels.add(newLabel);
+        }
+		/*
+		System.out.println("#END of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
+				"reachableLabels="+reachableLabels.toString());
+				*/
+    }
+
+    /** The disjoint set of labels computed by addReachableLabel(). */
+    public OrderedHashSet getReachableLabels() {
+        return reachableLabels;
+    }
+
+    public Set getNFAConfigurations() {
+        return this.nfaConfigurations;
+    }
+
+    public void setNFAConfigurations(Set configs) {
+        this.nfaConfigurations = configs;
+    }
+
+    /** A decent hash for a DFA state is the sum of the NFA state/alt pairs.
+     *  This is used when we add DFAState objects to the DFA.states Map and
+     *  when we compare DFA states.  Computed in addNFAConfiguration()
+     *  (0 until the first configuration is added).
+     */
+    public int hashCode() {
+        return cachedHashCode;
+    }
+
+    /** Two DFAStates are equal if their NFA configuration sets are the
+	 *  same. This method is used to see if a DFA state already exists.
+	 *
+     *  Because the number of alternatives and number of NFA configurations are
+     *  finite, there is a finite number of DFA states that can be processed.
+     *  This is necessary to show that the algorithm terminates.
+	 *
+	 *  Cannot test the state numbers here because in DFA.addState we need
+	 *  to know if any other state exists that has this exact set of NFA
+	 *  configurations.  The DFAState state number is irrelevant.
+     */
+    public boolean equals(Object o) {
+        // Check the type BEFORE casting; the previous version cast first,
+        // which threw ClassCastException for a non-DFAState argument and
+        // thereby violated the Object.equals() contract.  instanceof also
+        // yields false for o==null, covering the old null check.
+        if ( !(o instanceof DFAState) ) {
+            return false;
+        }
+        DFAState other = (DFAState)o;
+        if ( this.hashCode()!=other.hashCode() ) {
+            return false;
+        }
+		// if not same number of NFA configurations, cannot be same state
+		if ( this.nfaConfigurations.size() != other.nfaConfigurations.size() ) {
+			return false;
+		}
+
+		// compare set of NFA configurations in this set with other
+        Iterator iter = this.nfaConfigurations.iterator();
+        while (iter.hasNext()) {
+            NFAConfiguration myConfig = (NFAConfiguration) iter.next();
+			if ( !other.nfaConfigurations.contains(myConfig) ) {
+				return false;
+			}
+        }
+        return true;
+    }
+
+    /** Walk each configuration and if they are all the same alt, return
+     *  that alt else return NFA.INVALID_ALT_NUMBER.  Ignore resolved
+     *  configurations, but don't ignore resolveWithPredicate configs
+     *  because this state should not be an accept state.  We need to add
+     *  this to the work list and then have semantic predicate edges
+     *  emanating from it.
+     */
+    public int getUniquelyPredictedAlt() {
+		// return the cached answer if a previous call already computed it
+		if ( cachedUniquelyPredicatedAlt!=PREDICTED_ALT_UNSET ) {
+			return cachedUniquelyPredicatedAlt;
+		}
+        int alt = NFA.INVALID_ALT_NUMBER;
+        Iterator iter = nfaConfigurations.iterator();
+        NFAConfiguration configuration;
+        while (iter.hasNext()) {
+            configuration = (NFAConfiguration) iter.next();
+            // ignore anything we resolved; predicates will still result
+            // in transitions out of this state, so must count those
+            // configurations; i.e., don't ignore resolveWithPredicate configs
+            if ( configuration.resolved ) {
+                continue;
+            }
+            if ( alt==NFA.INVALID_ALT_NUMBER ) {
+                alt = configuration.alt; // found first nonresolved alt
+            }
+            else if ( configuration.alt!=alt ) {
+                // two different unresolved alts -> no unique prediction
+                return NFA.INVALID_ALT_NUMBER;
+            }
+        }
+		// cache for next time (also INVALID_ALT_NUMBER if no configs survive)
+		this.cachedUniquelyPredicatedAlt = alt;
+        return alt;
+    }
+
+	/** Return the uniquely mentioned alt from the NFA configurations;
+	 *  Ignore the resolved bit etc...  Return INVALID_ALT_NUMBER
+	 *  if there is more than one alt mentioned.
+	 */ 
+	public int getUniqueAlt() {
+		int alt = NFA.INVALID_ALT_NUMBER;
+		Iterator iter = nfaConfigurations.iterator();
+		NFAConfiguration configuration;
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			if ( alt==NFA.INVALID_ALT_NUMBER ) {
+				alt = configuration.alt; // found first alt
+			}
+			else if ( configuration.alt!=alt ) {
+				// a second distinct alt appeared; not unique
+				return NFA.INVALID_ALT_NUMBER;
+			}
+		}
+		return alt;
+	}
+
+	/** When more than one alternative can match the same input, the first
+	 *  alternative is chosen to resolve the conflict.  The other alts
+	 *  are "turned off" by setting the "resolved" flag in the NFA
+	 *  configurations.  Return the set of disabled alternatives.  For
+	 *
+	 *  a : A | A | A ;
+	 *
+	 *  this method returns {2,3} as disabled.  This does not mean that
+	 *  the alternative is totally unreachable, it just means that for this
+	 *  DFA state, that alt is disabled.  There may be other accept states
+	 *  for that alt.
+	 */
+	public Set getDisabledAlternatives() {
+		// LinkedHashSet keeps the alts in the order they were encountered
+		Set disabled = new LinkedHashSet();
+		Iterator iter = nfaConfigurations.iterator();
+		NFAConfiguration configuration;
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			// a resolved config is one turned off during conflict resolution
+			if ( configuration.resolved ) {
+				disabled.add(Utils.integer(configuration.alt));
+			}
+		}
+		return disabled;
+	}
+
+	/**
+	public int getNumberOfEOTNFAStates() {
+		int n = 0;
+		Iterator iter = nfaConfigurations.iterator();
+		NFAConfiguration configuration;
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			NFAState s = dfa.nfa.getState(configuration.state);
+			if ( s.isEOTState() ) {
+				n++;
+			}
+		}
+		return n;
+	}
+    */
+	
+	/** Which alts should be reported as nondeterministic for this state?
+	 *  If the user's fixed lookahead is exhausted, or analysis aborted
+	 *  (recursion overflow / multiple recursive alts), every alt mentioned
+	 *  here is suspect; otherwise defer to getConflictingAlts(), which
+	 *  may return null when there is no conflict.
+	 */
+	protected Set getNonDeterministicAlts() {
+		int user_k = dfa.getUserMaxLookahead();
+		if ( user_k>0 && user_k==k ) {
+			// if fixed lookahead, then more than 1 alt is a nondeterminism
+			// if we have hit the max lookahead
+			return getAltSet();
+		}
+		else if ( abortedDueToMultipleRecursiveAlts || abortedDueToRecursionOverflow ) {
+			// if we had to abort for non-LL(*) state assume all alts are a problem
+			return getAltSet();
+		}
+		else {
+			return getConflictingAlts();
+		}
+	}
+
+    /** Walk each NFA configuration in this DFA state looking for a conflict
+     *  where (s|i|ctx) and (s|j|ctx) exist, indicating that state s with
+     *  context conflicting ctx predicts alts i and j.  Return an Integer set
+	 *  of the alternative numbers that conflict.  Two contexts conflict if
+	 *  they are equal or one is a stack suffix of the other or one is
+	 *  the empty context.
+	 *
+     *  Use a hash table to record the lists of configs for each state
+	 *  as they are encountered.  We need only consider states for which
+	 *  there is more than one configuration.  The configurations' predicted
+	 *  alt must be different or must have different contexts to avoid a
+	 *  conflict.
+	 *
+	 *  Don't report conflicts for DFA states that have conflicting Tokens
+	 *  rule NFA states; they will be resolved in favor of the first rule.
+     */
+    protected Set getConflictingAlts() {
+		// TODO this is called multiple times: cache result?
+		//System.out.println("getNondetAlts for DFA state "+stateNumber);
+		// Returns null (not an empty set) when no conflict exists; callers
+		// treat null as "this state is deterministic".
+ 		Set nondeterministicAlts = new HashSet();
+
+		// If only 1 NFA conf then no way it can be nondeterministic;
+		// save the overhead.  There are many o-a->o NFA transitions
+		// and so we save a hash map and iterator creation for each
+		// state.
+		if ( nfaConfigurations.size()<=1 ) {
+			return null;
+		}
+
+		// First get a list of configurations for each state.
+		// Most of the time, each state will have one associated configuration
+		Iterator iter = nfaConfigurations.iterator();
+		Map stateToConfigListMap = new HashMap();
+		NFAConfiguration configuration;
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			Integer stateI = Utils.integer(configuration.state);
+			List prevConfigs = (List)stateToConfigListMap.get(stateI);
+			if ( prevConfigs==null ) {
+				prevConfigs = new ArrayList();
+				stateToConfigListMap.put(stateI, prevConfigs);
+			}
+			prevConfigs.add(configuration);
+		}
+
+		// potential conflicts are states with > 1 configuration and diff alts
+		Set states = stateToConfigListMap.keySet();
+		int numPotentialConflicts = 0;
+		for (Iterator it = states.iterator(); it.hasNext();) {
+			Integer stateI = (Integer) it.next();
+			boolean thisStateHasPotentialProblem = false;
+			List configsForState = (List)stateToConfigListMap.get(stateI);
+			int alt=0;
+			// size()>1 in the loop condition skips singleton lists entirely
+			for (int i = 0; i < configsForState.size() && configsForState.size()>1 ; i++) {
+				NFAConfiguration c = (NFAConfiguration) configsForState.get(i);
+				if ( alt==0 ) {
+					alt = c.alt;
+				}
+				else if ( c.alt!=alt ) {
+					/*
+					System.out.println("potential conflict in state "+stateI+
+									   " configs: "+configsForState);
+					*/
+					// 11/28/2005: don't report closures that pinch back
+					// together in Tokens rule.  We want to silently resolve
+					// to the first token definition ala lex/flex by ignoring
+					// these conflicts.
+					if ( dfa.nfa.grammar.type!=Grammar.LEXER ||
+						 !dfa.decisionNFAStartState.enclosingRule.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
+					{
+						numPotentialConflicts++;
+						thisStateHasPotentialProblem = true;
+					}
+				}
+			}
+			if ( !thisStateHasPotentialProblem ) {
+				// remove NFA state's configurations from
+				// further checking; no issues with it
+				// (can't remove as it's concurrent modification; set to null)
+				stateToConfigListMap.put(stateI, null);
+			}
+		}
+
+		// a fast check for potential issues; most states have none
+		if ( numPotentialConflicts==0 ) {
+			return null;
+		}
+
+		// we have a potential problem, so now go through config lists again
+		// looking for different alts (only states with potential issues
+		// are left in the states set).  Now we will check context.
+		// For example, the list of configs for NFA state 3 in some DFA
+		// state might be:
+		//   [3|2|[28 18 $], 3|1|[28 $], 3|1, 3|2]
+		// I want to create a map from context to alts looking for overlap:
+		//   [28 18 $] -> 2
+		//   [28 $] -> 1
+		//   [$] -> 1,2
+		// Indeed a conflict exists as same state 3, same context [$], predicts
+		// alts 1 and 2.
+		// walk each state with potential conflicting configurations
+		for (Iterator it = states.iterator(); it.hasNext();) {
+			Integer stateI = (Integer) it.next();
+			List configsForState = (List)stateToConfigListMap.get(stateI);
+			// compare each configuration pair s, t to ensure:
+			// s.ctx different than t.ctx if s.alt != t.alt
+			// (configsForState is null for states cleared in the first pass)
+			for (int i = 0; configsForState!=null && i < configsForState.size(); i++) {
+				NFAConfiguration s = (NFAConfiguration) configsForState.get(i);
+				for (int j = i+1; j < configsForState.size(); j++) {
+					NFAConfiguration t = (NFAConfiguration)configsForState.get(j);
+					// conflicts means s.ctx==t.ctx or s.ctx is a stack
+					// suffix of t.ctx or vice versa (if alts differ).
+					// Also a conflict if s.ctx or t.ctx is empty
+					if ( s.alt != t.alt && s.context.conflictsWith(t.context) ) {
+						nondeterministicAlts.add(Utils.integer(s.alt));
+						nondeterministicAlts.add(Utils.integer(t.alt));
+					}
+				}
+			}
+		}
+
+		// no conflicting pairs survived the context check
+		if ( nondeterministicAlts.size()==0 ) {
+			return null;
+		}
+        return nondeterministicAlts;
+    }
+
+	/** Collect the alt predicted by each NFA configuration in this DFA
+	 *  state.  Returns null (not an empty set) when there are no
+	 *  configurations, matching what callers expect.
+	 */
+	public Set getAltSet() {
+		Set predictedAlts = new HashSet();
+		for (Iterator it = nfaConfigurations.iterator(); it.hasNext();) {
+			NFAConfiguration c = (NFAConfiguration) it.next();
+			predictedAlts.add(Utils.integer(c.alt));
+		}
+		if ( predictedAlts.size()==0 ) {
+			return null;
+		}
+		return predictedAlts;
+	}
+
+	/** Return the set of NFA state numbers (as Integers) mentioned by the
+	 *  configurations of this DFA state that predict the given alt.
+	 *  Returns an empty set (never null) when no configuration matches.
+	 */
+	public Set getNFAStatesForAlt(int alt) {
+		// note: these are states, not alts; renamed local for clarity
+		Set nfaStates = new HashSet();
+		for (Iterator it = nfaConfigurations.iterator(); it.hasNext();) {
+			NFAConfiguration c = (NFAConfiguration) it.next();
+			if ( c.alt == alt ) {
+				nfaStates.add(Utils.integer(c.state));
+			}
+		}
+		return nfaStates;
+	}
+
+	/** For gated productions, we need a list of all predicates for the
+	 *  target of an edge so we can gate the edge based upon the predicates
+	 *  associated with taking that path (if any).
+	 *
+	 *  experimental 11/29/2005
+	 *
+	public Set getGatedPredicatesInNFAConfigurations() {
+		Set preds = new HashSet();
+		Iterator iter = nfaConfigurations.iterator();
+		NFAConfiguration configuration;
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			if ( configuration.semanticContext.isGated() ) {
+				preds.add(configuration.semanticContext);
+			}
+		}
+		if ( preds.size()==0 ) {
+			return null;
+		}
+		return preds;
+	}
+	 */
+
+	/** Return the semantic contexts of configurations that correspond to
+	 *  manually-specified syntactic predicates: gated AND flagged as a syn
+	 *  pred.  Returns null when none exist.
+	 */
+	public Set getSyntacticPredicatesInNFAConfigurations() {
+		Set manualSynPreds = new HashSet();
+		for (Iterator it = nfaConfigurations.iterator(); it.hasNext();) {
+			NFAConfiguration c = (NFAConfiguration) it.next();
+			SemanticContext gatedPredExpr =
+				c.semanticContext.getGatedPredicateContext();
+			// if this is a manual syn pred (gated and syn pred), add
+			if ( gatedPredExpr!=null &&
+				 c.semanticContext.isSyntacticPredicate() )
+			{
+				manualSynPreds.add(c.semanticContext);
+			}
+		}
+		if ( manualSynPreds.size()==0 ) {
+			return null;
+		}
+		return manualSynPreds;
+	}
+
+	/** For gated productions, we need an OR'd list of all predicates for the
+	 *  target of an edge so we can gate the edge based upon the predicates
+	 *  associated with taking that path (if any).
+	 *
+	 *  For syntactic predicates, we only want to generate predicate
+	 *  evaluations as it transitions to an accept state; waste to
+	 *  do it earlier.  So, only add gated preds derived from manually-
+	 *  specified syntactic predicates if this is an accept state.
+	 *
+	 *  Also, since configurations w/o gated predicates are like true
+	 *  gated predicates, finding a configuration whose alt has no gated
+	 *  predicate implies we should evaluate the predicate to true. This
+	 *  means the whole edge has to be ungated. Consider:
+	 *
+	 *	 X : ('a' | {p}?=> 'a')
+	 *	   | 'a' 'b'
+	 *	   ;
+	 *
+	 *  Here, you 'a' gets you from s0 to s1 but you can't test p because
+	 *  plain 'a' is ok.  It's also ok for starting alt 2.  Hence, you can't
+	 *  test p.  Even on the edge going to accept state for alt 1 of X, you
+	 *  can't test p.  You can get to the same place with and w/o the context.
+	 *  Therefore, it is never ok to test p in this situation. 
+	 *
+	 *  TODO: cache this as it's called a lot; or at least set bit if >1 present in state
+	 */
+	public SemanticContext getGatedPredicatesInNFAConfigurations() {
+		Iterator iter = nfaConfigurations.iterator();
+		SemanticContext unionOfPredicatesFromAllAlts = null;
+		NFAConfiguration configuration;
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			SemanticContext gatedPredExpr =
+				configuration.semanticContext.getGatedPredicateContext();
+			if ( gatedPredExpr==null ) {
+				// if we ever find a configuration w/o a gated predicate
+				// (even if it's a nongated predicate), we cannot gate
+				// the incident edges.
+				return null;
+			}
+			else if ( acceptState || !configuration.semanticContext.isSyntacticPredicate() ) {
+				// at this point we have a gated predicate and, due to elseif,
+				// we know it's an accept and not a syn pred.  In this case,
+				// it's safe to add the gated predicate to the union.  We
+				// only want to add syn preds if it's an accept state.  Other
+				// gated preds can be used with edges leading to accept states.
+				if ( unionOfPredicatesFromAllAlts==null ) {
+					unionOfPredicatesFromAllAlts = gatedPredExpr;
+				}
+				else {
+					unionOfPredicatesFromAllAlts =
+						SemanticContext.or(unionOfPredicatesFromAllAlts,gatedPredExpr);
+				}
+			}
+		}
+		// a union that reduced to "always true" gates nothing; report ungated
+		if ( unionOfPredicatesFromAllAlts instanceof SemanticContext.TruePredicate ) {
+			return null;
+		}
+		return unionOfPredicatesFromAllAlts;
+	}
+
+    /** Is an accept state reachable from this state?  Returned as an int
+     *  code, not a boolean; presumably one of the REACHABLE_* codes used
+     *  by the analysis — confirm against DecisionProbe.
+     */
+    public int getAcceptStateReachable() {
+        return acceptStateReachable;
+    }
+
+    /** Record the accept-state reachability code for this state. */
+    public void setAcceptStateReachable(int acceptStateReachable) {
+        this.acceptStateReachable = acceptStateReachable;
+    }
+
+    /** Was a nondeterminism in this state resolved with semantic predicates? */
+    public boolean isResolvedWithPredicates() {
+        return resolvedWithPredicates;
+    }
+
+    /** Render as "stateNumber:{config1, config2, ...}" listing every NFA
+     *  configuration (each shows the alt it predicts).
+     */
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        buf.append(stateNumber);
+        buf.append(":{");
+        int emitted = 0;
+        for (Iterator it = nfaConfigurations.iterator(); it.hasNext();) {
+            Object configuration = it.next();
+            if ( emitted>0 ) {
+                buf.append(", ");
+            }
+            buf.append(configuration);
+            emitted++;
+        }
+        buf.append("}");
+        return buf.toString();
+    }
+
+	/** Lookahead depth k at which this DFA state was created. */
+	public int getLookaheadDepth() {
+		return k;
+	}
+
+	/** Set this state's lookahead depth, also bumping the enclosing DFA's
+	 *  max_k high-water mark when this k exceeds it.
+	 */
+	public void setLookaheadDepth(int k) {
+		this.k = k;
+		if ( k > dfa.max_k ) { // track max k for entire DFA
+			dfa.max_k = k;
+		}
+	}
+
+}
diff --git a/src/org/antlr/analysis/DecisionProbe.java b/src/org/antlr/analysis/DecisionProbe.java
new file mode 100644
index 0000000..3e3107f
--- /dev/null
+++ b/src/org/antlr/analysis/DecisionProbe.java
@@ -0,0 +1,941 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+import org.antlr.tool.ANTLRParser;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+/** Collection of information about what is wrong with a decision as
+ *  discovered while building the DFA predictor.
+ *
+ *  The information is collected during NFA->DFA conversion and, while
+ *  some of this is available elsewhere, it is nice to have it all tracked
+ *  in one spot so a great error message can be easily had.  I also like
+ *  the fact that this object tracks it all for later perusing to make an
+ *  excellent error message instead of lots of imprecise on-the-fly warnings
+ *  (during conversion).
+ *
+ *  A decision normally only has one problem; e.g., some input sequence
+ *  can be matched by multiple alternatives.  Unfortunately, some decisions
+ *  such as
+ *
+ *  a : ( A | B ) | ( A | B ) | A ;
+ *
+ *  have multiple problems.  So in general, you should approach a decision
+ *  as having multiple flaws each one uniquely identified by a DFAState.
+ *  For example, statesWithSyntacticallyAmbiguousAltsSet tracks the set of
+ *  all DFAStates where ANTLR has discovered a problem.  Recall that a decision
+ *  is represented internally with a DFA comprised of multiple states, each of
+ *  which could potentially have problems.
+ *
+ *  Because of this, you need to iterate over this list of DFA states.  You'll
+ *  note that most of the informational methods like
+ *  getSampleNonDeterministicInputSequence() require a DFAState.  This state
+ *  will be one of the iterated states from stateToSyntacticallyAmbiguousAltsSet.
+ *
+ *  This class is not thread safe due to shared use of visited maps etc...
+ *  Only one thread should really need to access one DecisionProbe anyway.
+ */
+public class DecisionProbe {
+	/** The decision DFA whose analysis results this probe records. */
+	public DFA dfa;
+
+	/** Track all DFA states with nondeterministic alternatives.
+	 *  By reaching the same DFA state, a path through the NFA for some input
+	 *  is able to reach the same NFA state by starting at more than one
+	 *  alternative's left edge.  Though, later, we may find that predicates
+	 *  resolve the issue, but track info anyway.
+	 *  Set<DFAState>.  Note that from the DFA state, you can ask for
+	 *  which alts are nondeterministic.
+	 */
+	protected Set statesWithSyntacticallyAmbiguousAltsSet = new HashSet();
+
+	/** Track just like stateToSyntacticallyAmbiguousAltsMap, but only
+	 *  for nondeterminisms that arise in the Tokens rule such as keyword vs
+	 *  ID rule.  The state maps to the list of Tokens rule alts that are
+	 *  in conflict.
+	 *  Map<DFAState, Set<int>>
+	 */
+	protected Map stateToSyntacticallyAmbiguousTokensRuleAltsMap = new HashMap();
+
+	/** Was a syntactic ambiguity resolved with predicates?  Any DFA
+	 *  state that predicts more than one alternative, must be resolved
+	 *  with predicates or it should be reported to the user.
+	 *  Set<DFAState>
+	 */
+	protected Set statesResolvedWithSemanticPredicatesSet = new HashSet();
+
+	/** Track the predicates for each alt per DFA state;
+	 *  more than one DFA state might have syntactically ambig alt prediction.
+	 *  This is Map<DFAState, Map<int,SemanticContext>>; that is, it
+	 *  maps DFA state to another map, mapping alt number to a
+	 *  SemanticContext (pred(s) to execute to resolve syntactic ambiguity).
+	 */
+	protected Map stateToAltSetWithSemanticPredicatesMap = new HashMap();
+
+	/** Map<DFAState,List<int>> Tracks alts insufficiently covered.
+	 *  For example, p1||true gets reduced to true and so leaves
+	 *  whole alt uncovered.  This maps DFA state to the set of alts
+	 */
+	protected Map stateToIncompletelyCoveredAltsMap = new HashMap();
+
+	/** The set of states w/o emanating edges and w/o resolving sem preds. */
+	protected Set danglingStates = new HashSet();
+
+	/** The overall list of alts within the decision that have at least one
+	 *  conflicting input sequence.
+	 */
+	protected Set altsWithProblem = new HashSet();
+
+	/** If decision with > 1 alt has recursion in > 1 alt, it's nonregular
+	 *  lookahead.  The decision cannot be made with a DFA.
+	 *  the alts are stored in altsWithProblem.
+	 */
+	protected boolean nonLLStarDecision = false;
+
+	/** Recursion is limited to a particular depth.  If that limit is exceeded
+	 *  the proposed new NFAConfiguration is recorded for the associated DFA state.
+	 *  Map<Integer DFA state number,List<NFAConfiguration>>.
+	 */
+	protected Map stateToRecursiveOverflowConfigurationsMap = new HashMap();
+
+	/** Left recursion discovered.  The proposed new NFAConfiguration
+	 *  is recorded for the associated DFA state.
+	 *  Map<DFAState,List<NFAConfiguration>>.
+	 */
+	protected Map stateToLeftRecursiveConfigurationsMap = new HashMap();
+
+	/** Did ANTLR have to terminate early on the analysis of this decision? */
+	protected boolean terminated = false;
+
+	/** Used to find paths through syntactically ambiguous DFA. */
+	protected Map stateReachable;
+	public static final Integer REACHABLE_BUSY = Utils.integer(-1);
+	public static final Integer REACHABLE_NO = Utils.integer(0);
+	public static final Integer REACHABLE_YES = Utils.integer(1);
+
+	/** Used while finding a path through an NFA whose edge labels match
+	 *  an input sequence.  Tracks the input position
+	 *  we were at the last time at this node.  If same input position, then
+	 *  we'd have reached same state without consuming input...probably an
+	 *  infinite loop.  Stop.  Set<String>.  The strings look like
+	 *  stateNumber_labelIndex.
+	 */
+	protected Set statesVisitedAtInputDepth;
+
+	/** Per-call visited set used while extracting a sample input sequence. */
+	protected Set statesVisitedDuringSampleSequence;
+
+	public static boolean verbose = false;
+
+	/** Create a probe associated with a single decision DFA. */
+	public DecisionProbe(DFA dfa) {
+		this.dfa = dfa;
+	}
+
+	// I N F O R M A T I O N  A B O U T  D E C I S I O N
+
+	/** Return a string like "3:22: ( A {;} | B )" that describes this
+	 *  decision.
+	 */
+	public String getDescription() {
+		return dfa.getNFADecisionStartState().getDescription();
+	}
+
+	/** Delegates to the DFA: is it reduced? */
+	public boolean isReduced() {
+		return dfa.isReduced();
+	}
+
+	/** Delegates to the DFA: does it contain cycles? */
+	public boolean isCyclic() {
+		return dfa.isCyclic();
+	}
+
+	/** If no states are dead-ends, no alts are unreachable, there are
+	 *  no nondeterminisms unresolved by syn preds, all is ok with decision.
+	 */
+	public boolean isDeterministic() {
+		if ( danglingStates.size()==0 &&
+			 statesWithSyntacticallyAmbiguousAltsSet.size()==0 &&
+			 dfa.getUnreachableAlts().size()==0 )
+		{
+			return true;
+		}
+
+		if ( statesWithSyntacticallyAmbiguousAltsSet.size()>0 ) {
+			Iterator it =
+				statesWithSyntacticallyAmbiguousAltsSet.iterator();
+			while (	it.hasNext() ) {
+				DFAState d = (DFAState) it.next();
+				if ( !statesResolvedWithSemanticPredicatesSet.contains(d) ) {
+					return false;
+				}
+			}
+			// no syntactically ambig alts were left unresolved by predicates
+			return true;
+		}
+		return false;
+	}
+
+	/** Did the analysis complete its work, or terminate early? */
+	public boolean analysisAborted() {
+		return terminated;
+	}
+
+	/** Did any DFA state record a recursion-overflow configuration? */
+	public boolean analysisOverflowed() {
+		return stateToRecursiveOverflowConfigurationsMap.size()>0;
+	}
+
+	/** Was this decision found to need non-regular (non-LL(*)) lookahead? */
+	public boolean isNonLLStarDecision() {
+		return nonLLStarDecision;
+	}
+
+	/** How many states does the DFA predictor have? */
+	public int getNumberOfStates() {
+		return dfa.getNumberOfStates();
+	}
+
+	/** Get a list of all unreachable alternatives for this decision.  There
+	 *  may be multiple alternatives with ambiguous input sequences, but this
+	 *  is the overall list of unreachable alternatives (either due to
+	 *  conflict resolution or alts w/o accept states).
+	 */
+	public List getUnreachableAlts() {
+		return dfa.getUnreachableAlts();
+	}
+
+	/** return set of states w/o emanating edges and w/o resolving sem preds.
+	 *  These states come about because the analysis algorithm had to
+	 *  terminate early to avoid infinite recursion for example (due to
+	 *  left recursion perhaps).
+	 */
+	public Set getDanglingStates() {
+		return danglingStates;
+	}
+
+	/** The overall set of alts with at least one conflicting input sequence. */
+    public Set getNonDeterministicAlts() {
+        return altsWithProblem;
+	}
+
+	/** Return the sorted list of alts that conflict within a single state.
+	 *  Note that predicates may resolve the conflict.
+	 */
+	public List getNonDeterministicAltsForState(DFAState targetState) {
+		Set nondetAlts = targetState.getNonDeterministicAlts();
+		if ( nondetAlts==null ) {
+			return null;
+		}
+		List sorted = new LinkedList();
+		sorted.addAll(nondetAlts);
+		Collections.sort(sorted); // make sure it's 1, 2, ...
+		return sorted;
+	}
+
+	/** Return all DFA states in this DFA that have NFA configurations that
+	 *  conflict.  You must report a problem for each state in this set
+	 *  because each state represents a different input sequence.
+	 */
+	public Set getDFAStatesWithSyntacticallyAmbiguousAlts() {
+		return statesWithSyntacticallyAmbiguousAltsSet;
+	}
+
+	/** Which alts were specifically turned off to resolve nondeterminisms?
+	 *  This is different than the unreachable alts.  Disabled doesn't mean that
+	 *  the alternative is totally unreachable necessarily, it just means
+	 *  that for this DFA state, that alt is disabled.  There may be other
+	 *  accept states for that alt that make an alt reachable.
+	 */
+	public Set getDisabledAlternatives(DFAState d) {
+		return d.getDisabledAlternatives();
+	}
+
+	/** If a recursion overflow is resolved with predicates, then we need
+	 *  to shut off the warning that would be generated.
+	 */
+	public void removeRecursiveOverflowState(DFAState d) {
+		Integer stateI = Utils.integer(d.stateNumber);
+		stateToRecursiveOverflowConfigurationsMap.remove(stateI);
+	}
+
+	/*
+	public boolean dfaStateHasRecursionOverflow(DFAState d) {
+		Integer stateI = Utils.integer(d.stateNumber);
+		return stateToRecursiveOverflowConfigurationsMap.get(stateI)!=null;
+	}
+	*/
+
+	/** Return a List<Label> indicating an input sequence that can be matched
+	 *  from the start state of the DFA to the targetState (which is known
+	 *  to have a problem).
+	 */
+	public List getSampleNonDeterministicInputSequence(DFAState targetState) {
+		Set dfaStates = getDFAPathStatesToTarget(targetState);
+		// fresh visited set per call; this probe is not thread safe
+		statesVisitedDuringSampleSequence = new HashSet();
+		List labels = new ArrayList(); // may access ith element; use array
+		getSampleInputSequenceUsingStateSet(dfa.startState,
+											targetState,
+											dfaStates,
+											labels);
+		return labels;
+	}
+
+	/** Given List<Label>, return a String with a useful representation
+	 *  of the associated input string.  One could show something different
+	 *  for lexers and parsers, for example.
+	 */
+	public String getInputSequenceDisplay(List labels) {
+        Grammar g = dfa.nfa.grammar;
+		StringBuffer buf = new StringBuffer();
+		for (Iterator it = labels.iterator(); it.hasNext();) {
+			Label label = (Label) it.next();
+			buf.append(label.toString(g));
+			// separate tokens with spaces for parsers; lexer chars run together
+			if ( it.hasNext() && g.type!=Grammar.LEXER ) {
+				buf.append(' ');
+			}
+		}
+		return buf.toString();
+	}
+
+    /** Given an alternative associated with a nondeterministic DFA state,
+	 *  find the path of NFA states associated with the labels sequence.
+	 *  Useful tracing where in the NFA, a single input sequence can be
+	 *  matched.  For different alts, you should get different NFA paths.
+	 *
+	 *  The first NFA state for all NFA paths will be the same: the starting
+	 *  NFA state of the first nondeterministic alt.  Imagine (A|B|A|A):
+	 *
+	 * 	5->9-A->o
+	 *  |
+	 *  6->10-B->o
+	 *  |
+	 *  7->11-A->o
+	 *  |
+	 *  8->12-A->o
+	 *
+	 *  There are 3 nondeterministic alts.  The paths should be:
+	 *  5 9 ...
+	 *  5 6 7 11 ...
+	 *  5 6 7 8 12 ...
+	 *
+	 *  The NFA path matching the sample input sequence (labels) is computed
+	 *  using states 9, 11, and 12 rather than 5, 7, 8 because state 5, for
+	 *  example can get to all ambig paths.  Must isolate for each alt (hence,
+	 *  the extra state beginning each alt in my NFA structures).  Here,
+	 *  firstAlt=1.
+	 */
+	public List getNFAPathStatesForAlt(int firstAlt,
+									   int alt,
+									   List labels)
+	{
+		NFAState nfaStart = dfa.getNFADecisionStartState();
+		List path = new LinkedList();
+		// first add all NFA states leading up to altStart state
+		for (int a=firstAlt; a<=alt; a++) {
+			NFAState s =
+				dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,a);
+			path.add(s);
+		}
+
+		// add first state of actual alt; transition(0) hops to the isolated
+		// per-alt start state (see javadoc above for why isolation matters)
+		NFAState altStart = dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,alt);
+		NFAState isolatedAltStart = (NFAState)altStart.transition(0).target;
+		path.add(isolatedAltStart);
+
+		// add the actual path now; fresh visited set guards against
+		// infinite loops in the recursive getNFAPath walk
+		statesVisitedAtInputDepth = new HashSet();
+		getNFAPath(isolatedAltStart,
+				   0,
+				   labels,
+				   path);
+        return path;
+	}
+
+	/** Each state in the DFA represents a different input sequence for an
+	 *  alt of the decision.  Given a DFA state, what is the semantic
+	 *  predicate context for a particular alt.  Returns null when no
+	 *  predicates were recorded for d.
+	 */
+    public SemanticContext getSemanticContextForAlt(DFAState d, int alt) {
+		Map altToPredMap = (Map)stateToAltSetWithSemanticPredicatesMap.get(d);
+		if ( altToPredMap==null ) {
+			return null;
+		}
+		return (SemanticContext)altToPredMap.get(Utils.integer(alt));
+	}
+
+	/** Which nondeterministic DFA states were fixed by semantic predicates? */
+	public Set getNondeterministicStatesResolvedWithSemanticPredicate() {
+		return statesResolvedWithSemanticPredicatesSet;
+	}
+
+	/** Return a list of alts whose predicate context was insufficient to
+	 *  resolve a nondeterminism for state d; null when none recorded.
+	 */
+    public List getIncompletelyCoveredAlts(DFAState d) {
+		return (List)stateToIncompletelyCoveredAltsMap.get(d);
+	}
+
+	/** Emit every warning/error collected for this decision via ErrorManager.
+	 *  Order matters: non-LL(*) first, then aborted-analysis (which returns
+	 *  early to avoid message spew), then recursion warnings, per-state
+	 *  nondeterminism/insufficient-predicate messages, dangling states, and
+	 *  finally unreachable alts.
+	 */
+	public void issueWarnings() {
+		// NONREGULAR DUE TO RECURSION > 1 ALTS
+		// Issue this before aborted analysis, which might also occur
+		// if we take too long to terminate
+		if ( nonLLStarDecision && !dfa.getAutoBacktrackMode() ) {
+			ErrorManager.nonLLStarDecision(this);
+		}
+
+		if ( analysisAborted() ) {
+			// only report early termination errors if !backtracking
+			if ( !dfa.getAutoBacktrackMode() ) {
+				ErrorManager.analysisAborted(this);
+			}
+			// now just return...if we bailed out, don't spew other messages
+			return;
+		}
+
+		issueRecursionWarnings();
+
+		// generate a separate message for each problem state in DFA
+		Set resolvedStates = getNondeterministicStatesResolvedWithSemanticPredicate();
+		Set problemStates = getDFAStatesWithSyntacticallyAmbiguousAlts();
+		if ( problemStates.size()>0 ) {
+			Iterator it =
+				problemStates.iterator();
+			while (	it.hasNext() && !dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() ) {
+				DFAState d = (DFAState) it.next();
+				// don't report problem if resolved
+				if ( resolvedStates==null || !resolvedStates.contains(d) ) {
+					// first strip last alt from disableAlts if it's wildcard
+					// then don't print error if no more disable alts
+					Set disabledAlts = getDisabledAlternatives(d);
+					stripWildCardAlts(disabledAlts);
+					if ( disabledAlts.size()>0 ) {
+						ErrorManager.nondeterminism(this,d);
+					}
+				}
+				List insufficientAlts = getIncompletelyCoveredAlts(d);
+				if ( insufficientAlts!=null && insufficientAlts.size()>0 ) {
+					ErrorManager.insufficientPredicates(this,insufficientAlts);
+				}
+			}
+		}
+
+		Set danglingStates = getDanglingStates();
+		if ( danglingStates.size()>0 ) {
+			//System.err.println("no emanating edges for states: "+danglingStates);
+			for (Iterator it = danglingStates.iterator(); it.hasNext();) {
+				DFAState d = (DFAState) it.next();
+				ErrorManager.danglingState(this,d);
+			}
+		}
+
+		// non-LL(*) decisions already reported above; skip redundant message
+		if ( !nonLLStarDecision ) {
+			List unreachableAlts = dfa.getUnreachableAlts();
+			if ( unreachableAlts!=null && unreachableAlts.size()>0 ) {
+				ErrorManager.unreachableAlts(this,unreachableAlts);
+			}
+		}
+	}
+
+	/** Get the last disabled alt number and check in the grammar to see
+	 *  if that alt is a simple wildcard.  If so, treat like an else clause
+	 *  and don't emit the error.  Strip out the last alt if it's wildcard.
+	 *  Mutates the disabledAlts set passed in.
+	 */
+	protected void stripWildCardAlts(Set disabledAlts) {
+		List sortedDisableAlts = new ArrayList(disabledAlts);
+		Collections.sort(sortedDisableAlts);
+		Integer lastAlt =
+			(Integer)sortedDisableAlts.get(sortedDisableAlts.size()-1);
+		GrammarAST blockAST =
+			dfa.nfa.grammar.getDecisionBlockAST(dfa.decisionNumber);
+		//System.out.println("block with error = "+blockAST.toStringTree());
+		GrammarAST lastAltAST = null;
+		if ( blockAST.getChild(0).getType()==ANTLRParser.OPTIONS ) {
+			// if options, skip first child: ( options { ( = greedy false ) )
+			// NOTE(review): with OPTIONS occupying child 0, alt i is child i
+			lastAltAST = blockAST.getChild(lastAlt.intValue());
+		}
+		else {
+			lastAltAST = blockAST.getChild(lastAlt.intValue()-1);
+		}
+		//System.out.println("last alt is "+lastAltAST.toStringTree());
+		// if last alt looks like ( ALT . <end-of-alt> ) then wildcard
+		// Avoid looking at optional blocks etc... that have last alt
+		// as the EOB:
+		// ( BLOCK ( ALT 'else' statement <end-of-alt> ) <end-of-block> )
+		if ( lastAltAST.getType()!=ANTLRParser.EOB &&
+			 lastAltAST.getChild(0).getType()== ANTLRParser.WILDCARD &&
+			 lastAltAST.getChild(1).getType()== ANTLRParser.EOA )
+		{
+			//System.out.println("wildcard");
+			disabledAlts.remove(lastAlt);
+		}
+	}
+
+	/** Report recursion-overflow problems, one error per alt, each with the
+	 *  target rules and call sites involved plus one sample problem state.
+	 */
+	protected void issueRecursionWarnings() {
+		// RECURSION OVERFLOW
+		Set dfaStatesWithRecursionProblems =
+			stateToRecursiveOverflowConfigurationsMap.keySet();
+		// now walk truly unique (unaliased) list of dfa states with inf recur
+		// Goal: create a map from alt to map<target,List<callsites>>
+		// Map<Integer alt, Map<String target, List<NFAState call sites>>>
+		Map altToTargetToCallSitesMap = new HashMap();
+		// track a single problem DFA state for each alt
+		Map altToDFAState = new HashMap();
+		computeAltToProblemMaps(dfaStatesWithRecursionProblems,
+								stateToRecursiveOverflowConfigurationsMap,
+								altToTargetToCallSitesMap, // output param
+								altToDFAState);            // output param
+		//System.out.println("altToTargetToCallSitesMap="+altToTargetToCallSitesMap);
+
+		// walk each alt with recursion overflow problems and generate error
+		Set alts = altToTargetToCallSitesMap.keySet();
+		List sortedAlts = new ArrayList(alts);
+		Collections.sort(sortedAlts);
+		for (Iterator altsIt = sortedAlts.iterator(); altsIt.hasNext();) {
+			Integer altI = (Integer) altsIt.next();
+			Map targetToCallSiteMap =
+				(Map)altToTargetToCallSitesMap.get(altI);
+			Set targetRules = targetToCallSiteMap.keySet();
+			Collection callSiteStates = targetToCallSiteMap.values();
+			DFAState sampleBadState = (DFAState)altToDFAState.get(altI);
+			ErrorManager.recursionOverflow(this,
+										   sampleBadState,
+										   altI.intValue(),
+										   targetRules,
+										   callSiteStates);
+		}
+
+		/* All  recursion determines now before analysis
+		// LEFT RECURSION
+		// TODO: hideous cut/paste of code; try to refactor
+
+		Set dfaStatesWithLeftRecursionProblems =
+			stateToLeftRecursiveConfigurationsMap.keySet();
+		Set dfaStatesUnaliased =
+			getUnaliasedDFAStateSet(dfaStatesWithLeftRecursionProblems);
+
+		// now walk truly unique (unaliased) list of dfa states with inf recur
+		// Goal: create a map from alt to map<target,List<callsites>>
+		// Map<Map<String target, List<NFAState call sites>>
+		altToTargetToCallSitesMap = new HashMap();
+		// track a single problem DFA state for each alt
+		altToDFAState = new HashMap();
+		computeAltToProblemMaps(dfaStatesUnaliased,
+								stateToLeftRecursiveConfigurationsMap,
+								altToTargetToCallSitesMap, // output param
+								altToDFAState);            // output param
+
+		// walk each alt with recursion overflow problems and generate error
+		alts = altToTargetToCallSitesMap.keySet();
+		sortedAlts = new ArrayList(alts);
+		Collections.sort(sortedAlts);
+		for (Iterator altsIt = sortedAlts.iterator(); altsIt.hasNext();) {
+			Integer altI = (Integer) altsIt.next();
+			Map targetToCallSiteMap =
+				(Map)altToTargetToCallSitesMap.get(altI);
+			Set targetRules = targetToCallSiteMap.keySet();
+			Collection callSiteStates = targetToCallSiteMap.values();
+			ErrorManager.leftRecursion(this,
+									   altI.intValue(),
+									   targetRules,
+									   callSiteStates);
+		}
+		*/
+	}
+
+	/** For each NFA configuration found in the given unaliased problem DFA
+	 *  states, record which rule-invocation NFA state (call site) in which
+	 *  alt targets which rule.  Fills the two output maps:
+	 *    altToTargetToCallSitesMap : alt Integer -> Map(target rule name -> Set of NFAState call sites)
+	 *    altToDFAState             : alt Integer -> one sample problem DFAState
+	 */
+	private void computeAltToProblemMaps(Set dfaStatesUnaliased,
+										 Map configurationsMap,
+										 Map altToTargetToCallSitesMap,
+										 Map altToDFAState)
+	{
+		for (Iterator it = dfaStatesUnaliased.iterator(); it.hasNext();) {
+			Integer stateI = (Integer) it.next();
+			// walk this DFA's config list
+			List configs = (List)configurationsMap.get(stateI);
+			for (int i = 0; i < configs.size(); i++) {
+				NFAConfiguration c = (NFAConfiguration) configs.get(i);
+				NFAState ruleInvocationState = dfa.nfa.getState(c.state);
+				Transition transition0 = ruleInvocationState.transition(0);
+				RuleClosureTransition ref = (RuleClosureTransition)transition0;
+				String targetRule = ((NFAState)ref.target).getEnclosingRule();
+				Integer altI = Utils.integer(c.alt);
+				Map targetToCallSiteMap =
+					(Map)altToTargetToCallSitesMap.get(altI);
+				if ( targetToCallSiteMap==null ) {
+					targetToCallSiteMap = new HashMap();
+					altToTargetToCallSitesMap.put(altI, targetToCallSiteMap);
+				}
+				// cast to the Set interface, not HashSet: callers only need
+				// Set semantics and the concrete cast would CCE if another
+				// Set implementation were ever stored here
+				Set callSites =
+					(Set)targetToCallSiteMap.get(targetRule);
+				if ( callSites==null ) {
+					callSites = new HashSet();
+					targetToCallSiteMap.put(targetRule, callSites);
+				}
+				callSites.add(ruleInvocationState);
+				// track one problem DFA state per alt
+				if ( altToDFAState.get(altI)==null ) {
+					DFAState sampleBadState = dfa.getState(stateI.intValue());
+					altToDFAState.put(altI, sampleBadState);
+				}
+			}
+		}
+	}
+
+	/** Map each problem state number onto its canonical (unaliased) DFA
+	 *  state number and return the set of unique canonical numbers.
+	 */
+	private Set getUnaliasedDFAStateSet(Set dfaStatesWithRecursionProblems) {
+		Set unaliased = new HashSet();
+		Iterator it = dfaStatesWithRecursionProblems.iterator();
+		while ( it.hasNext() ) {
+			Integer stateNumberI = (Integer)it.next();
+			DFAState canonical = dfa.getState(stateNumberI.intValue());
+			unaliased.add(Utils.integer(canonical.stateNumber));
+		}
+		return unaliased;
+	}
+
+
+	// T R A C K I N G  M E T H O D S
+
+    /** Report the fact that DFA state d is not a state resolved with
+     *  predicates and yet it has no emanating edges.  Usually this
+     *  is a result of the closure/reach operations being unable to proceed
+     */
+	public void reportDanglingState(DFAState d) {
+		danglingStates.add(d); // remembered here; reported later in the summary pass
+	}
+
+	/** Record that conversion of this decision's DFA gave up before
+	 *  completing, and flag the DFA on the grammar so an error can be
+	 *  issued later.
+	 */
+	public void reportEarlyTermination() {
+		terminated = true;
+		dfa.nfa.grammar.setOfDFAWhoseConversionTerminatedEarly.add(dfa);
+	}
+
+	/** Report that at least 2 alts have recursive constructs.  There is
+	 *  no way to build a DFA so we terminated.
+	 */
+	public void reportNonLLStarDecision(DFA dfa) {
+		//System.out.println("non-LL(*) DFA "+dfa.decisionNumber);
+		nonLLStarDecision = true;
+		// NOTE(review): parameter 'dfa' shadows the probe's 'dfa' field;
+		// all uses below intentionally refer to the parameter
+		altsWithProblem.addAll(dfa.recursiveAltSet.toList());
+	}
+
+	public void reportRecursiveOverflow(DFAState d,
+										NFAConfiguration recursiveNFAConfiguration)
+	{
+		// track the state number rather than the state as d will change
+		// out from underneath us; hash wouldn't return any value
+		Integer stateI = Utils.integer(d.stateNumber);
+		List configs = (List)stateToRecursiveOverflowConfigurationsMap.get(stateI);
+		if ( configs==null ) {
+			configs = new ArrayList();
+			configs.add(recursiveNFAConfiguration);
+			stateToRecursiveOverflowConfigurationsMap.put(stateI, configs);
+		}
+		else {
+			configs.add(recursiveNFAConfiguration);
+		}
+	}
+
+	public void reportLeftRecursion(DFAState d,
+									NFAConfiguration leftRecursiveNFAConfiguration)
+	{
+		// track the state number rather than the state as d will change
+		// out from underneath us; hash wouldn't return any value
+		Integer stateI = Utils.integer(d.stateNumber);
+		List configs = (List)stateToLeftRecursiveConfigurationsMap.get(stateI);
+		if ( configs==null ) {
+			configs = new ArrayList();
+			configs.add(leftRecursiveNFAConfiguration);
+			stateToLeftRecursiveConfigurationsMap.put(stateI, configs);
+		}
+		else {
+			configs.add(leftRecursiveNFAConfiguration);
+		}
+	}
+
+	/** Record that DFA state d predicts more than one alt: track the
+	 *  conflicting alts, remember the ambiguous state, and flag this
+	 *  decision number on the grammar.
+	 */
+	public void reportNondeterminism(DFAState d, Set nondeterministicAlts) {
+		altsWithProblem.addAll(nondeterministicAlts); // track overall list
+		statesWithSyntacticallyAmbiguousAltsSet.add(d);
+		dfa.nfa.grammar.setOfNondeterministicDecisionNumbers.add(
+			Utils.integer(dfa.getDecisionNumber())
+		);
+	}
+
+	/** Currently the analysis reports issues between token definitions, but
+	 *  we don't print out warnings in favor of just picking the first token
+	 *  definition found in the grammar ala lex/flex.
+	 */
+	public void reportLexerRuleNondeterminism(DFAState d, Set nondeterministicAlts) {
+		// only recorded, never reported (see javadoc above)
+		stateToSyntacticallyAmbiguousTokensRuleAltsMap.put(d,nondeterministicAlts);
+	}
+
+	/** Record that the nondeterminism in DFA state d was resolved by
+	 *  semantic predicates, and flag the decision number on the grammar.
+	 */
+	public void reportNondeterminismResolvedWithSemanticPredicate(DFAState d)
+	{
+		statesResolvedWithSemanticPredicatesSet.add(d);
+		//System.out.println("resolved with pred: "+d);
+		dfa.nfa.grammar.setOfNondeterministicDecisionNumbersResolvedWithPredicates.add(
+			Utils.integer(dfa.getDecisionNumber())
+		);
+	}
+
+	/** Report the list of predicates found for each alternative; copy
+	 *  the map because the original gets altered later by the method
+	 *  tryToResolveWithSemanticPredicates() while flagging NFA
+	 *  configurations in d as resolved.
+	 */
+	public void reportAltPredicateContext(DFAState d, Map altPredicateContext) {
+		// HashMap(Map) copy constructor == new HashMap() + putAll()
+		stateToAltSetWithSemanticPredicatesMap.put(d, new HashMap(altPredicateContext));
+	}
+
+	/** Record that, in state d, predicates do not cover every
+	 *  nondeterministic alt; alts is the list of uncovered alts.
+	 */
+	public void reportIncompletelyCoveredAlts(DFAState d,
+											  List alts)
+	{
+		stateToIncompletelyCoveredAltsMap.put(d, alts);
+	}
+
+	// S U P P O R T
+
+	/** Given a start state and a target state, return true if start can reach
+	 *  target state.  Also, compute the set of DFA states
+	 *  that are on a path from start to target; return in states parameter.
+	 *  Memoizes via the stateReachable map (REACHABLE_* markers), which also
+	 *  acts as the cycle guard; the caller must reset that map before a
+	 *  fresh query (see getDFAPathStatesToTarget).
+	 */
+	protected boolean reachesState(DFAState startState,
+								   DFAState targetState,
+								   Set states) {
+		if ( startState==targetState ) {
+			states.add(targetState);
+			//System.out.println("found target DFA state "+targetState.getStateNumber());
+			stateReachable.put(startState, REACHABLE_YES);
+			return true;
+		}
+
+		DFAState s = startState;
+		// avoid infinite loops
+		stateReachable.put(s, REACHABLE_BUSY);
+
+		// look for a path to targetState among transitions for this state
+		// stop when you find the first one; I'm pretty sure there is
+		// at most one path to any DFA state with conflicting predictions
+		for (int i=0; i<s.getNumberOfTransitions(); i++) {
+			Transition t = s.transition(i);
+			DFAState edgeTarget = (DFAState)t.target;
+			// NOTE: '==' below relies on REACHABLE_* being the exact
+			// canonical Integer instances stored by the put() calls
+			Integer targetStatus = (Integer)stateReachable.get(edgeTarget);
+			if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
+				continue;
+			}
+			if ( targetStatus==REACHABLE_YES ) { // return success!
+				stateReachable.put(s, REACHABLE_YES);
+				return true;
+			}
+			if ( targetStatus==REACHABLE_NO ) { // try another transition
+				continue;
+			}
+			// if null, target must be REACHABLE_UNKNOWN (i.e., unvisited)
+			if ( reachesState(edgeTarget, targetState, states) ) {
+				states.add(s); // s lies on a successful path to target
+				stateReachable.put(s, REACHABLE_YES);
+				return true;
+			}
+		}
+
+		stateReachable.put(s, REACHABLE_NO);
+		return false; // no path to targetState found.
+	}
+
+	/** Return the set of DFA states lying on some path from the DFA
+	 *  start state to targetState (inclusive).  Resets the reachability
+	 *  cache before walking; returns an empty set if target is
+	 *  unreachable.
+	 */
+	protected Set getDFAPathStatesToTarget(DFAState targetState) {
+		Set dfaStates = new HashSet();
+		stateReachable = new HashMap();
+		// reachesState fills dfaStates as a side effect; its boolean result
+		// is implied by whether dfaStates is empty, so the previously
+		// declared-but-unused local holding it has been removed
+		reachesState(dfa.startState, targetState, dfaStates);
+		return dfaStates;
+	}
+
+    /** Given a set of DFA states, return a set of NFA states associated
+	 *  with alt collected from all DFA states.  If alt==0 then collect
+	 *  all NFA states regardless of alt.
+	protected Set getNFAStatesFromDFAStatesForAlt(Set dfaStates, int alt) {
+		Set nfaStates = new LinkedHashSet();
+		for (Iterator it = dfaStates.iterator(); it.hasNext();) {
+			DFAState d = (DFAState) it.next();
+			Set configs = d.getNFAConfigurations();
+			for (Iterator configIter = configs.iterator(); configIter.hasNext();) {
+				NFAConfiguration c = (NFAConfiguration) configIter.next();
+				if ( alt==0 || c.alt==alt ) {
+					nfaStates.add(Utils.integer(c.state));
+				}
+			}
+		}
+		return nfaStates;
+	}
+	 */
+
+	/** Given a start state and a final state, find a list of edge labels
+	 *  between the two ignoring epsilon.  Limit your scan to a set of states
+	 *  passed in.  This is used to show a sample input sequence that is
+	 *  nondeterministic with respect to this decision.  Return List<Label> as
+	 *  a parameter.  The incoming states set must be all states that lead
+	 *  from startState to targetState and no others so this algorithm doesn't
+	 *  take a path that eventually leads to a state other than targetState.
+	 *  Don't follow loops, leading to short (possibly shortest) path.
+	 *  NOTE(review): appears to assume statesVisitedDuringSampleSequence
+	 *  was cleared before the initial (non-recursive) call -- confirm at
+	 *  the call site.
+	 */
+	protected void getSampleInputSequenceUsingStateSet(State startState,
+													   State targetState,
+													   Set states,
+													   List labels)
+	{
+		statesVisitedDuringSampleSequence.add(startState);
+
+		// pick the first edge in states as the one to traverse
+		for (int i=0; i<startState.getNumberOfTransitions(); i++) {
+			Transition t = startState.transition(i);
+			DFAState edgeTarget = (DFAState)t.target;
+			if ( states.contains(edgeTarget) &&
+				 !statesVisitedDuringSampleSequence.contains(edgeTarget) )
+			{
+				labels.add(t.label); // traverse edge and track label
+				if ( edgeTarget!=targetState ) {
+					// get more labels if not at target
+					getSampleInputSequenceUsingStateSet(edgeTarget,
+														targetState,
+														states,
+														labels);
+				}
+				// done with this DFA state as we've found a good path to target
+				return;
+			}
+		}
+		labels.add(new Label(Label.EPSILON)); // indicate no input found
+		// this happens on a : {p1}? a | A ;
+		//ErrorManager.error(ErrorManager.MSG_CANNOT_COMPUTE_SAMPLE_INPUT_SEQ);
+	}
+
+	/** Given a sample input sequence, you usually would like to know the
+	 *  path taken through the NFA.  Return the list of NFA states visited
+	 *  while matching a list of labels.  This cannot use the usual
+	 *  interpreter, which does a deterministic walk.  We need to be able to
+	 *  take paths that are turned off during nondeterminism resolution. So,
+	 *  just do a depth-first walk traversing edges labeled with the current
+	 *  label.  Return true if a path was found emanating from state s.
+	 *  Cycle detection is per (state, labelIndex) via the
+	 *  statesVisitedAtInputDepth set; entries are removed on the way back
+	 *  out so other branches may revisit the same state at the same depth.
+	 */
+	protected boolean getNFAPath(NFAState s,     // starting where?
+								 int labelIndex, // 0..labels.size()-1
+								 List labels,    // input sequence
+								 List path)      // output list of NFA states
+	{
+		// track a visit to state s at input index labelIndex if not seen
+		String thisStateKey = getStateLabelIndexKey(s.stateNumber,labelIndex);
+		if ( statesVisitedAtInputDepth.contains(thisStateKey) ) {
+			/*
+			System.out.println("### already visited "+s.stateNumber+" previously at index "+
+						   labelIndex);
+			*/
+			return false;
+		}
+		statesVisitedAtInputDepth.add(thisStateKey);
+
+		/*
+		System.out.println("enter state "+s.stateNumber+" visited states: "+
+						   statesVisitedAtInputDepth);
+        */
+
+		// pick the first edge whose target is in states and whose
+		// label is labels[labelIndex]
+		for (int i=0; i<s.getNumberOfTransitions(); i++) {
+			Transition t = s.transition(i);
+			NFAState edgeTarget = (NFAState)t.target;
+			Label label = (Label)labels.get(labelIndex);
+			/*
+			System.out.println(s.stateNumber+"-"+
+							   t.label.toString(dfa.nfa.grammar)+"->"+
+							   edgeTarget.stateNumber+" =="+
+							   label.toString(dfa.nfa.grammar)+"?");
+			*/
+			if ( t.label.isEpsilon() ) {
+				// nondeterministically backtrack down epsilon edges
+				path.add(edgeTarget);
+				boolean found =
+					getNFAPath(edgeTarget, labelIndex, labels, path);
+				if ( found ) {
+					statesVisitedAtInputDepth.remove(thisStateKey);
+					return true; // return to "calling" state
+				}
+				path.remove(path.size()-1); // remove; didn't work out
+				continue; // look at the next edge
+			}
+			if ( t.label.matches(label) ) {
+				path.add(edgeTarget);
+				/*
+				System.out.println("found label "+
+								   t.label.toString(dfa.nfa.grammar)+
+								   " at state "+s.stateNumber+"; labelIndex="+labelIndex);
+				*/
+				if ( labelIndex==labels.size()-1 ) {
+					// found last label; done!
+					statesVisitedAtInputDepth.remove(thisStateKey);
+					return true;
+				}
+				// otherwise try to match remaining input
+				boolean found =
+					getNFAPath(edgeTarget, labelIndex+1, labels, path);
+				if ( found ) {
+					statesVisitedAtInputDepth.remove(thisStateKey);
+					return true;
+				}
+				/*
+				System.out.println("backtrack; path from "+s.stateNumber+"->"+
+								   t.label.toString(dfa.nfa.grammar)+" didn't work");
+				*/
+				path.remove(path.size()-1); // remove; didn't work out
+				continue; // keep looking for a path for labels
+			}
+		}
+		//System.out.println("no epsilon or matching edge; removing "+thisStateKey);
+		// no edge was found matching label; is ok, some state will have it
+		statesVisitedAtInputDepth.remove(thisStateKey);
+		return false;
+	}
+
+	/** Build the key "stateNumber_labelIndex" used by getNFAPath() to
+	 *  mark a visit to NFA state s at input depth i.
+	 */
+	protected String getStateLabelIndexKey(int s, int i) {
+		// plain concatenation; the compiler emits the StringBuffer that
+		// was previously written out by hand
+		return s + "_" + i;
+	}
+
+	/** From an alt number associated with artificial Tokens rule, return
+	 *  the name of the token that is associated with that alt.
+	 */
+	public String getTokenNameForTokensRuleAlt(int alt) {
+		NFAState decisionState = dfa.getNFADecisionStartState();
+		NFAState altState =
+			dfa.nfa.grammar.getNFAStateForAltOfDecision(decisionState, alt);
+		// first transition of the alt leads to the rule-invocation state
+		NFAState invocationState = (NFAState)altState.transition(0).target;
+		// its single edge is the call into the token rule itself
+		RuleClosureTransition callEdge =
+			(RuleClosureTransition)invocationState.transition(0);
+		NFAState tokenRuleStart = (NFAState)callEdge.target;
+		//System.out.println("alt = "+invocationState.getEnclosingRule());
+		return tokenRuleStart.getEnclosingRule();
+	}
+}
diff --git a/src/org/antlr/analysis/Label.java b/src/org/antlr/analysis/Label.java
new file mode 100644
index 0000000..4bb1956
--- /dev/null
+++ b/src/org/antlr/analysis/Label.java
@@ -0,0 +1,374 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.IntSet;
+
+/** A state machine transition label.  A label can be either a simple
+ *  label such as a token or character.  A label can be a set of char or
+ *  tokens.  It can be an epsilon transition.  It can be a semantic predicate
+ *  (which assumes an epsilon transition) or a tree of predicates (in a DFA).
+ */
+public class Label implements Comparable, Cloneable {
+    public static final int INVALID = -6;
+
+    public static final int EPSILON = -5;
+
+    public static final String EPSILON_STR = "<EPSILON>";
+
+    /** label is a semantic predicate; implies label is epsilon also */
+    public static final int SEMPRED = -4;
+
+    /** label is a set of tokens or char */
+    public static final int SET = -3;
+
+    /** End of Token is like EOF for lexer rules.  It implies that no more
+     *  characters are available and that NFA conversion should terminate
+     *  for this path.  For example
+     *
+     *  A : 'a' 'b' | 'a' ;
+     *
+     *  yields a DFA predictor:
+     *
+     *  o-a->o-b->1   predict alt 1
+     *       |
+     *       |-EOT->o predict alt 2
+     *
+     *  To generate code for EOT, treat it as the "default" path, which
+     *  implies there is no way to mismatch a char for the state from
+     *  which the EOT emanates.
+     */
+    public static final int EOT = -2;
+
+    public static final int EOF = -1;
+
+    /** We have labels like EPSILON that are below 0; it's hard to
+     *  store them in an array with negative index so use this
+     *  constant as an index shift when accessing arrays based upon
+     *  token type.  If real token type is i, then array index would be
+     *  NUM_FAUX_LABELS + i.
+     */
+    public static final int NUM_FAUX_LABELS = -INVALID;
+
+    /** Anything at this value or larger can be considered a simple atom int
+     *  for easy comparison during analysis only; faux labels are not used
+     *  during parse time for real token types or char values.
+     */
+    public static final int MIN_ATOM_VALUE = EOT;
+
+    // TODO: is 0 a valid unicode char? max is FFFF -1, right?
+    public static final int MIN_CHAR_VALUE = '\u0000';
+    public static final int MAX_CHAR_VALUE = '\uFFFE';
+
+    /** End of rule token type; imaginary token type used only for
+     *  local, partial FOLLOW sets to indicate that the local FOLLOW
+     *  hit the end of rule.  During error recovery, the local FOLLOW
+     *  of a token reference may go beyond the end of the rule and have
+     *  to use FOLLOW(rule).  I have to just shift the token types to 2..n
+     *  rather than 1..n to accommodate this imaginary token in my bitsets.
+     *  If I didn't use a bitset implementation for runtime sets, I wouldn't
+     *  need this.  EOF is another candidate for a run time token type for
+     *  parsers.  Follow sets are not computed for lexers so we do not have
+     *  this issue.
+     */
+    public static final int EOR_TOKEN_TYPE =
+        org.antlr.runtime.Token.EOR_TOKEN_TYPE;
+
+    public static final int DOWN = org.antlr.runtime.Token.DOWN;
+    public static final int UP = org.antlr.runtime.Token.UP;
+
+    /** tokens and char range overlap; tokens are MIN_TOKEN_TYPE..n */
+    public static final int MIN_TOKEN_TYPE =
+        org.antlr.runtime.Token.MIN_TOKEN_TYPE;
+
+    /** The wildcard '.' char atom implies all valid characters==UNICODE */
+    //public static final IntSet ALLCHAR = IntervalSet.of(MIN_CHAR_VALUE,MAX_CHAR_VALUE);
+
+    /** The token type or character value; or, signifies special label. */
+    protected int label;
+
+    /** A tree of semantic predicates from the grammar AST if label==SEMPRED.
+     *  In the NFA, labels will always be exactly one predicate, but the DFA
+     *  may have to combine a bunch of them as it collects predicates from
+     *  multiple NFA configurations into a single DFA state.
+     */
+    protected SemanticContext semanticContext;
+
+    /** A set of token types or character codes if label==SET */
+    // TODO: try IntervalSet for everything
+    protected IntSet labelSet;
+
+    public Label(int label) {
+        this.label = label;
+    }
+
+    /** Make a semantic predicate label */
+    public Label(GrammarAST predicateASTNode) {
+        this(SEMPRED);
+        this.semanticContext = new SemanticContext.Predicate(predicateASTNode);
+    }
+
+    /** Make a semantic predicates label */
+    public Label(SemanticContext semCtx) {
+        this(SEMPRED);
+        this.semanticContext = semCtx;
+    }
+
+    /** Make a set label; null collapses to {INVALID} and a singleton
+     *  set collapses back to a plain atom label.
+     */
+    public Label(IntSet labelSet) {
+        if ( labelSet==null ) {
+            this.label = SET;
+            this.labelSet = IntervalSet.of(INVALID);
+            return;
+        }
+        int singleAtom = labelSet.getSingleElement();
+        if ( singleAtom!=INVALID ) {
+            // convert back to a single atomic element if |labelSet|==1
+            label = singleAtom;
+            return;
+        }
+        this.label = SET;
+        this.labelSet = labelSet;
+    }
+
+    public Object clone() {
+        Label l;
+        try {
+            l = (Label)super.clone();
+            l.label = this.label;
+            if ( this.labelSet!=null ) {
+                // deep-copy the set so the clone can be mutated independently;
+                // atom/epsilon/sempred labels have no set, so guard against
+                // null instead of blindly copying as before
+                l.labelSet = new IntervalSet();
+                l.labelSet.addAll(this.labelSet);
+            }
+        }
+        catch (CloneNotSupportedException e) {
+            throw new InternalError();
+        }
+        return l;
+    }
+
+    /** Add a's elements to this label, converting an atom label to a
+     *  SET label on the way if needed.  Only atoms and sets may be added.
+     */
+    public void add(Label a) {
+        if ( isAtom() ) {
+            // promote this atom to a set first
+            labelSet = IntervalSet.of(label);
+            label=SET;
+            if ( a.isAtom() ) {
+                labelSet.add(a.getAtom());
+            }
+            else if ( a.isSet() ) {
+                labelSet.addAll(a.getSet());
+            }
+            else {
+                throw new IllegalStateException("can't add element to Label of type "+label);
+            }
+            return;
+        }
+        if ( isSet() ) {
+            if ( a.isAtom() ) {
+                labelSet.add(a.getAtom());
+            }
+            else if ( a.isSet() ) {
+                labelSet.addAll(a.getSet());
+            }
+            else {
+                throw new IllegalStateException("can't add element to Label of type "+label);
+            }
+            return;
+        }
+        throw new IllegalStateException("can't add element to Label of type "+label);
+    }
+
+    public boolean isAtom() {
+        return label>=MIN_ATOM_VALUE;
+    }
+
+    public boolean isEpsilon() {
+        return label==EPSILON;
+    }
+
+    public boolean isSemanticPredicate() {
+        return label==SEMPRED;
+    }
+
+    public boolean isSet() {
+        return label==SET;
+    }
+
+    /** return the single atom label or INVALID if not a single atom */
+    public int getAtom() {
+        if ( isAtom() ) {
+            return label;
+        }
+        return INVALID;
+    }
+
+    public IntSet getSet() {
+        if ( label!=SET ) {
+            // convert single element to a set if they ask for it.
+            return IntervalSet.of(label);
+        }
+        return labelSet;
+    }
+
+    public void setSet(IntSet set) {
+        label=SET;
+        labelSet = set;
+    }
+
+    public SemanticContext getSemanticContext() {
+        return semanticContext;
+    }
+
+    public boolean matches(int atom) {
+        if ( label==atom ) {
+            return true; // handle the single atom case efficiently
+        }
+        if ( isSet() ) {
+            return labelSet.member(atom);
+        }
+        return false;
+    }
+
+    public boolean matches(IntSet set) {
+        if ( isAtom() ) {
+            return set.member(getAtom());
+        }
+        if ( isSet() ) {
+            // matches if intersection non-nil
+            return !getSet().and(set).isNil();
+        }
+        return false;
+    }
+
+    public boolean matches(Label other) {
+        if ( other.isSet() ) {
+            return matches(other.getSet());
+        }
+        if ( other.isAtom() ) {
+            return matches(other.getAtom());
+        }
+        return false;
+    }
+
+    public int hashCode() {
+        switch (label) {
+            case SET :
+                return labelSet.hashCode();
+            case SEMPRED :
+                return semanticContext.hashCode();
+            default :
+                return label;
+        }
+    }
+
+    public boolean equals(Object o) {
+        // instanceof guard (also covers o==null) rather than a blind cast:
+        // the old code threw ClassCastException for a foreign type, which
+        // violates the Object.equals contract
+        if ( !(o instanceof Label) ) {
+            return false;
+        }
+        // labels must be the same even if epsilon or set or sempred etc...
+        if ( label!=((Label)o).label ) {
+            return false;
+        }
+        if ( label==SET ) {
+            return this.labelSet.equals(((Label)o).labelSet);
+        }
+        return true;  // label values are same, so true
+    }
+
+    public int compareTo(Object o) {
+        // subtraction is safe here: label values are bounded well within
+        // int range (faux labels -6..-1 and char/token values <= 0xFFFE)
+        return this.label-((Label)o).label;
+    }
+
+    /** Predicates are lists of AST nodes from the NFA created from the
+     *  grammar, but the same predicate could be cut/paste into multiple
+     *  places in the grammar.  I must compare the text of all the
+     *  predicates to truly answer whether {p1,p2} .equals {p1,p2}.
+     *  Unfortunately, I cannot rely on the AST.equals() to work properly
+     *  so I must do a brute force O(n^2) nested traversal of the Set
+     *  doing a String compare.
+     *
+     *  At this point, Labels are not compared for equals when they are
+     *  predicates, but here's the code for future use.
+     */
+    /*
+    protected boolean predicatesEquals(Set others) {
+        Iterator iter = semanticContext.iterator();
+        while (iter.hasNext()) {
+            AST predAST = (AST) iter.next();
+            Iterator inner = semanticContext.iterator();
+            while (inner.hasNext()) {
+                AST otherPredAST = (AST) inner.next();
+                if ( !predAST.getText().equals(otherPredAST.getText()) ) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+      */
+
+    public String toString() {
+        switch (label) {
+            case SET :
+                return labelSet.toString();
+            case SEMPRED :
+                return "{"+semanticContext+"}?";
+            default :
+                return String.valueOf(label);
+        }
+    }
+
+    public String toString(Grammar g) {
+        switch (label) {
+            case SET :
+                return labelSet.toString(g);
+            case SEMPRED :
+                return "{"+semanticContext+"}?";
+            default :
+                return g.getTokenDisplayName(label);
+        }
+    }
+
+    /*
+    public String predicatesToString() {
+        if ( semanticContext==NFAConfiguration.DEFAULT_CLAUSE_SEMANTIC_CONTEXT ) {
+            return "!other preds";
+        }
+        StringBuffer buf = new StringBuffer();
+        Iterator iter = semanticContext.iterator();
+        while (iter.hasNext()) {
+            AST predAST = (AST) iter.next();
+            buf.append(predAST.getText());
+            if ( iter.hasNext() ) {
+                buf.append("&");
+            }
+        }
+        return buf.toString();
+    }
+    */
+}
diff --git a/src/org/antlr/analysis/LookaheadSet.java b/src/org/antlr/analysis/LookaheadSet.java
new file mode 100644
index 0000000..8239e06
--- /dev/null
+++ b/src/org/antlr/analysis/LookaheadSet.java
@@ -0,0 +1,92 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.IntSet;
+import org.antlr.tool.Grammar;
+
+/** An LL(1) lookahead set; contains a set of token types and a "hasEOF"
+ *  condition when the set contains EOF.  Since EOF is -1 everywhere and -1
+ *  cannot be stored in my BitSet, I set a condition here.  There may be other
+ *  reasons in the future to abstract a LookaheadSet over a raw BitSet.
+ */
+public class LookaheadSet {
+	public IntSet tokenTypeSet;
+	public boolean hasEOF;
+
+	public LookaheadSet() {
+		tokenTypeSet = new IntervalSet();
+	}
+
+	public LookaheadSet(IntSet s) {
+		this();
+		tokenTypeSet.addAll(s);
+	}
+
+	public LookaheadSet(int atom) {
+		tokenTypeSet = IntervalSet.of(atom);
+	}
+
+	/** Factory for the lookahead set containing only EOF. */
+	public static LookaheadSet EOF() {
+		LookaheadSet s = new LookaheadSet();
+		s.hasEOF = true;
+		return s;
+	}
+
+	/** Union other into this set (destructive). */
+	public void orInPlace(LookaheadSet other) {
+		tokenTypeSet.addAll(other.tokenTypeSet);
+		hasEOF = hasEOF || other.hasEOF;
+	}
+
+	public boolean member(int a) {
+		return tokenTypeSet.member(a);
+	}
+
+	/** Remove token type a by rebuilding the set via subtraction. */
+	public void remove(int a) {
+		tokenTypeSet = tokenTypeSet.subtract(IntervalSet.of(a));
+	}
+
+	public String toString(Grammar g) {
+		if ( tokenTypeSet==null ) {
+			return hasEOF ? "EOF" : "";
+		}
+		String tokens = tokenTypeSet.toString(g);
+		return hasEOF ? tokens+"+EOF" : tokens;
+	}
+
+	public String toString() {
+		return toString(null);
+	}
+}
diff --git a/src/org/antlr/analysis/NFA.java b/src/org/antlr/analysis/NFA.java
new file mode 100644
index 0000000..de79eab
--- /dev/null
+++ b/src/org/antlr/analysis/NFA.java
@@ -0,0 +1,77 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.tool.Grammar;
+import org.antlr.tool.NFAFactory;
+
+import java.util.Vector;
+
+/** An NFA (collection of NFAStates) constructed from a grammar.  This
+ *  NFA is one big machine for entire grammar.  Decision points are recorded
+ *  by the Grammar object so we can, for example, convert to DFA or simulate
+ *  the NFA (interpret a decision).
+ */
+public class NFA {
+    public static final int INVALID_ALT_NUMBER = -1;
+
+    /** This NFA represents which grammar? */
+    public Grammar grammar;
+
+    /** The NFA states in this NFA.  Maps state number to NFAState object.
+     *  This is a Vector instead of a List because I need to be able to grow
+     *  this properly.  After talking to Josh Bloch, Collections guy at Sun,
+     *  I decided this was easiest solution.
+     */
+    protected Vector numberToStateList = new Vector(1000);
+
+    /** Which factory created this NFA? */
+    protected NFAFactory factory = null;
+
+    public NFA(Grammar g) {
+        this.grammar = g;
+    }
+
+    /** Register state under its state number, growing the table if needed.
+     *  Grow only: the previous unconditional setSize() call would TRUNCATE
+     *  the vector -- discarding already-registered higher-numbered states --
+     *  whenever a state with a lower number was added after one with a
+     *  higher number (Vector.setSize shrinks as well as grows).
+     */
+    public void addState(NFAState state) {
+        if ( numberToStateList.size()<state.stateNumber+1 ) {
+            numberToStateList.setSize(state.stateNumber+1); // make sure we have room
+        }
+        numberToStateList.set(state.stateNumber, state);
+    }
+
+    public NFAState getState(int s) {
+        return (NFAState)numberToStateList.get(s);
+    }
+
+    public NFAFactory getFactory() {
+        return factory;
+    }
+
+    public void setFactory(NFAFactory factory) {
+        this.factory = factory;
+    }
+}
+
diff --git a/src/org/antlr/analysis/NFAConfiguration.java b/src/org/antlr/analysis/NFAConfiguration.java
new file mode 100644
index 0000000..fb8a4f9
--- /dev/null
+++ b/src/org/antlr/analysis/NFAConfiguration.java
@@ -0,0 +1,147 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+/** An NFA state, predicted alt, and syntactic/semantic context.
+ *  The syntactic context is a pointer into the rule invocation
+ *  chain used to arrive at the state.  The semantic context is
+ *  the unordered set of semantic predicates encountered before reaching
+ *  an NFA state.
+ */
+public class NFAConfiguration {
+    /** The NFA state associated with this configuration */
+    public int state;
+
+    /** What alt is predicted by this configuration */
+    public int alt;
+
+    /** What is the stack of rule invocations that got us to state? */
+    public NFAContext context;
+
+    /** The set of semantic predicates associated with this NFA
+     *  configuration.  The predicates were found on the way to
+     *  the associated NFA state in this syntactic context.
+     *  Set<AST>: track nodes in grammar containing the predicate
+     *  for error messages and such (nice to know where the predicate
+     *  came from in case of duplicates etc...).  By using a set,
+     *  the equals() method will correctly show {pred1,pred2} as equals()
+     *  to {pred2,pred1}.
+     */
+    public SemanticContext semanticContext = SemanticContext.EMPTY_SEMANTIC_CONTEXT;
+
+    /** Indicate that this configuration has been resolved and no further
+     *  DFA processing should occur with it.  Essentially, this is used
+     *  as an "ignore" bit so that upon a set of nondeterministic configurations
+     *  such as (s|2) and (s|3), I can set (s|3) to resolved=true (and any
+     *  other configuration associated with alt 3).
+     */
+    protected boolean resolved;
+
+    /** This bit is used to indicate a semantic predicate will be
+     *  used to resolve the conflict.  Method
+     *  DFA.findNewDFAStatesAndAddDFATransitions will add edges for
+     *  the predicates after it performs the reach operation.  The
+     *  nondeterminism resolver sets this when it finds a set of
+     *  nondeterministic configurations (as it does for "resolved" field)
+     *  that have enough predicates to resolve the conflict.
+     */
+    protected boolean resolveWithPredicate;
+
+    /** Lots of NFA states have only epsilon edges (1 or 2).  We can
+     *  safely consider only n>0 during closure.
+     */
+    protected int numberEpsilonTransitionsEmanatingFromState;
+
+    /** Indicates that the NFA state associated with this configuration
+     *  has exactly one transition and it's an atom (not epsilon etc...).
+     */
+    protected boolean singleAtomTransitionEmanating;
+
+    public NFAConfiguration(int state,
+                            int alt,
+                            NFAContext context,
+                            SemanticContext semanticContext)
+    {
+        this.state = state;
+        this.alt = alt;
+        this.context = context;
+        this.semanticContext = semanticContext;
+    }
+
+    /** An NFA configuration is equal to another if both have
+     *  the same state, they predict the same alternative, and
+     *  syntactic/semantic contexts are the same.  I don't think
+     *  the state|alt|ctx could be the same and have two different
+     *  semantic contexts, but might as well define equals to be
+     *  everything.
+     */
+    public boolean equals(Object o) {
+		if ( !(o instanceof NFAConfiguration) ) {
+			return false; // also covers null; equals must not throw ClassCastException
+		}
+        NFAConfiguration other = (NFAConfiguration)o;
+        return this.state==other.state &&
+               this.alt==other.alt &&
+               this.context.equals(other.context)&&
+               this.semanticContext.equals(other.semanticContext);
+    }
+
+    public int hashCode() {
+        // semanticContext omitted on purpose: configs equal per equals() above
+        // necessarily agree on state/alt/context, so they still hash equally
+        int h = state + alt + context.hashCode();
+        return h;
+    }
+
+	public String toString() {
+		return toString(true);
+	}
+
+	public String toString(boolean showAlt) {
+		StringBuffer buf = new StringBuffer();
+		buf.append(state);
+		if ( showAlt ) {
+			buf.append("|");
+			buf.append(alt);
+		}
+		if ( context.parent!=null ) {
+            buf.append("|");
+            buf.append(context);
+        }
+        if ( semanticContext!=null &&
+             semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
+            buf.append("|");
+            buf.append(semanticContext);
+        }
+        if ( resolved ) {
+            buf.append("|resolved");
+        }
+        if ( resolveWithPredicate ) {
+            buf.append("|resolveWithPredicate");
+        }
+        return buf.toString();
+    }
+}
diff --git a/src/org/antlr/analysis/NFAContext.java b/src/org/antlr/analysis/NFAContext.java
new file mode 100644
index 0000000..b56d9d0
--- /dev/null
+++ b/src/org/antlr/analysis/NFAContext.java
@@ -0,0 +1,285 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+/** A tree node for tracking the call chains for NFAs that invoke
+ *  other NFAs.  These trees only have to point upwards to their parents
+ *  so we can walk back up the tree (i.e., pop stuff off the stack).  We
+ *  never walk from stack down through the children.
+ *
+ *  Each alt predicted in a decision has its own context tree,
+ *  representing all possible return nodes.  The initial stack has
+ *  EOF ("$") in it.  So, for m alternative productions, the lookahead
+ *  DFA will have m NFAContext trees.
+ *
+ *  To "push" a new context, just do "new NFAContext(context-parent, state)"
+ *  which will add itself to the parent.  The root is NFAContext(null, null).
+ *
+ *  The complete context for an NFA configuration is the set of invoking states
+ *  on the path from this node thru the parent pointers to the root.
+ */
+public class NFAContext {
+	/** This is similar to Bermudez's m constant in his LAR(m) where
+	 *  you bound the stack so your states don't explode.  The main difference
+	 *  is that I bound only recursion on the stack, not the simple stack size.
+	 *  This looser constraint will let the conversion roam further to find
+	 *  lookahead to resolve a decision.
+	 *
+	 *  Bermudez's m operates differently as it is his LR stack depth
+	 *  I'm pretty sure it therefore includes all stack symbols.  Here I
+	 *  restrict the size of an NFA configuration to be finite because a
+	 *  stack component may mention the same NFA invocation state at
+	 *  most m times.  Hence, the number of DFA states will not grow forever.
+	 *  With recursive rules like
+	 *
+	 *    e : '(' e ')' | INT ;
+	 *
+	 *  you could chase your tail forever if somebody said "s : e '.' | e ';' ;"
+	 *  This constant prevents new states from being created after a stack gets
+	 *  "too big".
+	 *
+	 *  Imagine doing a depth-first search on the DFA...as you chase an input
+	 *  sequence you can recurse to same rule such as e above.  You'd have a
+	 *  chain of ((((.  When you get to some point, you have to give up.  The
+	 *  states in the chain will have longer and longer NFA config stacks.
+	 *  Must limit size.
+	 *
+	 *  TODO: i wonder if we can recognize recursive loops and use a simple cycle?
+	 *
+	 *  max=0 implies you cannot ever jump to another rule during closure.
+	 *  max=1 implies you can make as many calls as you want--you just
+	 *        can't ever visit a state that is on your rule invocation stack.
+	 * 		  I.e., you cannot ever recurse.
+	 *  max=2 implies you are able to recurse once (i.e., call a rule twice
+	 *  	  from the same place).
+	 *
+	 *  This tracks recursion to a rule specific to an invocation site!
+	 *  It does not detect multiple calls to a rule from different rule
+	 *  invocation states.  We are guaranteed to terminate because the
+	 *  stack can only grow as big as the number of NFA states * max.
+	 *
+	 *  I noticed that the Java grammar didn't work with max=1, but did with
+	 *  max=4.  Let's set to 4. Recursion is sometimes needed to resolve some
+	 *  fixed lookahead decisions.
+	 */
+	public static int MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = 4;
+
+    public NFAContext parent;
+
+    /** The NFA state that invoked another rule's start state is recorded
+     *  on the rule invocation context stack.
+     */
+    public NFAState invokingState;
+
+    /** Computing the hashCode is very expensive and closureBusy()
+     *  uses it to track when it's seen a state|ctx before to avoid
+     *  infinite loops.  As we add new contexts, record the hash code
+     *  as this.invokingState + parent.cachedHashCode.  Avoids walking
+     *  up the tree for every hashCode().  Note that this caching works
+     *  because a context is a monotonically growing tree of context nodes
+     *  and nothing on the stack is ever modified...ctx just grows
+     *  or shrinks.
+     */
+    protected int cachedHashCode;
+
+    public NFAContext(NFAContext parent, NFAState invokingState) {
+        this.parent = parent;
+        this.invokingState = invokingState;
+        if ( invokingState!=null ) {
+            this.cachedHashCode = invokingState.stateNumber;
+        }
+        if ( parent!=null ) {
+            this.cachedHashCode += parent.cachedHashCode;
+        }
+    }
+
+	/** Two contexts are equals() if both have
+	 *  same call stack; walk upwards to the root.
+	 *  Recall that the root sentinel node has no invokingStates and no parent.
+	 *  Note that you may be comparing contexts in different alt trees.
+	 *
+	 *  The hashCode is now cheap as it's computed once upon each context
+	 *  push on the stack.  Use it to make equals() more efficient.
+	 */
+	public boolean equals(Object o) {
+		if ( !(o instanceof NFAContext) ) {
+			return false; // covers null too; equals must not throw NPE/CCE per Object contract
+		}
+		NFAContext other = (NFAContext)o;
+		if ( this.cachedHashCode != other.cachedHashCode ) {
+			return false; // can't be same if hash is different
+		}
+		if ( this==other ) {
+			return true;
+		}
+		NFAContext sp = this;
+		while ( sp.parent!=null && other.parent!=null ) {
+			if ( sp.invokingState != other.invokingState ) {
+				return false;
+			}
+			sp = sp.parent;
+			other = other.parent;
+		}
+		if ( !(sp.parent==null && other.parent==null) ) {
+			return false; // both pointers must be at their roots after walk
+		}
+		return true;
+	}
+
+	/** Two contexts conflict() if they are equals() or one is a stack suffix
+	 *  of the other.  For example, contexts [21 12 $] and [21 9 $] do not
+	 *  conflict, but [21 $] and [21 12 $] do conflict.  Note that I should
+	 *  probably not show the $ in this case.  There is a dummy node for each
+	 *  stack that just means empty; $ is a marker that's all.
+	 *
+	 *  This is used in relation to checking conflicts associated with a
+	 *  single NFA state's configurations within a single DFA state.
+	 *  If there are configurations s and t within a DFA state such that
+	 *  s.state=t.state && s.alt != t.alt && s.ctx conflicts t.ctx then
+	 *  the DFA state predicts more than a single alt--it's nondeterministic.
+	 *  Two contexts conflict if they are the same or if one is a suffix
+	 *  of the other.
+	 *
+	 *  When comparing contexts, if one context has a stack and the other
+	 *  does not then they should be considered the same context.  The only
+	 *  way for an NFA state p to have an empty context and a nonempty context
+	 *  is the case when closure falls off end of rule without a call stack
+	 *  and re-enters the rule with a context.  This resolves the issue I
+	 *  discussed with Sriram Srinivasan Feb 28, 2005 about not terminating
+	 *  fast enough upon nondeterminism.
+	 */
+	public boolean conflictsWith(NFAContext other) {
+		return this.suffix(other); // || this.equals(other);
+	}
+
+	/** [$] suffix any context
+	 *  [21 $] suffix [21 12 $]
+	 *  [21 12 $] suffix [21 $]
+	 *  [21 18 $] suffix [21 18 12 9 $]
+	 *  [21 18 12 9 $] suffix [21 18 $]
+	 *  [21 12 $] not suffix [21 9 $]
+	 *
+	 *  Example "[21 $] suffix [21 12 $]" means: rule r invoked current rule
+	 *  from state 21.  Rule s invoked rule r from state 12 which then invoked
+	 *  current rule also via state 21.  While the context prior to state 21
+	 *  is different, the fact that both contexts emanate from state 21 implies
+	 *  that they are now going to track perfectly together.  Once they
+	 *  converged on state 21, there is no way they can separate.  In other
+	 *  words, the prior stack state is not consulted when computing where to
+	 *  go in the closure operation.  ?$ and ??$ are considered the same stack.
+	 *  If ? is popped off then $ and ?$ remain; they are now an empty and
+	 *  nonempty context comparison.  So, if one stack is a suffix of
+	 *  another, then it will still degenerate to the simple empty stack
+	 *  comparison case.
+	 */
+	protected boolean suffix(NFAContext other) {
+		NFAContext sp = this;
+		// if one of the contexts is empty, it never enters loop and returns true
+		while ( sp.parent!=null && other.parent!=null ) {
+			if ( sp.invokingState != other.invokingState ) {
+				return false;
+			}
+			sp = sp.parent;
+			other = other.parent;
+		}
+		//System.out.println("suffix");
+		return true;
+	}
+
+    /** Walk upwards to the root of the call stack context looking
+     *  for a particular invoking state.
+	public boolean contains(int state) {
+        NFAContext sp = this;
+		int n = 0; // track recursive invocations of state
+		System.out.println("this.context is "+sp);
+		while ( sp.parent!=null ) {
+            if ( sp.invokingState.stateNumber == state ) {
+				return true;
+            }
+            sp = sp.parent;
+        }
+        return false;
+    }
+	 */
+
+	/** Given an NFA state number, how many times has the NFA-to-DFA
+	 *  conversion pushed that state on the stack?  In other words,
+	 *  the NFA state must be a rule invocation state and this method
+	 *  tells you how many times you've been to this state.  If none,
+	 *  then you have not called the target rule from this state before
+	 *  (though another NFA state could have called that target rule).
+	 *  If n=1, then you've been to this state before during this
+	 *  DFA construction and are going to invoke that rule again.
+	 *
+	 *  Note that many NFA states can invoke rule r, but we ignore recursion
+	 *  unless you hit the same rule invocation state again.
+	 */
+	public int recursionDepthEmanatingFromState(int state) {
+		NFAContext sp = this;
+		int n = 0; // track recursive invocations of target from this state
+		//System.out.println("this.context is "+sp);
+		while ( sp.parent!=null ) {
+			if ( sp.invokingState.stateNumber == state ) {
+				n++;
+			}
+			sp = sp.parent;
+		}
+		return n;
+	}
+
+    public int hashCode() {
+        return cachedHashCode;
+        /*
+        int h = 0;
+        NFAContext sp = this;
+        while ( sp.parent!=null ) {
+            h += sp.invokingState.getStateNumber();
+            sp = sp.parent;
+        }
+        return h;
+        */
+    }
+
+	/** A context is empty if there is no parent; meaning nobody pushed
+	 *  anything on the call stack.
+	 */
+	public boolean isEmpty() {
+		return parent==null;
+	}
+
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        NFAContext sp = this;
+        buf.append("[");
+        while ( sp.parent!=null ) {
+            buf.append(sp.invokingState.stateNumber);
+            buf.append(" ");
+            sp = sp.parent;
+        }
+        buf.append("$]");
+        return buf.toString();
+    }
+}
diff --git a/src/org/antlr/analysis/NFAConversionThread.java b/src/org/antlr/analysis/NFAConversionThread.java
new file mode 100644
index 0000000..29d2bee
--- /dev/null
+++ b/src/org/antlr/analysis/NFAConversionThread.java
@@ -0,0 +1,38 @@
+package org.antlr.analysis;
+
+import org.antlr.misc.Barrier;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.ErrorManager;
+
+/** Convert all decisions i..j inclusive in a thread */
+public class NFAConversionThread implements Runnable {
+	Grammar grammar;
+	int i, j;
+	Barrier barrier;
+	public NFAConversionThread(Grammar grammar,
+							   Barrier barrier,
+							   int i,
+							   int j)
+	{
+		this.grammar = grammar;
+		this.barrier = barrier;
+		this.i = i;
+		this.j = j;
+	}
+	public void run() {
+		for (int decision=i; decision<=j; decision++) {
+			NFAState decisionStartState = grammar.getDecisionNFAStartState(decision);
+			if ( decisionStartState.getNumberOfTransitions()>1 ) {
+				grammar.createLookaheadDFA(decision);
+			}
+		}
+		// now wait for others to finish
+		try {
+			barrier.waitForRelease();
+		}
+		catch(InterruptedException e) {
+			ErrorManager.internalError("what the hell? DFA interruptus", e);
+		}
+	}
+}
+
diff --git a/src/org/antlr/analysis/NFAState.java b/src/org/antlr/analysis/NFAState.java
new file mode 100644
index 0000000..d70c06e
--- /dev/null
+++ b/src/org/antlr/analysis/NFAState.java
@@ -0,0 +1,252 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.tool.GrammarAST;
+
+/** A state within an NFA. At most 2 transitions emanate from any NFA state. */
+public class NFAState extends State {
+	// I need to distinguish between NFA decision states for (...)* and (...)+
+	// during NFA interpretation.
+	public static final int LOOPBACK = 1;
+	public static final int BLOCK_START = 2;
+	public static final int OPTIONAL_BLOCK_START = 3;
+	public static final int BYPASS = 4;
+	public static final int RIGHT_EDGE_OF_BLOCK = 5;
+
+	public static final int MAX_TRANSITIONS = 2;
+
+	/** How many transitions; 0, 1, or 2 transitions */
+	int numTransitions = 0;
+	Transition[] transition = new Transition[MAX_TRANSITIONS];
+
+	/** Which NFA are we in? */
+	public NFA nfa = null;
+
+	/** What's its decision number from 1..n? */
+	protected int decisionNumber = 0;
+
+	/** Subrules (...)* and (...)+ have more than one decision point in
+	 *  the NFA created for them.  They both have a loop-exit-or-stay-in
+	 *  decision node (the loop back node).  They both have a normal
+	 *  alternative block decision node at the left edge.  The (...)* is
+	 *  worse as it even has a bypass decision (2 alts: stay in or bypass)
+	 *  node at the extreme left edge.  This is not how they get generated
+	 *  in code as a while-loop or whatever deals nicely with either.  For
+	 *  error messages (where I need to print the nondeterministic alts)
+	 *  and for interpretation, I need to use the single DFA that is created
+	 *  (for efficiency) but interpret the results differently depending
+	 *  on which of the 2 or 3 decision states uses the DFA.  For example,
+	 *  the DFA will always report alt n+1 as the exit branch for n real
+	 *  alts, so I need to translate that depending on the decision state.
+	 *
+	 *  If decisionNumber>0 then this var tells you what kind of decision
+	 *  state it is.
+	 */
+	public int decisionStateType;
+
+	/** What rule do we live in? */
+	protected String enclosingRule;
+
+	/** During debugging and for nondeterminism warnings, it's useful
+	 *  to know what relationship this node has to the original grammar.
+	 *  For example, "start of alt 1 of rule a".
+	 */
+	protected String description;
+
+	/** Associate this NFAState with the corresponding GrammarAST node
+	 *  from which this node was created.  This is useful not only for
+	 *  associating the eventual lookahead DFA with the associated
+	 *  Grammar position, but also for providing users with
+	 *  nondeterminism warnings.  Mainly used by decision states to
+	 *  report line:col info.  Could also be used to track line:col
+	 *  for elements such as token refs.
+	 */
+	protected GrammarAST associatedASTNode;
+
+	/** Is this state the sole target of an EOT transition? */
+	protected boolean EOTTargetState = false;
+
+	/** Jean Bovet needs in the GUI to know which state pairs correspond
+	 *  to the start/stop of a block.
+	  */
+	public int endOfBlockStateNumber = State.INVALID_STATE_NUMBER;
+
+	public NFAState(NFA nfa) {
+		this.nfa = nfa;
+	}
+
+	public int getNumberOfTransitions() {
+		return numTransitions;
+	}
+
+	/** Add an edge; null edges are silently ignored.  Throws if both
+	 *  transition slots are already occupied.
+	 */
+	public void addTransition(Transition e) {
+		if ( e==null ) { return; }
+		// was ">", which could never fire before the array write threw
+		// ArrayIndexOutOfBoundsException; ">=" reports the intended error
+		if ( numTransitions>=transition.length ) {
+			throw new IllegalArgumentException("You can only have "+transition.length+" transitions");
+		}
+		transition[numTransitions] = e;
+		numTransitions++;
+	}
+
+	/** Used during optimization to reset a state to have the (single)
+	 *  transition another state has.
+	 */
+	public void setTransition0(Transition e) {
+		transition[0] = e;
+		transition[1] = null;
+		numTransitions = 1;
+	}
+
+	public Transition transition(int i) {
+		return transition[i];
+	}
+
+	/** The DFA decision for this NFA decision state always has
+	 *  an exit path for loops as n+1 for n alts in the loop.
+	 *  That is really useful for displaying nondeterministic alts
+	 *  and so on, but for walking the NFA to get a sequence of edge
+	 *  labels or for actually parsing, we need to get the real alt
+	 *  number.  The real alt number for exiting a loop is always 1
+	 *  as transition 0 points at the exit branch (we compute DFAs
+	 *  always for loops at the loopback state).
+	 *
+	 *  For walking/parsing the loopback state:
+	 * 		1 2 3 display alt (for human consumption)
+	 * 		2 3 1 walk alt
+	 *
+	 *  For walking the block start:
+	 * 		1 2 3 display alt
+	 * 		1 2 3
+	 *
+	 *  For walking the bypass state of a (...)* loop:
+	 * 		1 2 3 display alt
+	 * 		1 1 2 all block alts map to entering loop exit means take bypass
+	 *
+	 *  Non loop EBNF do not need to be translated; they are ignored by
+	 *  this method as decisionStateType==0.
+	 *
+	 *  Return same alt if we can't translate.
+	 */
+	public int translateDisplayAltToWalkAlt(DFA dfa, int displayAlt) {
+		if ( decisionNumber==0 || decisionStateType==0 ) {
+			return displayAlt;
+		}
+		int walkAlt = 0;
+		// find the NFA loopback state associated with this DFA
+		// and count number of alts (all alt numbers are computed
+		// based upon the loopback's NFA state.
+		/*
+		DFA dfa = nfa.grammar.getLookaheadDFA(decisionNumber);
+		if ( dfa==null ) {
+			ErrorManager.internalError("can't get DFA for decision "+decisionNumber);
+		}
+		*/
+		NFAState nfaStart = dfa.getNFADecisionStartState();
+		int nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(nfaStart);
+		switch ( decisionStateType ) {
+			case LOOPBACK :
+				walkAlt = displayAlt % nAlts + 1; // rotate right mod 1..3
+				break;
+			case BLOCK_START :
+			case OPTIONAL_BLOCK_START :
+				walkAlt = displayAlt; // identity transformation
+				break;
+			case BYPASS :
+				if ( displayAlt == nAlts ) {
+					walkAlt = 2; // bypass
+				}
+				else {
+					walkAlt = 1; // any non exit branch alt predicts entering
+				}
+				break;
+		}
+		return walkAlt;
+	}
+
+	// Setter/Getters
+
+	/** What AST node is associated with this NFAState?  When you
+	 *  set the AST node, I set the node to point back to this NFA state.
+	 */
+	public void setDecisionASTNode(GrammarAST decisionASTNode) {
+		decisionASTNode.setNFAStartState(this);
+		this.associatedASTNode = decisionASTNode;
+	}
+
+	public GrammarAST getAssociatedASTNode() {
+		return associatedASTNode;
+	}
+
+	public void setAssociatedASTNode(GrammarAST ASTNode) {
+		this.associatedASTNode = ASTNode;
+	}
+
+	public String getDescription() {
+		return description;
+	}
+
+	public void setDescription(String description) {
+		this.description = description;
+	}
+
+	public int getDecisionNumber() {
+		return decisionNumber;
+	}
+
+	public void setDecisionNumber(int decisionNumber) {
+		this.decisionNumber = decisionNumber;
+	}
+
+	public void setEnclosingRuleName(String rule) {
+		this.enclosingRule = rule;
+	}
+
+	public String getEnclosingRule() {
+		return enclosingRule;
+	}
+
+	public boolean isEOTTargetState() {
+		return EOTTargetState;
+	}
+
+	public void setEOTTargetState(boolean eot) {
+		EOTTargetState = eot;
+	}
+
+	public boolean isDecisionState() {
+		return decisionStateType>0;
+	}
+
+	public String toString() {
+		return String.valueOf(stateNumber);
+	}
+
+}
+
diff --git a/src/org/antlr/analysis/NFAToDFAConverter.java b/src/org/antlr/analysis/NFAToDFAConverter.java
new file mode 100644
index 0000000..7c23b4a
--- /dev/null
+++ b/src/org/antlr/analysis/NFAToDFAConverter.java
@@ -0,0 +1,1742 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.misc.IntSet;
+import org.antlr.misc.OrderedHashSet;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+/** Code that embodies the NFA conversion to DFA. */
+public class NFAToDFAConverter {
+	/** A list of DFA states we still need to process during NFA conversion */
+	protected List work = new LinkedList();
+
+	/** While converting NFA, we must track states that
+	 *  reference other rule's NFAs so we know what to do
+	 *  at the end of a rule.  We need to know what context invoked
+	 *  this rule so we can know where to continue looking for NFA
+	 *  states.  I'm tracking a context tree (record of rule invocation
+	 *  stack trace) for each alternative that could be predicted.
+	 */
+	protected NFAContext[] contextTrees;
+
+	/** We are converting which DFA? */
+	protected DFA dfa;
+
+	/** When true, trace reach/closure operations to stdout. */
+	public static boolean debug = false;
+
+	/** Should ANTLR launch multiple threads to convert NFAs to DFAs?
+	 *  With a 2-CPU box, I note that it's about the same single or
+	 *  multithreaded.  Both CPU meters are going even when single-threaded
+	 *  so I assume the GC is killing us.  Could be the compiler.  When I
+	 *  run java -Xint mode, I get about 15% speed improvement with multiple
+	 *  threads.
+	 */
+	public static boolean SINGLE_THREADED_NFA_CONVERSION = true;
+
+	/** Build a converter for one decision's DFA; allocates one context
+	 *  tree per alternative of the decision.
+	 */
+	public NFAToDFAConverter(DFA dfa) {
+		this.dfa = dfa;
+		// NOTE(review): nfaStartState is assigned but never used here —
+		// candidate for removal (left intact to keep the patch verbatim).
+		NFAState nfaStartState = dfa.getNFADecisionStartState();
+		int nAlts = dfa.getNumberOfAlts();
+		initContextTrees(nAlts);
+	}
+
+	/** Drive the NFA-to-DFA subset construction: build the DFA start state,
+	 *  then process the work list until empty or until analysis is aborted
+	 *  (probe abort, non-LL(*) decision, or external abort).  Afterwards,
+	 *  record any syntactic predicates found in accept-state configurations.
+	 */
+	public void convert() {
+		dfa.conversionStartTime = System.currentTimeMillis();
+
+		// create the DFA start state
+		dfa.startState = computeStartState();
+
+		// while more DFA states to check, process them
+		while ( work.size()>0 &&
+			    !dfa.probe.analysisAborted() &&
+				!dfa.probe.nonLLStarDecision &&
+				!dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() )
+		{
+			DFAState d = (DFAState) work.get(0);
+			if ( dfa.nfa.grammar.getWatchNFAConversion() ) {
+				System.out.println("convert DFA state "+d.stateNumber+
+								   " ("+d.getNFAConfigurations().size()+" nfa states)");
+			}
+			int k = dfa.getUserMaxLookahead();
+			if ( k>0 && k==d.getLookaheadDepth() ) {
+				// we've hit max lookahead, make this a stop state
+				//System.out.println("stop state @k="+k+" (terminated early)");
+				resolveNonDeterminisms(d);
+				// Check to see if we need to add any semantic predicate transitions
+				if ( d.isResolvedWithPredicates() ) {
+					addPredicateTransitions(d);
+				}
+				else {
+					d.setAcceptState(true); // must convert to accept state at k
+				}
+			}
+			else {
+				findNewDFAStatesAndAddDFATransitions(d);
+			}
+			work.remove(0); // done with it; remove from work list
+		}
+
+		// walk all accept states and find the synpreds
+		// I used to do this in the code generator, but that is too late.
+		// This converter tries to avoid computing DFA for decisions in
+		// syntactic predicates that are not ever used such as those
+		// created by autobacktrack mode.
+		int nAlts = dfa.getNumberOfAlts();
+		for (int i=1; i<=nAlts; i++) {
+			DFAState a = dfa.getAcceptState(i);
+			if ( a!=null ) {
+				Set synpreds = a.getSyntacticPredicatesInNFAConfigurations();
+				if ( synpreds!=null ) {
+					// add all the predicates we find (should be just one, right?)
+					for (Iterator it = synpreds.iterator(); it.hasNext();) {
+						SemanticContext semctx = (SemanticContext) it.next();
+						// System.out.println("synpreds: "+semctx);
+						dfa.nfa.grammar.synPredUsedInDFA(dfa, semctx);
+					}
+				}
+			}
+		}
+
+	}
+
+	/** From this first NFA state of a decision, create a DFA.
+	 *  Walk each alt in decision and compute closure from the start of that
+	 *  rule, making sure that the closure does not include other alts within
+	 *  that same decision.  The idea is to associate a specific alt number
+	 *  with the starting closure so we can trace the alt number for all states
+	 *  derived from this.  At a stop state in the DFA, we can return this alt
+	 *  number, indicating which alt is predicted.
+	 *
+	 *  If this DFA is derived from a loopback NFA state, then the first
+	 *  transition is actually the exit branch of the loop.  Rather than make
+	 *  this alternative one, let's make this alt n+1 where n is the number of
+	 *  alts in this block.  This is nice to keep the alts of the block 1..n;
+	 *  helps with error messages.
+	 *
+	 *  I handle nongreedy in findNewDFAStatesAndAddDFATransitions
+	 *  when nongreedy and EOT transition.  Make state with EOT emanating
+	 *  from it the accept state.
+	 */
+	protected DFAState computeStartState() {
+		NFAState alt = dfa.decisionNFAStartState;
+		DFAState startState = dfa.newState();
+		int i = 0;           // index over the decision's alternatives
+		int altNum = 1;      // alt number recorded in each NFA configuration
+		while ( alt!=null ) {
+			// find the set of NFA states reachable without consuming
+			// any input symbols for each alt.  Keep adding to same
+			// overall closure that will represent the DFA start state,
+			// but track the alt number
+			NFAContext initialContext = contextTrees[i];
+			// if first alt is derived from loopback/exit branch of loop,
+			// make alt=n+1 for n alts instead of 1
+			if ( i==0 &&
+				 dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK )
+			{
+				int numAltsIncludingExitBranch = dfa.nfa.grammar
+						.getNumberOfAltsForDecisionNFA(dfa.decisionNFAStartState);
+				altNum = numAltsIncludingExitBranch;
+				closure((NFAState)alt.transition(0).target,
+						altNum,
+						initialContext,
+						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
+						startState,
+						true);
+				altNum = 1; // make next alt the first
+			}
+			else {
+				closure((NFAState)alt.transition(0).target,
+						altNum,
+						initialContext,
+						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
+						startState,
+						true);
+				altNum++;
+			}
+			i++;
+
+			// move to next alternative; alts are chained via transition(1)
+			if ( alt.transition(1)==null ) {
+				break;
+			}
+			alt = (NFAState)alt.transition(1).target;
+		}
+
+		// now DFA start state has the complete closure for the decision
+		// but we have tracked which alt is associated with which
+		// NFA states.
+		dfa.addState(startState); // make sure dfa knows about this state
+		work.add(startState);
+		return startState;
+	}
+
+	/** From this node, add a d--a-->t transition for all
+	 *  labels 'a' where t is a DFA node created
+	 *  from the set of NFA states reachable from any NFA
+	 *  state in DFA state d.
+	 */
+	protected void findNewDFAStatesAndAddDFATransitions(DFAState d) {
+		//System.out.println("work on DFA state "+d);
+		OrderedHashSet labels = d.getReachableLabels();
+		/*
+		System.out.println("reachable="+labels.toString());
+		System.out.println("|reachable|/|nfaconfigs|="+
+				labels.size()+"/"+d.getNFAConfigurations().size()+"="+
+				labels.size()/(float)d.getNFAConfigurations().size());
+		*/
+
+		// normally EOT is the "default" clause and decisions just
+		// choose that last clause when nothing else matches.  DFA conversion
+		// continues searching for a unique sequence that predicts the
+		// various alts or until it finds EOT.  So this rule
+		//
+		// DUH : ('x'|'y')* "xy!";
+		//
+		// does not need a greedy indicator.  The following rule works fine too
+		//
+		// A : ('x')+ ;
+		//
+		// When the follow branch could match what is in the loop, by default,
+		// the nondeterminism is resolved in favor of the loop.  You don't
+		// get a warning because the only way to get this condition is if
+		// the DFA conversion hits the end of the token.  In that case,
+		// we're not *sure* what will happen next, but it could be anything.
+		// Anyway, EOT is the default case which means it will never be matched
+		// as resolution goes to the lowest alt number.  Exit branches are
+		// always alt n+1 for n alts in a block.
+		//
+		// When a loop is nongreedy and we find an EOT transition, the DFA
+		// state should become an accept state, predicting exit of loop.  It's
+		// just reversing the resolution of ambiguity.
+		// TODO: should this be done in the resolveAmbig method?
+		Label EOTLabel = new Label(Label.EOT);
+		boolean containsEOT = labels.contains(EOTLabel);
+		if ( !dfa.isGreedy() && containsEOT ) {
+			convertToEOTAcceptState(d);
+			return; // no more work to do on this accept state
+		}
+
+		// if in filter mode for lexer, want to match shortest not longest
+		// string so if we see an EOT edge emanating from this state, then
+		// convert this state to an accept state.  This only counts for
+		// The Tokens rule as all other decisions must continue to look for
+		// longest match.
+		// [Taking back out a few days later on Jan 17, 2006.  This could
+		//  be an option for the future, but this was wrong solution for
+		//  filtering.]
+		/*
+		if ( dfa.nfa.grammar.type==Grammar.LEXER && containsEOT ) {
+			String filterOption = (String)dfa.nfa.grammar.getOption("filter");
+			boolean filterMode = filterOption!=null && filterOption.equals("true");
+			if ( filterMode && d.dfa.isTokensRuleDecision() ) {
+				DFAState t = reach(d, EOTLabel);
+				if ( t.getNFAConfigurations().size()>0 ) {
+					convertToEOTAcceptState(d);
+					//System.out.println("state "+d+" has EOT target "+t.stateNumber);
+					return;
+				}
+			}
+		}
+		*/
+
+		int numberOfEdgesEmanating = 0;
+		Map targetToLabelMap = new HashMap();
+		// for each label that could possibly emanate from NFAStates of d
+		// (abort if we find any closure operation on a configuration of d
+		//  that finds multiple alts with recursion, non-LL(*), as we cannot
+		//  trust any reach operations from d since we are blind to some
+		//  paths.  Leave state a dead-end and try to resolve with preds)
+		for (int i=0; !d.abortedDueToMultipleRecursiveAlts && i<labels.size(); i++) {
+			Label label = (Label)labels.get(i);
+			DFAState t = reach(d, label);
+			if ( debug ) {
+				System.out.println("DFA state after reach "+d+"-" +
+								   label.toString(dfa.nfa.grammar)+"->"+t);
+			}
+            if ( t==null ) {
+                // nothing was reached by label due to conflict resolution
+				// EOT also seems to be in here occasionally probably due
+				// to an end-of-rule state seeing it even though we'll pop
+				// an invoking state off the state; don't bother to conflict
+				// as this labels set is a covering approximation only.
+                continue;
+			}
+			if ( t.getUniqueAlt()==NFA.INVALID_ALT_NUMBER ) {
+				// Only compute closure if a unique alt number is not known.
+				// If a unique alternative is mentioned among all NFA
+				// configurations then there is no possibility of needing to look
+				// beyond this state; also no possibility of a nondeterminism.
+				// This optimization May 22, 2006 just dropped -Xint time
+				// for analysis of Java grammar from 11.5s to 2s!  Wow.
+				closure(t);  // add any NFA states reachable via epsilon
+			}
+
+			/*
+			System.out.println("DFA state after closure "+d+"-"+
+							   label.toString(dfa.nfa.grammar)+
+							   "->"+t);
+							   */
+
+			// add if not in DFA yet even if its closure aborts due to non-LL(*);
+			// getting to the state is ok, we just can't see where to go next--it's
+			// a blind alley.
+			DFAState targetState = addDFAStateToWorkList(t);
+
+			// returns 1 when a brand-new edge was added, 0 when merged into
+			// an existing parallel edge (see addTransition)
+			numberOfEdgesEmanating +=
+				addTransition(d, label, targetState, targetToLabelMap);
+
+			// lookahead of target must be one larger than d's k
+			targetState.setLookaheadDepth(d.getLookaheadDepth() + 1);
+
+			// closure(t) might have aborted, but addDFAStateToWorkList will try
+			// to resolve t with predicates.  If that fails, must give an error
+			// Note: this is tested on the target of d not d.
+			if ( t.abortedDueToMultipleRecursiveAlts && !t.isResolvedWithPredicates() ) {
+				// no predicates to resolve non-LL(*) decision, report
+				t.dfa.probe.reportNonLLStarDecision(t.dfa);
+			}
+		}
+
+		//System.out.println("DFA after reach / closures:\n"+dfa);
+
+		if ( !d.isResolvedWithPredicates() && numberOfEdgesEmanating==0 ) {
+			// TODO: can fixed lookahead hit a dangling state case?
+			// TODO: yes, with left recursion
+			// TODO: alter DANGLING err template to have input to that state
+			//System.err.println("dangling state alts: "+d.getAltSet());
+			dfa.probe.reportDanglingState(d);
+			// turn off all configurations except for those associated with
+			// min alt number; somebody has to win else some input will not
+			// predict any alt.
+			int minAlt = resolveByPickingMinAlt(d, null);
+			convertToAcceptState(d, minAlt); // force it to be an accept state
+		}
+
+		// Check to see if we need to add any semantic predicate transitions
+		if ( d.isResolvedWithPredicates() ) {
+			addPredicateTransitions(d);
+		}
+	}
+
+	/** Add a transition from state d to targetState with label in normal case.
+	 *  if COLLAPSE_ALL_INCIDENT_EDGES, however, try to merge all edges from
+	 *  d to targetState; this means merging their labels.  Another optimization
+	 *  is to reduce to a single EOT edge any set of edges from d to targetState
+	 *  where there exists an EOT state.  EOT is like the wildcard so don't
+	 *  bother to test any other edges.  Example:
+	 *
+	 *  NUM_INT
+	 *    : '1'..'9' ('0'..'9')* ('l'|'L')?
+     *    | '0' ('x'|'X') ('0'..'9'|'a'..'f'|'A'..'F')+ ('l'|'L')?
+     *    | '0' ('0'..'7')* ('l'|'L')?
+	 *    ;
+	 *
+	 *  The normal decision to predict alts 1, 2, 3 is:
+	 *
+	 *  if ( (input.LA(1)>='1' && input.LA(1)<='9') ) {
+     *       alt7=1;
+     *  }
+     *  else if ( input.LA(1)=='0' ) {
+     *      if ( input.LA(2)=='X'||input.LA(2)=='x' ) {
+     *          alt7=2;
+     *      }
+     *      else if ( (input.LA(2)>='0' && input.LA(2)<='7') ) {
+     *           alt7=3;
+     *      }
+     *      else if ( input.LA(2)=='L'||input.LA(2)=='l' ) {
+     *           alt7=3;
+     *      }
+     *      else {
+     *           alt7=3;
+     *      }
+     *  }
+     *  else error
+	 *
+     *  Clearly, alt 3 is predicted with extra work since it tests 0..7
+	 *  and [lL] before finally realizing that any character is actually
+	 *  ok at k=2.
+	 *
+	 *  A better decision is as follows:
+     *
+	 *  if ( (input.LA(1)>='1' && input.LA(1)<='9') ) {
+	 *      alt7=1;
+	 *  }
+	 *  else if ( input.LA(1)=='0' ) {
+	 *      if ( input.LA(2)=='X'||input.LA(2)=='x' ) {
+	 *          alt7=2;
+	 *      }
+	 *      else {
+	 *          alt7=3;
+	 *      }
+	 *  }
+	 *
+	 *  The DFA originally has 3 edges going to the state the predicts alt 3,
+	 *  but upon seeing the EOT edge (the "else"-clause), this method
+	 *  replaces the old merged label (which would have (0..7|l|L)) with EOT.
+	 *  The code generator then leaves alt 3 predicted with a simple else-
+	 *  clause. :)
+	 *
+	 *  The only time the EOT optimization makes no sense is in the Tokens
+	 *  rule.  We want EOT to truly mean you have matched an entire token
+	 *  so don't bother actually rewinding to execute that rule unless there
+	 *  are actions in that rule.  For now, since I am not preventing
+	 *  backtracking from Tokens rule, I will simply allow the optimization.
+	 */
+	// Returns the number of NEW edges added: 1 when a fresh d->targetState
+	// edge is created, 0 when the label is merged into an existing edge.
+	protected static int addTransition(DFAState d,
+									   Label label,
+									   DFAState targetState,
+									   Map targetToLabelMap)
+	{
+		//System.out.println(d.stateNumber+"-"+label.toString(dfa.nfa.grammar)+"->"+targetState.stateNumber);
+		int n = 0;
+		if ( DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES ) {
+			// track which targets we've hit
+			Integer tI = Utils.integer(targetState.stateNumber);
+			Transition oldTransition = (Transition)targetToLabelMap.get(tI);
+			if ( oldTransition!=null ) {
+				//System.out.println("extra transition to "+tI+" upon "+label.toString(dfa.nfa.grammar));
+				// already seen state d to target transition, just add label
+				// to old label unless EOT
+				if ( label.getAtom()==Label.EOT ) {
+					// merge with EOT means old edge can go away
+					oldTransition.label = new Label(Label.EOT);
+				}
+				else {
+					// don't add anything to EOT, it's essentially the wildcard
+					if ( oldTransition.label.getAtom()!=Label.EOT ) {
+						// ok, not EOT, add in this label to old label
+						oldTransition.label.add(label);
+					}
+					//System.out.println("label updated to be "+oldTransition.label.toString(dfa.nfa.grammar));
+				}
+			}
+			else {
+				// make a transition from d to t upon 'a'
+				n = 1;
+				label = (Label)label.clone(); // clone in case we alter later
+				int transitionIndex = d.addTransition(targetState, label);
+				Transition trans = d.getTransition(transitionIndex);
+				// track target/transition pairs
+				targetToLabelMap.put(tI, trans);
+			}
+		}
+		else {
+			// no edge collapsing: every call adds its own edge
+			n = 1;
+			d.addTransition(targetState, label);
+		}
+		return n;
+	}
+
+	/** For all NFA states (configurations) merged in d,
+	 *  compute the epsilon closure; that is, find all NFA states reachable
+	 *  from the NFA states in d via purely epsilon transitions.
+	 */
+	public void closure(DFAState d) {
+		if ( debug ) {
+			System.out.println("closure("+d+")");
+		}
+		Set configs = new HashSet();
+		// Because we are adding to the configurations in closure
+		// must clone initial list so we know when to stop doing closure
+		// TODO: expensive, try to get around this alloc / copy
+		configs.addAll(d.getNFAConfigurations());
+		// for each NFA configuration in d (abort if we detect non-LL(*) state)
+		Iterator iter = configs.iterator();
+		while (!d.abortedDueToMultipleRecursiveAlts && iter.hasNext() ) {
+			NFAConfiguration c = (NFAConfiguration)iter.next();
+			if ( c.singleAtomTransitionEmanating ) {
+				continue; // ignore NFA states w/o epsilon transitions
+			}
+			//System.out.println("go do reach for NFA state "+c.state);
+			// figure out reachable NFA states from each of d's nfa states
+			// via epsilon transitions
+			closure(dfa.nfa.getState(c.state),
+					c.alt,
+					c.context,
+					c.semanticContext,
+					d,
+					false);
+		}
+		d.closureBusy = null; // wack all that memory used during closure
+	}
+
+	/** Where can we get from NFA state p traversing only epsilon transitions?
+	 *  Add new NFA states + context to DFA state d.  Also add semantic
+	 *  predicates to semantic context if collectPredicates is set.  We only
+	 *  collect predicates at hoisting depth 0, meaning before any token/char
+	 *  have been recognized.  This corresponds, during analysis, to the
+	 *  initial DFA start state construction closure() invocation.
+	 *
+	 *  There are four cases of interest (the last being the usual transition):
+	 *
+	 *   1. Traverse an edge that takes us to the start state of another
+	 *      rule, r.  We must push this state so that if the DFA
+	 *      conversion hits the end of rule r, then it knows to continue
+	 *      the conversion at state following state that "invoked" r. By
+	 *      construction, there is a single transition emanating from a rule
+	 *      ref node.
+	 *
+	 *   2. Reach an NFA state associated with the end of a rule, r, in the
+	 *      grammar from which it was built.  We must add an implicit (i.e.,
+	 *      don't actually add an epsilon transition) epsilon transition
+	 *      from r's end state to the NFA state following the NFA state
+	 *      that transitioned to rule r's start state.  Because there are
+	 *      many states that could reach r, the context for a rule invocation
+	 *      is part of a call tree not a simple stack.  When we fall off end
+	 *      of rule, "pop" a state off the call tree and add that state's
+	 *      "following" node to d's NFA configuration list.  The context
+	 *      for this new addition will be the new "stack top" in the call tree.
+	 *
+	 *   3. Like case 2, we reach an NFA state associated with the end of a
+	 *      rule, r, in the grammar from which NFA was built.  In this case,
+	 *      however, we realize that during this NFA->DFA conversion, no state
+	 *      invoked the current rule's NFA.  There is no choice but to add
+	 *      all NFA states that follow references to r's start state.  This is
+	 *      analogous to computing the FOLLOW(r) in the LL(k) world.  By
+	 *      construction, even rule stop state has a chain of nodes emanating
+	 *      from it that points to every possible following node.  This case
+	 *      is conveniently handled then by the 4th case.
+	 *
+	 *   4. Normal case.  If p can reach another NFA state q, then add
+	 *      q to d's configuration list, copying p's context for q's context.
+	 *      If there is a semantic predicate on the transition, then AND it
+	 *      with any existing semantic context.
+	 *
+	 *   Current state p is always added to d's configuration list as it's part
+	 *   of the closure as well.
+	 *
+	 *  When is a closure operation in a cycle condition?  While it is
+	 *  very possible to have the same NFA state mentioned twice
+	 *  within the same DFA state, there are two situations that
+	 *  would lead to nontermination of closure operation:
+	 *
+	 *  o   Whenever closure reaches a configuration where the same state
+	 *      with same or a suffix context already exists.  This catches
+	 *      the IF-THEN-ELSE tail recursion cycle and things like
+	 *
+	 *      a : A a | B ;
+	 *
+	 *      the context will be $ (empty stack).
+	 *
+	 *      We have to check
+	 *      larger context stacks because of (...)+ loops.  For
+	 *      example, the context of a (...)+ can be nonempty if the
+	 *      surrounding rule is invoked by another rule:
+	 *
+	 *      a : b A | X ;
+	 *      b : (B|)+ ;  // nondeterministic by the way
+	 *
+	 *      The context of the (B|)+ loop is "invoked from item
+	 *      a : . b A ;" and then the empty alt of the loop can reach back
+	 *      to itself.  The context stack will have one "return
+	 *      address" element and so we must check for same state, same
+	 *      context for arbitrary context stacks.
+	 *
+	 *      Idea: If we've seen this configuration before during closure, stop.
+	 *      We also need to avoid reaching same state with conflicting context.
+	 *      Ultimately analysis would stop and we'd find the conflict, but we
+	 *      should stop the computation.  Previously I only checked for
+	 *      exact config.  Need to check for same state, suffix context
+	 * 		not just exact context.
+	 *
+	 *  o   Whenever closure reaches a configuration where state p
+	 *      is present in its own context stack.  This means that
+	 *      p is a rule invocation state and the target rule has
+	 *      been called before.  NFAContext.MAX_RECURSIVE_INVOCATIONS
+	 *      (See the comment there also) determines how many times
+	 *      it's possible to recurse; clearly we cannot recurse forever.
+	 *      Some grammars such as the following actually require at
+	 *      least one recursive call to correctly compute the lookahead:
+	 *
+	 *      a : L ID R
+	 *        | b
+	 *        ;
+	 *      b : ID
+	 *        | L a R
+	 *        ;
+	 *
+	 *      Input L ID R is ambiguous but to figure this out, ANTLR
+	 *      needs to go a->b->a->b to find the L ID sequence.
+	 *
+	 *      Do not allow closure to add a configuration that would
+	 *      allow too much recursion.
+	 *
+	 *      This case also catches infinite left recursion.
+	 */
+	public void closure(NFAState p,
+						int alt,
+						NFAContext context,
+						SemanticContext semanticContext,
+						DFAState d,
+						boolean collectPredicates)
+	{
+		if ( debug ){
+			System.out.println("closure at NFA state "+p.stateNumber+"|"+
+							   alt+" filling DFA state "+d.stateNumber+" with context "+context
+							   );
+		}
+
+		if ( d.abortedDueToMultipleRecursiveAlts ) {
+			// keep walking back out, we're in the process of terminating
+			// this closure operation on NFAState p contained with DFAState d
+			return;
+		}
+
+		/* NOT SURE WE NEED THIS FAILSAFE NOW 11/8/2006 and it complicates
+		   MY ALGORITHM TO HAVE TO ABORT ENTIRE DFA CONVERSION
+		   */
+		if ( DFA.MAX_TIME_PER_DFA_CREATION>0 &&
+			 System.currentTimeMillis() - d.dfa.conversionStartTime >=
+			 DFA.MAX_TIME_PER_DFA_CREATION )
+		{
+			// report and back your way out; we've blown up somehow
+			dfa.probe.reportEarlyTermination();
+			return;
+		}
+
+		NFAConfiguration proposedNFAConfiguration =
+				new NFAConfiguration(p.stateNumber,
+						alt,
+						context,
+						semanticContext);
+
+		// Avoid infinite recursion
+		if ( closureIsBusy(d, proposedNFAConfiguration) ) {
+			if ( debug ) {
+				System.out.println("avoid visiting exact closure computation NFA config: "+proposedNFAConfiguration);
+				System.out.println("state is "+d.dfa.decisionNumber+"."+d);
+			}
+			return;
+		}
+
+		// set closure to be busy for this NFA configuration
+		d.closureBusy.add(proposedNFAConfiguration);
+
+		// p itself is always in closure
+		d.addNFAConfiguration(p, proposedNFAConfiguration);
+
+		// Case 1: are we a reference to another rule?
+		Transition transition0 = p.transition(0);
+		if ( transition0 instanceof RuleClosureTransition ) {
+			int depth = context.recursionDepthEmanatingFromState(p.stateNumber);
+			// Detect recursion by more than a single alt, which indicates
+			// that the decision's lookahead language is non-regular; terminate
+			if ( depth == 1 && d.dfa.getUserMaxLookahead()==0 ) { // k=* only
+			//if ( depth >= NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK ) {
+				d.dfa.recursiveAltSet.add(alt); // indicate that this alt is recursive
+				if ( d.dfa.recursiveAltSet.size()>1 ) {
+					//System.out.println("recursive alts: "+d.dfa.recursiveAltSet.toString());
+					d.abortedDueToMultipleRecursiveAlts = true;
+					return;
+				}
+				/*
+				System.out.println("alt "+alt+" in rule "+p.enclosingRule+" dec "+d.dfa.decisionNumber+
+					" ctx: "+context);
+				System.out.println("d="+d);
+				*/
+			}
+			// Detect an attempt to recurse too high
+			// if this context has hit the max recursions for p.stateNumber,
+			// don't allow it to enter p.stateNumber again
+			if ( depth >= NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK ) {
+				/*
+				System.out.println("OVF state "+d);
+				System.out.println("proposed "+proposedNFAConfiguration);
+				*/
+				d.dfa.probe.reportRecursiveOverflow(d, proposedNFAConfiguration);
+				d.abortedDueToRecursionOverflow = true;
+				return;
+			}
+
+			// otherwise, it's cool to (re)enter target of this rule ref
+			RuleClosureTransition ref = (RuleClosureTransition)transition0;
+			// first create a new context and push onto call tree,
+			// recording the fact that we are invoking a rule and
+			// from which state (case 2 below will get the following state
+			// via the RuleClosureTransition emanating from the invoking state
+			// pushed on the stack).
+			// Reset the context to reflect the fact we invoked rule
+			NFAContext newContext = new NFAContext(context, p);
+			// System.out.print("invoking rule "+nfa.getGrammar().getRuleName(ref.getRuleIndex()));
+			// System.out.println(" context="+context);
+			// traverse epsilon edge to new rule
+			NFAState ruleTarget = (NFAState)ref.target;
+			closure(ruleTarget, alt, newContext, semanticContext, d, collectPredicates);
+		}
+		// Case 2: end of rule state, context (i.e., an invoker) exists
+		else if ( p.isAcceptState() && context.parent!=null ) {
+			NFAState whichStateInvokedRule = context.invokingState;
+			RuleClosureTransition edgeToRule =
+				(RuleClosureTransition)whichStateInvokedRule.transition(0);
+			NFAState continueState = edgeToRule.getFollowState();
+			NFAContext newContext = context.parent; // "pop" invoking state
+			closure(continueState, alt, newContext, semanticContext, d, collectPredicates);
+		}
+		/*
+		11/27/2005: I tried adding this but it highlighted that
+		lexer rules needed to be called from Tokens not just ref'd directly
+		so their contexts are different for F : I '.' ;  I : '0' ;  otherwise
+		we get an ambiguity.  The context of state following '0' has same
+		NFA state with [6 $] and [$] hence they conflict.  We need to get
+		the other stack call in there.
+		else if ( dfa.nfa.grammar.type == Grammar.LEXER &&
+			      p.isAcceptState() &&
+			context.invokingState.enclosingRule.equals("Tokens") )
+		{
+			// hit the end of a lexer rule when no one has invoked that rule
+			// (this will be the case if Tokens rule analysis reaches the
+			// stop state of a token in its alt list).
+			// Must not follow the FOLLOW links; must return
+			return;
+		}
+		*/
+		// Case 3: end of rule state, nobody invoked this rule (no context)
+		//    Fall thru to be handled by case 4 automagically.
+		// Case 4: ordinary NFA->DFA conversion case: simple epsilon transition
+		else {
+			// recurse down any epsilon transitions
+			if ( transition0!=null && transition0.isEpsilon() ) {
+				closure((NFAState)transition0.target,
+						alt,
+						context,
+						semanticContext,
+						d,
+						collectPredicates);
+			}
+			else if ( transition0!=null && transition0.isSemanticPredicate() ) {
+				// continue closure here too, but add the sem pred to ctx
+				SemanticContext newSemanticContext = semanticContext;
+				if ( collectPredicates ) {
+					// AND the previous semantic context with new pred
+					SemanticContext labelContext =
+						transition0.label.getSemanticContext();
+					// do not hoist syn preds from other rules; only get if in
+					// starting state's rule (i.e., context is empty)
+					int walkAlt =
+						dfa.decisionNFAStartState.translateDisplayAltToWalkAlt(dfa, alt);
+					NFAState altLeftEdge =
+						dfa.nfa.grammar.getNFAStateForAltOfDecision(dfa.decisionNFAStartState,walkAlt);
+					/*
+					System.out.println("state "+p.stateNumber+" alt "+alt+" walkAlt "+walkAlt+" trans to "+transition0.target);
+					System.out.println("DFA start state "+dfa.decisionNFAStartState.stateNumber);
+					System.out.println("alt left edge "+altLeftEdge.stateNumber+
+						", epsilon target "+
+						altLeftEdge.transition(0).target.stateNumber);
+					*/
+					if ( !labelContext.isSyntacticPredicate() ||
+						 p==altLeftEdge.transition(0).target )
+					{
+						//System.out.println("&"+labelContext+" enclosingRule="+p.enclosingRule);
+						newSemanticContext =
+							SemanticContext.and(semanticContext, labelContext);
+					}
+				}
+				closure((NFAState)transition0.target,
+						alt,
+						context,
+						newSemanticContext,
+						d,
+						collectPredicates);
+			}
+			// second epsilon edge, if any (NFA states have at most two)
+			Transition transition1 = p.transition(1);
+			if ( transition1!=null && transition1.isEpsilon() ) {
+				closure((NFAState)transition1.target,
+						alt,
+						context,
+						semanticContext,
+						d,
+						collectPredicates);
+			}
+		}
+
+		// don't remove "busy" flag as we want to prevent all
+		// references to same config of state|alt|ctx|semCtx even
+		// if resulting from another NFA state
+	}
+
+	/** A closure operation should abort if that computation has already
+	 *  been done or a computation with a conflicting context has already
+	 *  been done.  If proposed NFA config's state and alt are the same
+	 *  there is potentially a problem.  If the stack context is identical
+	 *  then clearly the exact same computation is proposed.  If a context
+	 *  is a suffix of the other, then again the computation is in an
+	 *  identical context.  ?$ and ??$ are considered the same stack.
+	 *  We have to walk configurations linearly doing the comparison instead
+	 *  of a set for exact matches.
+	 *
+	 *  We cannot use a set hash table for this lookup as contexts that are
+	 *  suffixes could be !equal() but their hashCode()s would be different;
+	 *  that's a problem for a HashSet.  This costs a lot actually, it
+	 *  takes about 490ms vs 355ms for Java grammar's analysis phase when
+	 *  I moved away from hash lookup.  Argh!  Still it's small.  For newbie
+	 *  generated grammars though this really speeds things up because it
+	 *  avoids chasing its tail during closure operations on highly left-
+	 *  recursive grammars.
+	 *
+	 *  Ok, backing this out to use exact match again for speed.  We will
+	 *  always detect the conflict later when checking for context suffixes...
+	 *  I was just trying to prevent unnecessary closures for random crap
+	 *  submitted by newbies.  Instead now I check for left-recursive stuff
+	 *  and terminate before analysis obviates the need to do this more
+	 *  expensive computation.
+	 *
+	 *  If the semantic context is different, then allow new computation.
+	 *
+	 *  @param d the DFA state whose closureBusy set is consulted
+	 *  @param proposedNFAConfiguration the state|alt|context|semCtx tuple
+	 *         about to be closed over
+	 *  @return true if this exact configuration has already been processed
+	 */
+	public static boolean closureIsBusy(DFAState d,
+										NFAConfiguration proposedNFAConfiguration)
+	{
+		// Check epsilon cycle (same state, same alt, same context);
+		// exact-match lookup only — suffix conflicts are caught later
+		return d.closureBusy.contains(proposedNFAConfiguration);
+		/*
+		// Uncomment to get all conflicts not just exact context matches
+		// NOTE(review): this dead code references 'dfa' and 'p', neither of
+		// which is in scope of this static method; it would need rework
+		// before it could be re-enabled.
+		for (int i = 0; i < d.closureBusy.size(); i++) {
+			NFAConfiguration c = (NFAConfiguration) d.closureBusy.get(i);
+			if ( proposedNFAConfiguration.state==c.state &&
+				 proposedNFAConfiguration.alt==c.alt &&
+				 proposedNFAConfiguration.semanticContext.equals(c.semanticContext) &&
+				 proposedNFAConfiguration.context.suffix(c.context) )
+			{
+				// if computing closure of start state, we tried to
+				// recompute a closure, must be left recursion.  We got back
+				// to the same computation.  After having consumed no input,
+				// we're back.  Only track rule invocation states
+				if ( (dfa.startState==null ||
+					  d.stateNumber==dfa.startState.stateNumber) &&
+					 p.transition(0) instanceof RuleClosureTransition )
+				{
+					d.dfa.probe.reportLeftRecursion(d, proposedNFAConfiguration);
+				}
+				return true;
+			}
+		}
+		return false;
+		*/
+	}
+
+	/** Given the set of NFA states in DFA state d, find all NFA states
+	 *  reachable traversing label arcs.  By definition, there can be
+	 *  only one DFA state reachable by an atom from DFA state d so we must
+	 *  find and merge all NFA states reachable via label.  Return a new
+	 *  DFAState that has all of those NFA states with their context (i.e.,
+	 *  which alt do they predict and where to return to if they fall off
+	 *  end of a rule).
+	 *
+	 *  Because we cannot jump to another rule nor fall off the end of a rule
+	 *  via a non-epsilon transition, NFA states reachable from d have the
+	 *  same configuration as the NFA state in d.  So if NFA state 7 in d's
+	 *  configurations can reach NFA state 13 then 13 will be added to the
+	 *  new DFAState (labelDFATarget) with the same configuration as state
+	 *  7 had.
+	 *
+	 *  This method does not see EOT transitions off the end of token rule
+	 *  accept states if the rule was invoked by somebody.
+	 *
+	 *  @param d the source DFA state
+	 *  @param label the atom or set label to traverse
+	 *  @return the new DFA state reachable on label, or null if no
+	 *          configuration in d can move on label
+	 */
+	public DFAState reach(DFAState d, Label label) {
+		DFAState labelDFATarget = dfa.newState();
+		// for each NFA state in d, add in target states for label
+		int intLabel = label.getAtom();
+		IntSet setLabel = label.getSet();
+		Iterator iter = d.getNFAConfigurations().iterator();
+		while ( iter.hasNext() ) {
+			NFAConfiguration c = (NFAConfiguration)iter.next();
+			if ( c.resolved || c.resolveWithPredicate ) {
+				continue; // the conflict resolver indicates we must leave alone
+			}
+			NFAState p = dfa.nfa.getState(c.state);
+			// by design of the grammar->NFA conversion, only transition 0
+			// may have a non-epsilon edge.
+			Transition edge = p.transition(0);
+			if ( edge==null || !c.singleAtomTransitionEmanating ) {
+				continue;
+			}
+			Label edgeLabel = edge.label;
+
+			// SPECIAL CASE
+			// if it's an EOT transition on end of lexer rule, but context
+			// stack is not empty, then don't see the EOT; the closure
+			// will have added in the proper states following the reference
+			// to this rule in the invoking rule.  In other words, if
+			// somebody called this rule, don't see the EOT emanating from
+			// this accept state.
+			if ( c.context.parent!=null &&
+				 edgeLabel.isAtom() &&
+				 edgeLabel.getAtom()==Label.EOT )
+			{
+				continue;
+			}
+
+			// Labels not unique at this point (not until addReachableLabels)
+			// so try simple int label match before general set intersection
+			//System.out.println("comparing "+edgeLabel+" with "+label);
+			// NOTE(review): the second clause always evaluates
+			// edgeLabel.getSet() even when the atom comparison fails;
+			// this relies on getSet() being well-defined for atom labels —
+			// confirm against Label's implementation.
+			boolean matched =
+				(!label.isSet()&&edgeLabel.getAtom()==intLabel)||
+				(!edgeLabel.getSet().and(setLabel).isNil());
+			if ( matched ) {
+				// found a transition with label;
+				// add NFA target to (potentially) new DFA state
+                labelDFATarget.addNFAConfiguration(
+					(NFAState)edge.target,
+					c.alt,
+					c.context,
+					c.semanticContext);
+			}
+		}
+        if ( labelDFATarget.getNFAConfigurations().size()==0 ) {
+            // kill; it's empty
+            dfa.setState(labelDFATarget.stateNumber, null);
+            labelDFATarget = null;
+        }
+        return labelDFATarget;
+	}
+
+	/** Scan d's configurations for one whose NFA state has an outgoing
+	 *  EOT transition.  If such a configuration is found, d is converted
+	 *  into an accept state predicting that configuration's alt: d's
+	 *  configuration set is blasted and replaced with just that one
+	 *  config.  Only the first EOT configuration found is used.
+	 *
+	 *  TODO: can there be more than one config with EOT transition?
+	 *  That would mean that two NFA configurations could reach the
+	 *  end of the token with possibly different predicted alts.
+	 *  Seems like that would be rare or impossible.  Perhaps convert
+	 *  this routine to find all such configs and give error if >1.
+	 */
+	protected void convertToEOTAcceptState(DFAState d) {
+		Label eotLabel = new Label(Label.EOT);
+		for (Iterator it = d.getNFAConfigurations().iterator(); it.hasNext();) {
+			NFAConfiguration config = (NFAConfiguration)it.next();
+			// leave alone anything the conflict resolver already handled
+			if ( config.resolved || config.resolveWithPredicate ) {
+				continue;
+			}
+			NFAState nfaState = dfa.nfa.getState(config.state);
+			Label firstEdgeLabel = nfaState.transition(0).label;
+			if ( !firstEdgeLabel.equals(eotLabel) ) {
+				continue;
+			}
+			// found a config with an EOT edge; collapse d down to it
+			d.setAcceptState(true);
+			d.getNFAConfigurations().clear();
+			d.addNFAConfiguration(nfaState,
+								  config.alt,
+								  config.context,
+								  config.semanticContext);
+			return; // assume only one EOT transition
+		}
+	}
+
+	/** Add a new DFA state to the DFA if not already present.
+     *  If the DFA state uniquely predicts a single alternative, it
+     *  becomes a stop state; don't add to work list.  Further, if
+     *  there exists an NFA state predicted by > 1 different alternatives
+     *  and with the same syn and sem context, the DFA is nondeterministic for
+     *  at least one input sequence reaching that NFA state.
+     *
+     *  @param d the freshly computed DFA state
+     *  @return d itself, an equivalent pre-existing state, or the accept
+     *          state d was converted/merged into
+     */
+    protected DFAState addDFAStateToWorkList(DFAState d) {
+        DFAState existingState = dfa.addState(d);
+		if ( d != existingState ) {
+			// already there...use/return the existing DFA state.
+			// But also set the states[d.stateNumber] to the existing
+			// DFA state because the closureIsBusy must report
+			// infinite recursion on a state before it knows
+			// whether or not the state will already be
+			// found after closure on it finishes.  It could
+			// refer to a state that will ultimately not make it
+			// into the reachable state space and the error
+			// reporting must be able to compute the path from
+			// start to the error state with infinite recursion
+			dfa.setState(d.stateNumber, existingState);
+			return existingState;
+		}
+
+		// if not there, then examine new state.
+
+		// resolve syntactic conflicts by choosing a single alt or
+        // by using semantic predicates if present.
+        resolveNonDeterminisms(d);
+
+        // If deterministic, don't add this state; it's an accept state
+        // Just return as a valid DFA state
+		int alt = d.getUniquelyPredictedAlt();
+		if ( alt!=NFA.INVALID_ALT_NUMBER ) { // uniquely predicts an alt?
+			d = convertToAcceptState(d, alt);
+			/*
+			System.out.println("convert to accept; DFA "+d.dfa.decisionNumber+" state "+d.stateNumber+" uniquely predicts alt "+
+				d.getUniquelyPredictedAlt());
+				*/
+		}
+		else {
+            // unresolved, add to work list to continue NFA conversion
+            work.add(d);
+        }
+        return d;
+    }
+
+	/** Make DFA state d an accept state predicting alt.
+	 *
+	 *  When stop-state merging is enabled and d is clean (deterministic,
+	 *  no recursion-overflow aborts), try to reuse an existing accept
+	 *  state for the same alt.  Reuse is allowed only when both states
+	 *  agree on their gated semantic predicate context: for now we assume
+	 *  a braindead version — both must have no gated preds or share
+	 *  exactly the same single gated pred (equals() is only defined on
+	 *  Predicate contexts, not OR etc...).  Merging is restricted because
+	 *  error reporting may later want to trace the path from the start
+	 *  state to a nondeterministic state.
+	 *
+	 *  @param d the DFA state to convert; discarded if merged
+	 *  @param alt the alternative this state predicts
+	 *  @return the accept state to use: either d itself or the
+	 *          pre-existing equivalent accept state for alt
+	 */
+	protected DFAState convertToAcceptState(DFAState d, int alt) {
+		if ( DFAOptimizer.MERGE_STOP_STATES &&
+			d.getNonDeterministicAlts()==null &&
+			!d.abortedDueToRecursionOverflow &&
+			!d.abortedDueToMultipleRecursiveAlts )
+		{
+			// check to see if we already have an accept state for this alt
+			// [must do this after we resolve nondeterminisms in general]
+			DFAState acceptStateForAlt = dfa.getAcceptState(alt);
+			if ( acceptStateForAlt!=null ) {
+				// we already have an accept state for alt;
+				// are their gated sem pred contexts the same?
+				SemanticContext gatedPreds = d.getGatedPredicatesInNFAConfigurations();
+				SemanticContext existingStateGatedPreds =
+					acceptStateForAlt.getGatedPredicatesInNFAConfigurations();
+				if ( (gatedPreds==null && existingStateGatedPreds==null) ||
+				     ((gatedPreds!=null && existingStateGatedPreds!=null) &&
+					  gatedPreds.equals(existingStateGatedPreds)) )
+				{
+					// make this d.stateNumber point at old DFA state
+					dfa.setState(d.stateNumber, acceptStateForAlt);
+					dfa.removeState(d);    // remove this state from unique DFA state set
+					return acceptStateForAlt; // use old accept state; throw this one out
+				}
+				// else consider it a new accept state; fall through.
+			}
+		}
+		// shared tail (the original duplicated these three lines inside
+		// and after the merge branch with identical effect)
+		d.setAcceptState(true); // new accept state for alt
+		dfa.setAcceptState(alt, d);
+		return d;
+	}
+
+	/** If > 1 NFA configurations within this DFA state have identical
+	 *  NFA state and context, but differ in their predicted
+	 *  TODO update for new context suffix stuff 3-9-2005
+	 *  alternative then a single input sequence predicts multiple alts.
+	 *  The NFA decision is therefore syntactically indistinguishable
+	 *  from the left edge upon at least one input sequence.  We may
+	 *  terminate the NFA to DFA conversion for these paths since no
+	 *  paths emanating from those NFA states can possibly separate
+	 *  these conjoined twins once intertwined to make things
+	 *  deterministic (unless there are semantic predicates; see below).
+	 *
+	 *  Upon a nondeterministic set of NFA configurations, we should
+	 *  report a problem to the grammar designer and resolve the issue
+	 *  by arbitrarily picking the first alternative (this usually
+	 *  ends up producing the most natural behavior).  Pick the lowest
+	 *  alt number and just turn off all NFA configurations
+	 *  associated with the other alts. Rather than remove conflicting
+	 *  NFA configurations, I set the "resolved" bit so that future
+	 *  computations will ignore them.  In this way, we maintain the
+	 *  complete DFA state with all its configurations, but prevent
+	 *  future DFA conversion operations from pursuing undesirable
+	 *  paths.  Remember that we want to terminate DFA conversion as
+	 *  soon as we know the decision is deterministic *or*
+	 *  nondeterministic.
+	 *
+	 *  [BTW, I have convinced myself that there can be at most one
+	 *  set of nondeterministic configurations in a DFA state.  Only NFA
+	 *  configurations arising from the same input sequence can appear
+	 *  in a DFA state.  There is no way to have another complete set
+	 *  of nondeterministic NFA configurations without another input
+	 *  sequence, which would reach a different DFA state.  Therefore,
+	 *  the two nondeterministic NFA configuration sets cannot collide
+	 *  in the same DFA state.]
+	 *
+	 *  Consider DFA state {(s|1),(s|2),(s|3),(t|3),(v|4)} where (s|a)
+	 *  is state 's' and alternative 'a'.  Here, configuration set
+	 *  {(s|1),(s|2),(s|3)} predicts 3 different alts.  Configurations
+	 *  (s|2) and (s|3) are "resolved", leaving {(s|1),(t|3),(v|4)} as
+	 *  items that must still be considered by the DFA conversion
+	 *  algorithm in DFA.findNewDFAStatesAndAddDFATransitions().
+	 *
+	 *  Consider the following grammar where alts 1 and 2 are no
+	 *  problem because of the 2nd lookahead symbol.  Alts 3 and 4 are
+	 *  identical and will therefore reach the rule end NFA state but
+	 *  predicting 2 different alts (no amount of future lookahead
+	 *  will render them deterministic/separable):
+	 *
+	 *  a : A B
+	 *    | A C
+	 *    | A
+	 *    | A
+	 *    ;
+	 *
+	 *  Here is a (slightly reduced) NFA of this grammar:
+	 *
+	 *  (1)-A->(2)-B->(end)-EOF->(8)
+	 *   |              ^
+	 *  (2)-A->(3)-C----|
+	 *   |              ^
+	 *  (4)-A->(5)------|
+	 *   |              ^
+	 *  (6)-A->(7)------|
+	 *
+	 *  where (n) is NFA state n.  To begin DFA conversion, the start
+	 *  state is created:
+	 *
+	 *  {(1|1),(2|2),(4|3),(6|4)}
+	 *
+	 *  Upon A, all NFA configurations lead to new NFA states yielding
+	 *  new DFA state:
+	 *
+	 *  {(2|1),(3|2),(5|3),(7|4),(end|3),(end|4)}
+	 *
+	 *  where the configurations with state end in them are added
+	 *  during the epsilon closure operation.  State end predicts both
+	 *  alts 3 and 4.  An error is reported, the latter configuration is
+	 *  flagged as resolved leaving the DFA state as:
+	 *
+	 *  {(2|1),(3|2),(5|3),(7|4|resolved),(end|3),(end|4|resolved)}
+	 *
+	 *  As NFA configurations are added to a DFA state during its
+	 *  construction, the reachable set of labels is computed.  Here
+	 *  reachable is {B,C,EOF} because there is at least one NFA state
+	 *  in the DFA state that can transition upon those symbols.
+	 *
+	 *  The final DFA looks like:
+	 *
+	 *  {(1|1),(2|2),(4|3),(6|4)}
+	 *              |
+	 *              v
+	 *  {(2|1),(3|2),(5|3),(7|4),(end|3),(end|4)} -B-> (end|1)
+	 *              |                        |
+	 *              C                        ----EOF-> (8,3)
+	 *              |
+	 *              v
+	 *           (end|2)
+	 *
+	 *  Upon AB, alt 1 is predicted.  Upon AC, alt 2 is predicted.
+	 *  Upon A EOF, alt 3 is predicted.  Alt 4 is not a viable
+	 *  alternative.
+	 *
+	 *  The algorithm is essentially to walk all the configurations
+	 *  looking for a conflict of the form (s|i) and (s|j) for i!=j.
+	 *  Use a hash table to track state+context pairs for collisions
+	 *  so that we have O(n) to walk the n configurations looking for
+	 *  a conflict.  Upon every conflict, track the alt number so
+	 *  we have a list of all nondeterministically predicted alts. Also
+	 *  track the minimum alt.  Next go back over the configurations, setting
+	 *  the "resolved" bit for any that have an alt that is a member of
+	 *  the nondeterministic set.  This will effectively remove any alts
+	 *  but the one we want from future consideration.
+	 *
+	 *  See resolveWithSemanticPredicates()
+	 *
+	 *  AMBIGUOUS TOKENS
+	 *
+	 *  With keywords and ID tokens, there is an inherent ambiguity in that
+	 *  "int" can be matched by ID also.  Each lexer rule has an EOT
+	 *  transition emanating from it which is used whenever the end of
+	 *  a rule is reached and another token rule did not invoke it.  EOT
+	 *  is the only thing that can be seen next.  If two rules are identical
+	 *  like "int" and "int" then the 2nd def is unreachable and you'll get
+	 *  a warning.  We prevent a warning though for the keyword/ID issue as
+	 *  ID is still reachable.  This can be a bit weird.  '+' rule then a
+	 *  '+'|'+=' rule will fail to match '+' for the 2nd rule.
+	 *
+	 *  If all NFA states in this DFA state are targets of EOT transitions,
+	 *  (and there is more than one state plus no unique alt is predicted)
+	 *  then DFA conversion will leave this state as a dead state as nothing
+	 *  can be reached from this state.  To resolve the ambiguity, just do
+	 *  what flex and friends do: pick the first rule (alt in this case) to
+	 *  win.  This means you should put keywords before the ID rule.
+	 *  If the DFA state has only one NFA state then there is no issue:
+	 *  it uniquely predicts one alt. :)  Problem
+	 *  states will look like this during conversion:
+	 *
+	 *  DFA 1:{9|1, 19|2, 14|3, 20|2, 23|2, 24|2, ...}-<EOT>->5:{41|3, 42|2}
+	 *
+	 *  Worse, when you have two identical literal rules, you will see 3 alts
+	 *  in the EOT state (one for ID and one each for the identical rules).
+	 */
+	public void resolveNonDeterminisms(DFAState d) {
+		if ( debug ) {
+			System.out.println("resolveNonDeterminisms "+d.toString());
+		}
+		boolean conflictingLexerRules = false;
+		Set nondeterministicAlts = d.getNonDeterministicAlts();
+		if ( debug && nondeterministicAlts!=null ) {
+			System.out.println("nondet alts="+nondeterministicAlts);
+		}
+
+		// CHECK FOR AMBIGUOUS EOT (if |allAlts|>1 and EOT state, resolve)
+		// grab any config to see if EOT state; any other configs must
+		// transition on EOT to get to this DFA state as well so all
+		// states in d must be targets of EOT.  These are the end states
+		// created in NFAFactory.build_EOFState
+		// NOTE(review): itr.next() is called without hasNext(); assumes d
+		// always has at least one configuration — confirm with callers.
+		NFAConfiguration anyConfig;
+		Iterator itr = d.nfaConfigurations.iterator();
+        anyConfig = (NFAConfiguration)itr.next();
+		NFAState anyState = dfa.nfa.getState(anyConfig.state);
+		// if d is target of EOT and more than one predicted alt
+		// indicate that d is nondeterministic on all alts otherwise
+		// it looks like state has no problem
+		if ( anyState.isEOTTargetState() ) {
+			Set allAlts = d.getAltSet();
+			// is more than 1 alt predicted?
+			if ( allAlts!=null && allAlts.size()>1 ) {
+				nondeterministicAlts = allAlts;
+				// track Tokens rule issues differently than other decisions
+				if ( d.dfa.isTokensRuleDecision() ) {
+					dfa.probe.reportLexerRuleNondeterminism(d,allAlts);
+					//System.out.println("Tokens rule DFA state "+d+" nondeterministic");
+					conflictingLexerRules = true;
+				}
+			}
+		}
+
+		// if no problems return unless we aborted work on d to avoid inf recursion
+		if ( !d.abortedDueToRecursionOverflow && nondeterministicAlts==null ) {
+			return; // no problems, return
+		}
+
+		// if we're not a conflicting lexer rule and we didn't abort, report ambig
+		// We should get a report for abort so don't give another
+		if ( !d.abortedDueToRecursionOverflow && !conflictingLexerRules ) {
+			// TODO: with k=x option set, this is called twice for same state
+			dfa.probe.reportNondeterminism(d, nondeterministicAlts);
+			// TODO: how to turn off when it's only the FOLLOW that is
+			// conflicting.  This used to shut off even alts i,j < n
+			// conflict warnings. :(
+			/*
+			if ( dfa.isGreedy() ) {
+				// if nongreedy then they have said to let it fall out of loop
+				// don't report the problem
+				dfa.probe.reportNondeterminism(d);
+			}
+			else {
+				// TODO: remove when sure it's cool
+				dfa.probe.reportNondeterminism(d);
+				System.out.println("temp warning: warning suppressed for nongreedy loop");
+			}
+			*/
+		}
+
+		// ATTEMPT TO RESOLVE WITH SEMANTIC PREDICATES
+		boolean resolved =
+			tryToResolveWithSemanticPredicates(d, nondeterministicAlts);
+		if ( resolved ) {
+			// predicates fully disambiguate; mark and report, nothing to turn off
+			d.resolvedWithPredicates = true;
+			dfa.probe.reportNondeterminismResolvedWithSemanticPredicate(d);
+			return;
+		}
+
+		// RESOLVE SYNTACTIC CONFLICT BY REMOVING ALL BUT ONE ALT
+		resolveByChoosingFirstAlt(d, nondeterministicAlts);
+
+		//System.out.println("state "+d.stateNumber+" resolved to alt "+winningAlt);
+	}
+
+	/** Resolve a nondeterministic state by syntax alone.  Greedy
+	 *  decisions take the lowest conflicting alt.  Nongreedy decisions
+	 *  prefer the exit alt, but only when the exit alt actually
+	 *  participates in the nondeterminism; otherwise they also fall
+	 *  back to the lowest alt.
+	 *
+	 *  @return the alt number that won the conflict
+	 */
+	protected int resolveByChoosingFirstAlt(DFAState d, Set nondeterministicAlts) {
+		if ( dfa.isGreedy() ) {
+			return resolveByPickingMinAlt(d, nondeterministicAlts);
+		}
+		// Nongreedy: the exit alt should win, but only if it's
+		// involved in the nondeterminism!
+		int exitAlt = dfa.getNumberOfAlts();
+		if ( nondeterministicAlts.contains(Utils.integer(exitAlt)) ) {
+			// exit alt is one of the nondeterministic alts predicted;
+			// resolve in favor of what follows the block
+			return resolveByPickingExitAlt(d, nondeterministicAlts);
+		}
+		return resolveByPickingMinAlt(d, nondeterministicAlts);
+	}
+
+	/** Turn off all configurations associated with the set of incoming
+	 *  nondeterministic alts except those for the minimum alt number.
+	 *  There may be many alts among the configurations, but only the
+	 *  problematic ones (other than the min alt, of course) get shut off.
+	 *
+	 *  A null nondeterministicAlts means every configuration except those
+	 *  for the overall minimum alt gets turned off.
+	 *
+	 *  @return the minimum alt found
+	 */
+	protected int resolveByPickingMinAlt(DFAState d, Set nondeterministicAlts) {
+		int min = (nondeterministicAlts != null)
+			? getMinAlt(nondeterministicAlts)
+			: getMinAlt(d); // no explicit conflict set; scan the configs
+		turnOffOtherAlts(d, min, nondeterministicAlts);
+		return min;
+	}
+
+	/** Resolve state d by choosing exit alt, which is same value as the
+	 *  number of alternatives.  Return that exit alt.
+	 *
+	 *  @param d the nondeterministic DFA state to resolve
+	 *  @param nondeterministicAlts the conflicting alts (others untouched)
+	 *  @return the exit alt number
+	 */
+	protected int resolveByPickingExitAlt(DFAState d, Set nondeterministicAlts) {
+		int exitAlt = dfa.getNumberOfAlts();
+		turnOffOtherAlts(d, exitAlt, nondeterministicAlts);
+		return exitAlt;
+	}
+
+	/** Mark as resolved every configuration predicting an alt other than
+	 *  min, provided that alt is among the nondeterministic ones.  A null
+	 *  nondeterministicAlts means every non-min alt is shut off.
+	 */
+	protected static void turnOffOtherAlts(DFAState d, int min, Set nondeterministicAlts) {
+		for (Iterator it = d.nfaConfigurations.iterator(); it.hasNext();) {
+			NFAConfiguration config = (NFAConfiguration)it.next();
+			if ( config.alt==min ) {
+				continue; // the winning alt stays live
+			}
+			boolean inConflictSet =
+				nondeterministicAlts==null ||
+				nondeterministicAlts.contains(Utils.integer(config.alt));
+			if ( inConflictSet ) {
+				config.resolved = true;
+			}
+		}
+	}
+
+	/** Smallest alt number mentioned by any configuration in d
+	 *  (Integer.MAX_VALUE if d has no configurations).
+	 */
+	protected static int getMinAlt(DFAState d) {
+		int smallest = Integer.MAX_VALUE;
+		for (Iterator it = d.nfaConfigurations.iterator(); it.hasNext();) {
+			NFAConfiguration config = (NFAConfiguration)it.next();
+			smallest = Math.min(smallest, config.alt);
+		}
+		return smallest;
+	}
+
+	/** Smallest Integer in the given set of alt numbers
+	 *  (Integer.MAX_VALUE for an empty set).
+	 */
+	protected static int getMinAlt(Set nondeterministicAlts) {
+		int smallest = Integer.MAX_VALUE;
+		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
+			int alt = ((Integer)it.next()).intValue();
+			if ( alt<smallest ) {
+				smallest = alt;
+			}
+		}
+		return smallest;
+	}
+
+	/** See if a set of nondeterministic alternatives can be disambiguated
+	 *  with the semantic predicate contexts of the alternatives.
+	 *
+	 *  Without semantic predicates, syntactic conflicts are resolved
+	 *  by simply choosing the first viable alternative.  In the
+	 *  presence of semantic predicates, you can resolve the issue by
+	 *  evaluating boolean expressions at run time.  During analysis,
+	 *  this amounts to suppressing grammar error messages to the
+	 *  developer.  NFA configurations are always marked as "to be
+	 *  resolved with predicates" so that
+	 *  DFA.findNewDFAStatesAndAddDFATransitions() will know to ignore
+	 *  these configurations and add predicate transitions to the DFA
+	 *  after adding token/char labels.
+	 *
+	 *  During analysis, we can simply make sure that for n
+	 *  ambiguously predicted alternatives there are at least n-1
+	 *  unique predicate sets.  The nth alternative can be predicted
+	 *  with "not" the "or" of all other predicates.  NFA configurations without
+	 *  predicates are assumed to have the default predicate of
+	 *  "true" from a user point of view.  When true is combined via || with
+	 *  another predicate, the predicate is a tautology and must be removed
+	 *  from consideration for disambiguation:
+	 *
+	 *  a : b | B ; // hoisting p1||true out of rule b, yields no predicate
+	 *  b : {p1}? B | B ;
+	 *
+	 *  This is done down in getPredicatesPerNonDeterministicAlt().
+	 */
+	protected boolean tryToResolveWithSemanticPredicates(DFAState d,
+														 Set nondeterministicAlts)
+	{
+		// map each nondeterministic alt to the OR of its configs' predicates;
+		// alts with any uncovered config get no entry (tautology p||true)
+		Map altToPredMap =
+				getPredicatesPerNonDeterministicAlt(d, nondeterministicAlts);
+
+		if ( altToPredMap.size()==0 ) {
+			return false; // no predicates at all; nothing to resolve with
+		}
+
+		//System.out.println("nondeterministic alts with predicates: "+altToPredMap);
+		dfa.probe.reportAltPredicateContext(d, altToPredMap);
+
+		if ( nondeterministicAlts.size()-altToPredMap.size()>1 ) {
+			// too few predicates to resolve; just return
+			// TODO: actually do we need to gen error here?
+			return false;
+		}
+
+		// Handle case where 1 predicate is missing
+		// Case 1. Semantic predicates
+		// If the missing pred is on nth alt, !(union of other preds)==true
+		// so we can avoid that computation.  If naked alt is ith, then must
+		// test it with !(union) since semantic predicated alts are order
+		// independent
+		// Case 2: Syntactic predicates
+		// The naked alt is always assumed to be true as the order of
+		// alts is the order of precedence.  The naked alt will be a tautology
+		// anyway as it's !(union of other preds).  This implies
+		// that there is no such thing as noviable alt for synpred edges
+		// emanating from a DFA state.
+		if ( altToPredMap.size()==nondeterministicAlts.size()-1 ) {
+			// if there are n-1 predicates for n nondeterministic alts, can fix
+			org.antlr.misc.BitSet ndSet = org.antlr.misc.BitSet.of(nondeterministicAlts);
+			org.antlr.misc.BitSet predSet = org.antlr.misc.BitSet.of(altToPredMap);
+			// the naked alt is the one with no predicate of its own
+			int nakedAlt = ndSet.subtract(predSet).getSingleElement();
+			SemanticContext nakedAltPred = null;
+			if ( nakedAlt == max(nondeterministicAlts) ) {
+				// the naked alt is the last nondet alt and will be the default clause
+				nakedAltPred = new SemanticContext.TruePredicate();
+			}
+			else {
+				// pretend naked alternative is covered with !(union other preds)
+				// unless it's a synpred since those have precedence same
+				// as alt order
+				SemanticContext unionOfPredicatesFromAllAlts =
+					getUnionOfPredicates(altToPredMap);
+				//System.out.println("all predicates "+unionOfPredicatesFromAllAlts);
+				if ( unionOfPredicatesFromAllAlts.isSyntacticPredicate() ) {
+					nakedAltPred = new SemanticContext.TruePredicate();
+				}
+				else {
+					nakedAltPred =
+						SemanticContext.not(unionOfPredicatesFromAllAlts);
+				}
+			}
+
+			//System.out.println("covering naked alt="+nakedAlt+" with "+nakedAltPred);
+
+			altToPredMap.put(Utils.integer(nakedAlt), nakedAltPred);
+			// set all config with alt=nakedAlt to have the computed predicate
+			Iterator iter = d.nfaConfigurations.iterator();
+			NFAConfiguration configuration;
+			while (iter.hasNext()) {
+				configuration = (NFAConfiguration) iter.next();
+				if ( configuration.alt == nakedAlt ) {
+					configuration.semanticContext = nakedAltPred;
+				}
+			}
+		}
+
+		if ( altToPredMap.size()==nondeterministicAlts.size() ) {
+			// RESOLVE CONFLICT by picking one NFA configuration for each alt
+			// and setting its resolvedWithPredicate flag
+			// First, prevent a recursion warning on this state due to
+			// pred resolution
+			if ( d.abortedDueToRecursionOverflow ) {
+				d.dfa.probe.removeRecursiveOverflowState(d);
+			}
+			Iterator iter = d.nfaConfigurations.iterator();
+			NFAConfiguration configuration;
+			while (iter.hasNext()) {
+				configuration = (NFAConfiguration) iter.next();
+				SemanticContext semCtx = (SemanticContext)
+						altToPredMap.get(Utils.integer(configuration.alt));
+				if ( semCtx!=null ) {
+					// resolve (first found) with pred
+					// and remove alt from problem list
+					configuration.resolveWithPredicate = true;
+					configuration.semanticContext = semCtx; // reset to combined
+					altToPredMap.remove(Utils.integer(configuration.alt));
+					// notify grammar that we've used the preds contained in semCtx
+					if ( semCtx.isSyntacticPredicate() ) {
+						dfa.nfa.grammar.synPredUsedInDFA(dfa, semCtx);
+					}
+				}
+				else if ( nondeterministicAlts.contains(Utils.integer(configuration.alt)) ) {
+					// resolve all configurations for nondeterministic alts
+					// for which there is no predicate context by turning it off
+					configuration.resolved = true;
+				}
+			}
+			return true;
+		}
+
+		return false;  // couldn't fix the problem with predicates
+	}
+
+	/** Return a mapping from nondeterministic alt to combined list of predicates.
+	 *  If both (s|i|semCtx1) and (t|i|semCtx2) exist, then the proper predicate
+	 *  for alt i is semCtx1||semCtx2 because you have arrived at this single
+	 *  DFA state via two NFA paths, both of which have semantic predicates.
+	 *  We ignore deterministic alts because syntax alone is sufficient
+	 *  to predict those.  Do not include their predicates.
+	 *
+	 *  Alts with no predicate are assumed to have {true}? pred.
+	 *
+	 *  When combining via || with "true", all predicates are removed from
+	 *  consideration since the expression will always be true and hence
+	 *  not tell us how to resolve anything.  So, if any NFA configuration
+	 *  in this DFA state does not have a semantic context, the alt cannot
+	 *  be resolved with a predicate.
+	 */
+	protected Map getPredicatesPerNonDeterministicAlt(DFAState d,
+													  Set nondeterministicAlts)
+	{
+		// map alt to combined SemanticContext
+		Map altToPredicateContextMap = new HashMap();
+		// init the alt to predicate set map
+		Map altToSetOfContextsMap = new HashMap();
+		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
+			Integer altI = (Integer) it.next();
+			altToSetOfContextsMap.put(altI, new HashSet());
+		}
+		Set altToIncompletePredicateContextSet = new HashSet();
+		Iterator iter = d.nfaConfigurations.iterator();
+		NFAConfiguration configuration;
+		// for each configuration, create a unique set of predicates
+		// Also, track the alts with at least one uncovered configuration
+		// (one w/o a predicate); tracks tautologies like p1||true
+		while (iter.hasNext()) {
+			configuration = (NFAConfiguration) iter.next();
+			Integer altI = Utils.integer(configuration.alt);
+			// if alt is nondeterministic, combine its predicates
+			if ( nondeterministicAlts.contains(altI) ) {
+				// if there is a predicate for this NFA configuration, OR in
+				if ( configuration.semanticContext !=
+					 SemanticContext.EMPTY_SEMANTIC_CONTEXT )
+				{
+					/*
+					SemanticContext altsExistingPred =(SemanticContext)
+							altToPredicateContextMap.get(Utils.integer(configuration.alt));
+					if ( altsExistingPred!=null ) {
+						// must merge all predicates from configs with same alt
+						SemanticContext combinedContext =
+								SemanticContext.or(
+										altsExistingPred,
+										configuration.semanticContext);
+						System.out.println(altsExistingPred+" OR "+
+										   configuration.semanticContext+
+										   "="+combinedContext);
+						altToPredicateContextMap.put(
+								Utils.integer(configuration.alt),
+								combinedContext
+						);
+					}
+					else {
+						// not seen before, just add it
+						altToPredicateContextMap.put(
+								Utils.integer(configuration.alt),
+								configuration.semanticContext
+						);
+					}
+					*/
+					Set predSet = (Set)altToSetOfContextsMap.get(altI);
+					predSet.add(configuration.semanticContext);
+				}
+				else {
+					// if no predicate, but it's part of nondeterministic alt
+					// then at least one path exists not covered by a predicate.
+					// must remove predicate for this alt; track incomplete alts
+					altToIncompletePredicateContextSet.add(altI);
+				}
+			}
+		}
+
+		// For each alt, OR together all unique predicates associated with
+		// all configurations
+		// Also, track the list of incompletely covered alts: those alts
+		// with at least 1 predicate and at least one configuration w/o a
+		// predicate. We want this in order to report to the decision probe.
+		List incompletelyCoveredAlts = new ArrayList();
+		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
+			Integer altI = (Integer) it.next();
+			Set predSet = (Set)altToSetOfContextsMap.get(altI);
+			if ( altToIncompletePredicateContextSet.contains(altI) ) {
+				SemanticContext insufficientPred =(SemanticContext)
+						altToPredicateContextMap.get(altI);
+				if ( predSet.size()>0 ) {
+					incompletelyCoveredAlts.add(altI);
+				}
+				continue;
+			}
+			SemanticContext combinedContext = null;
+			for (Iterator itrSet = predSet.iterator(); itrSet.hasNext();) {
+				SemanticContext ctx = (SemanticContext) itrSet.next();
+				combinedContext =
+						SemanticContext.or(combinedContext,ctx);
+			}
+			altToPredicateContextMap.put(altI, combinedContext);
+		}
+
+		// remove any predicates from incompletely covered alts
+		/*
+		iter = altToIncompletePredicateContextSet.iterator();
+		List incompletelyCoveredAlts = new ArrayList();
+		while (iter.hasNext()) {
+			Integer alt = (Integer) iter.next();
+			SemanticContext insufficientPred =(SemanticContext)
+					altToPredicateContextMap.get(alt);
+			if ( insufficientPred!=null ) {
+				incompletelyCoveredAlts.add(alt);
+			}
+			altToPredicateContextMap.remove(alt);
+		}
+		*/
+
+		if ( incompletelyCoveredAlts.size()>0 ) {
+			dfa.probe.reportIncompletelyCoveredAlts(d,
+													incompletelyCoveredAlts);
+		}
+
+		return altToPredicateContextMap;
+	}
+
+	/** OR together all predicates from the alts.  Note that the predicate
+	 *  for an alt could itself be a combination of predicates.
+	 */
+	protected static SemanticContext getUnionOfPredicates(Map altToPredMap) {
+		SemanticContext union = null;
+		for (Iterator it = altToPredMap.values().iterator(); it.hasNext();) {
+			SemanticContext semCtx = (SemanticContext) it.next();
+			// first predicate seeds the union; later ones are OR'd in
+			union = (union==null) ? semCtx : SemanticContext.or(union, semCtx);
+		}
+		return union;
+	}
+
+	/** for each NFA config in d, look for "predicate required" sign set
+	 *  during nondeterminism resolution.
+	 *
+	 *  Add the predicate edges sorted by the alternative number; I'm fairly
+	 *  sure that I could walk the configs backwards so they are added to
+	 *  the predDFATarget in the right order, but it's best to make sure.
+	 *  Predicates succeed in the order they are specified.  Alt i wins
+	 *  over alt i+1 if both predicates are true.
+	 */
+	protected void addPredicateTransitions(DFAState d) {
+		List configsWithPreds = new ArrayList();
+		// get a list of all configs with predicates
+		Iterator iter = d.getNFAConfigurations().iterator();
+		while ( iter.hasNext() ) {
+			NFAConfiguration c = (NFAConfiguration)iter.next();
+			if ( c.resolveWithPredicate ) {
+				configsWithPreds.add(c);
+			}
+		}
+		// Sort ascending according to alt; alt i has higher precedence than i+1
+		Collections.sort(configsWithPreds,
+			 new Comparator() {
+				 public int compare(Object a, Object b) {
+					 NFAConfiguration ca = (NFAConfiguration)a;
+					 NFAConfiguration cb = (NFAConfiguration)b;
+					 if ( ca.alt < cb.alt ) return -1;
+					 else if ( ca.alt > cb.alt ) return 1;
+					 return 0;
+				 }
+			 });
+		List predConfigsSortedByAlt = configsWithPreds;
+		// Now, we can add edges emanating from d for these preds in right order
+		for (int i = 0; i < predConfigsSortedByAlt.size(); i++) {
+			NFAConfiguration c = (NFAConfiguration)predConfigsSortedByAlt.get(i);
+			// reuse the DFA accept state for this alt if one already exists
+			DFAState predDFATarget = d.dfa.getAcceptState(c.alt);
+			if ( predDFATarget==null ) {
+				predDFATarget = dfa.newState(); // create if not there.
+				// create a new DFA state that is a target of the predicate from d
+				predDFATarget.addNFAConfiguration(dfa.nfa.getState(c.state),
+												  c.alt,
+												  c.context,
+												  c.semanticContext);
+				predDFATarget.setAcceptState(true);
+				// addState may return an equivalent state already in the DFA
+				DFAState existingState = dfa.addState(predDFATarget);
+				if ( predDFATarget != existingState ) {
+					// already there...use/return the existing DFA state that
+					// is a target of this predicate.  Make this state number
+					// point at the existing state
+					dfa.setState(predDFATarget.stateNumber, existingState);
+					predDFATarget = existingState;
+				}
+			}
+			// add a transition to pred target from d
+			d.addTransition(predDFATarget, new Label(c.semanticContext));
+		}
+	}
+
+	/** Allocate one dummy NFAContext root per alternative so every NFA
+	 *  configuration can always point at an NFAContext.  If a context
+	 *  refers to one of these roots it implies there is no call stack
+	 *  for that configuration.  (Removed an unused local that computed
+	 *  the 1-based alt number but was never read.)
+	 */
+	protected void initContextTrees(int numberOfAlts) {
+        contextTrees = new NFAContext[numberOfAlts];
+        for (int i = 0; i < contextTrees.length; i++) {
+            // contextTrees[i] is the dummy root for alt i+1
+            contextTrees[i] = new NFAContext(null, null);
+        }
+    }
+
+	/** Return the largest Integer in s.  Returns Integer.MIN_VALUE when
+	 *  s is null or empty; previously an empty set returned 0, which was
+	 *  inconsistent with the null case and indistinguishable from a real
+	 *  maximum of 0.
+	 */
+	public static int max(Set s) {
+		if ( s==null || s.size()==0 ) {
+			return Integer.MIN_VALUE;
+		}
+		int m = Integer.MIN_VALUE;
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			Integer I = (Integer) it.next();
+			if ( I.intValue()>m ) {
+				m = I.intValue();
+			}
+		}
+		return m;
+	}
+}
diff --git a/src/org/antlr/analysis/RuleClosureTransition.java b/src/org/antlr/analysis/RuleClosureTransition.java
new file mode 100644
index 0000000..2edc164
--- /dev/null
+++ b/src/org/antlr/analysis/RuleClosureTransition.java
@@ -0,0 +1,60 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+/** An epsilon transition that invokes another rule.  Besides the usual
+ *  transition target (the invoked rule's start state), it also records
+ *  the state in the invoking rule at which computation resumes after
+ *  the invoked rule completes.  When NFA conversion falls off the end
+ *  of a rule, these transitions make it possible to determine who
+ *  invoked that rule and where to continue.
+ */
+public class RuleClosureTransition extends Transition {
+    /** Index of rule targeted by this transition */
+    protected int ruleIndex;
+
+    /** State at which to resume computation after the rule reference */
+    protected NFAState followState;
+
+    public RuleClosureTransition(int ruleIndex,
+                             NFAState ruleStart,
+                             NFAState followState)
+    {
+        // the transition itself is an epsilon edge into the rule
+        super(Label.EPSILON, ruleStart);
+        this.ruleIndex = ruleIndex;
+        this.followState = followState;
+    }
+
+    public int getRuleIndex() {
+        return ruleIndex;
+    }
+
+    public NFAState getFollowState() {
+        return followState;
+    }
+}
+
diff --git a/src/org/antlr/analysis/SemanticContext.java b/src/org/antlr/analysis/SemanticContext.java
new file mode 100644
index 0000000..4468c17
--- /dev/null
+++ b/src/org/antlr/analysis/SemanticContext.java
@@ -0,0 +1,482 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.tool.ANTLRParser;
+import org.antlr.tool.GrammarAST;
+import org.antlr.tool.Grammar;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Iterator;
+
+/** A binary tree structure used to record the semantic context in which
+ *  an NFA configuration is valid.  It's either a single predicate or
+ *  a tree representing an operation tree such as: p1&&p2 or p1||p2.
+ *
+ *  For NFA o-p1->o-p2->o, create tree AND(p1,p2).
+ *  For NFA (1)-p1->(2)
+ *           |       ^
+ *           |       |
+ *          (3)-p2----
+ *  we will have to combine p1 and p2 into DFA state as we will be
+ *  adding NFA configurations for state 2 with two predicates p1,p2.
+ *  So, set context for combined NFA config for state 2: OR(p1,p2).
+ *
+ *  I have scoped the AND, NOT, OR, and Predicate subclasses of
+ *  SemanticContext within the scope of this outer class.
+ *
+ *  July 7, 2006: TJP altered OR to be set of operands. the Binary tree
+ *  made it really hard to reduce complicated || sequences to their minimum.
+ *  Got huge repeated || conditions.
+ */
+public abstract class SemanticContext {
+	/** Create a default value for the semantic context shared among all
+	 *  NFAConfigurations that do not have an actual semantic context.
+	 *  This prevents lots of if!=null type checks all over; it represents
+	 *  just an empty set of predicates.
+	 */
+	public static final SemanticContext EMPTY_SEMANTIC_CONTEXT = new Predicate();
+
+	/** Given a semantic context expression tree, return a tree with all
+	 *  nongated predicates set to true and then reduced.  So p&&(q||r) would
+	 *  return p&&r if q is nongated but p and r are gated.
+	 */
+	public abstract SemanticContext getGatedPredicateContext();
+
+	/** Generate an expression that will evaluate the semantic context,
+	 *  given a set of output templates.
+	 */
+	public abstract StringTemplate genExpr(CodeGenerator generator,
+										   StringTemplateGroup templates,
+										   DFA dfa);
+
+	public abstract boolean isSyntacticPredicate();
+
+	/** Notify the indicated grammar of any syn preds used within this context */
+	public void trackUseOfSyntacticPredicates(Grammar g) {
+	}
+
+	public static class Predicate extends SemanticContext {
+		/** The AST node in tree created from the grammar holding the predicate */
+		protected GrammarAST predicate;
+
+		/** Is this a {...}?=> gating predicate or a normal disambiguating {..}?
+		 *  If any predicate in expression is gated, then expression is considered
+		 *  gated.
+		 *
+		 *  The simple Predicate object's predicate AST's type is used to set
+		 *  gated to true if type==GATED_SEMPRED.
+		 */
+		protected boolean gated = false;
+
+		/** syntactic predicates are converted to semantic predicates
+		 *  but synpreds are generated slightly differently.
+		 */
+		protected boolean synpred = false;
+
+		public static final int INVALID_PRED_VALUE = -1;
+		public static final int FALSE_PRED = 0;
+		public static final int TRUE_PRED = 1;
+
+		/** sometimes predicates are known to be true or false; we need
+		 *  a way to represent this without resorting to a target language
+		 *  value like true or TRUE.
+		 */
+		protected int constantValue = INVALID_PRED_VALUE;
+
+		public Predicate() {
+			predicate = new GrammarAST();
+			this.gated=false;
+		}
+
+		public Predicate(GrammarAST predicate) {
+			this.predicate = predicate;
+			this.gated =
+				predicate.getType()==ANTLRParser.GATED_SEMPRED ||
+				predicate.getType()==ANTLRParser.SYN_SEMPRED ;
+			this.synpred =
+				predicate.getType()==ANTLRParser.SYN_SEMPRED ||
+				predicate.getType()==ANTLRParser.BACKTRACK_SEMPRED;
+		}
+
+		public Predicate(Predicate p) {
+			this.predicate = p.predicate;
+			this.gated = p.gated;
+			this.synpred = p.synpred;
+			this.constantValue = p.constantValue;
+		}
+
+		/** Two predicates are the same if they are literally the same
+		 *  text rather than same node in the grammar's AST.
+		 *  Or, if they have the same constant value, return equal.
+		 *  As of July 2006 I'm not sure these are needed.
+		 */
+		public boolean equals(Object o) {
+			if ( !(o instanceof Predicate) ) {
+				return false;
+			}
+			return predicate.getText().equals(((Predicate)o).predicate.getText());
+		}
+
+		public int hashCode() {
+			if ( predicate==null || predicate.getText()==null ) {
+				// the no-arg Predicate() (used for EMPTY_SEMANTIC_CONTEXT)
+				// wraps a bare GrammarAST whose text may be null; avoid NPE
+				return 0;
+			}
+			return predicate.getText().hashCode();
+		}
+
+		public StringTemplate genExpr(CodeGenerator generator,
+									  StringTemplateGroup templates,
+									  DFA dfa)
+		{
+			StringTemplate eST = null;
+			if ( templates!=null ) {
+				if ( synpred ) {
+					eST = templates.getInstanceOf("evalSynPredicate");
+				}
+				else {
+					eST = templates.getInstanceOf("evalPredicate");
+					generator.grammar.decisionsWhoseDFAsUsesSemPreds.add(dfa);
+				}
+				String predEnclosingRuleName = predicate.getEnclosingRule();
+				/*
+				String decisionEnclosingRuleName =
+					dfa.getNFADecisionStartState().getEnclosingRule();
+				// if these rulenames are diff, then pred was hoisted out of rule
+				// Currently I don't warn you about this as it could be annoying.
+				// I do the translation anyway.
+				*/
+				//eST.setAttribute("pred", this.toString());
+				if ( generator!=null ) {
+					eST.setAttribute("pred",
+									 generator.translateAction(predEnclosingRuleName,predicate));
+				}
+			}
+			else {
+				eST = new StringTemplate("$pred$");
+				eST.setAttribute("pred", this.toString());
+				return eST;
+			}
+			if ( generator!=null ) {
+				String description =
+					generator.target.getTargetStringLiteralFromString(this.toString());
+				eST.setAttribute("description", description);
+			}
+			return eST;
+		}
+
+		public SemanticContext getGatedPredicateContext() {
+			if ( gated ) {
+				return this;
+			}
+			return null;
+		}
+
+		public boolean isSyntacticPredicate() {
+			return predicate!=null &&
+				( predicate.getType()==ANTLRParser.SYN_SEMPRED ||
+				  predicate.getType()==ANTLRParser.BACKTRACK_SEMPRED );
+		}
+
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			if ( synpred ) {
+				g.synPredNamesUsedInDFA.add(predicate.getText());
+			}
+		}
+
+		public String toString() {
+			if ( predicate==null ) {
+				return "<nopred>";
+			}
+			return predicate.getText();
+		}
+	}
+
+	public static class TruePredicate extends Predicate {
+		public TruePredicate() {
+			super();
+			this.constantValue = TRUE_PRED;
+		}
+
+		public StringTemplate genExpr(CodeGenerator generator,
+									  StringTemplateGroup templates,
+									  DFA dfa)
+		{
+			if ( templates!=null ) {
+				return templates.getInstanceOf("true");
+			}
+			return new StringTemplate("true");
+		}
+
+		public String toString() {
+			return "true"; // not used for code gen, just DOT and print outs
+		}
+	}
+
+	/*
+	public static class FalsePredicate extends Predicate {
+		public FalsePredicate() {
+			super();
+			this.constantValue = FALSE_PRED;
+		}
+		public StringTemplate genExpr(CodeGenerator generator,
+									  StringTemplateGroup templates,
+									  DFA dfa)
+		{
+			if ( templates!=null ) {
+				return templates.getInstanceOf("false");
+			}
+			return new StringTemplate("false");
+		}
+		public String toString() {
+			return "false"; // not used for code gen, just DOT and print outs
+		}
+	}
+	*/
+
+	/** Conjunction of exactly two contexts: left&&right. */
+	public static class AND extends SemanticContext {
+		protected SemanticContext left,right;
+		public AND(SemanticContext a, SemanticContext b) {
+			this.left = a;
+			this.right = b;
+		}
+		public StringTemplate genExpr(CodeGenerator generator,
+									  StringTemplateGroup templates,
+									  DFA dfa)
+		{
+			StringTemplate eST = null;
+			if ( templates!=null ) {
+				eST = templates.getInstanceOf("andPredicates");
+			}
+			else {
+				eST = new StringTemplate("($left$&&$right$)");
+			}
+			eST.setAttribute("left", left.genExpr(generator,templates,dfa));
+			eST.setAttribute("right", right.genExpr(generator,templates,dfa));
+			return eST;
+		}
+		public SemanticContext getGatedPredicateContext() {
+			SemanticContext gatedLeft = left.getGatedPredicateContext();
+			SemanticContext gatedRight = right.getGatedPredicateContext();
+			if ( gatedLeft==null ) {
+				return gatedRight;
+			}
+			if ( gatedRight==null ) {
+				return gatedLeft;
+			}
+			return new AND(gatedLeft, gatedRight);
+		}
+		public boolean isSyntacticPredicate() {
+			return left.isSyntacticPredicate()||right.isSyntacticPredicate();
+		}
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			left.trackUseOfSyntacticPredicates(g);
+			right.trackUseOfSyntacticPredicates(g);
+		}
+		public String toString() {
+			return "("+left+"&&"+right+")";
+		}
+	}
+
+	/** Disjunction over a flat set of operands (see July 7, 2006 note above). */
+	public static class OR extends SemanticContext {
+		protected Set operands;
+		public OR(SemanticContext a, SemanticContext b) {
+			operands = new HashSet();
+			if ( a instanceof OR ) {
+				operands.addAll(((OR)a).operands);
+			}
+			else if ( a!=null ) {
+				operands.add(a);
+			}
+			if ( b instanceof OR ) {
+				operands.addAll(((OR)b).operands);
+			}
+			else if ( b!=null ) {
+				operands.add(b);
+			}
+		}
+		public StringTemplate genExpr(CodeGenerator generator,
+									  StringTemplateGroup templates,
+									  DFA dfa)
+		{
+			StringTemplate eST = null;
+			if ( templates!=null ) {
+				eST = templates.getInstanceOf("orPredicates");
+			}
+			else {
+				eST = new StringTemplate("($first(operands)$$rest(operands):{o | ||$o$}$)");
+			}
+			for (Iterator it = operands.iterator(); it.hasNext();) {
+				SemanticContext semctx = (SemanticContext) it.next();
+				eST.setAttribute("operands", semctx.genExpr(generator,templates,dfa));
+			}
+			return eST;
+		}
+		public SemanticContext getGatedPredicateContext() {
+			SemanticContext result = null;
+			for (Iterator it = operands.iterator(); it.hasNext();) {
+				SemanticContext semctx = (SemanticContext) it.next();
+				SemanticContext gatedPred = semctx.getGatedPredicateContext();
+				if ( gatedPred!=null ) {
+					result = or(result, gatedPred);
+					// result = new OR(result, gatedPred);
+				}
+			}
+			return result;
+		}
+		public boolean isSyntacticPredicate() {
+			for (Iterator it = operands.iterator(); it.hasNext();) {
+				SemanticContext semctx = (SemanticContext) it.next();
+				if ( semctx.isSyntacticPredicate() ) {
+					return true;
+				}
+			}
+			return false;
+		}
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			for (Iterator it = operands.iterator(); it.hasNext();) {
+				SemanticContext semctx = (SemanticContext) it.next();
+				semctx.trackUseOfSyntacticPredicates(g);
+			}
+		}
+		public String toString() {
+			StringBuffer buf = new StringBuffer();
+			buf.append("(");
+			int i = 0;
+			for (Iterator it = operands.iterator(); it.hasNext();) {
+				SemanticContext semctx = (SemanticContext) it.next();
+				if ( i>0 ) {
+					buf.append("||");
+				}
+				buf.append(semctx.toString());
+				i++;
+			}
+			buf.append(")");
+			return buf.toString();
+		}
+	}
+
+	public static class NOT extends SemanticContext {
+		protected SemanticContext ctx;
+		public NOT(SemanticContext ctx) {
+			this.ctx = ctx;
+		}
+		public StringTemplate genExpr(CodeGenerator generator,
+									  StringTemplateGroup templates,
+									  DFA dfa)
+		{
+			StringTemplate eST = null;
+			if ( templates!=null ) {
+				eST = templates.getInstanceOf("notPredicate");
+			}
+			else {
+				eST = new StringTemplate("?!($pred$)");
+			}
+			eST.setAttribute("pred", ctx.genExpr(generator,templates,dfa));
+			return eST;
+		}
+		public SemanticContext getGatedPredicateContext() {
+			SemanticContext p = ctx.getGatedPredicateContext();
+			if ( p==null ) {
+				return null;
+			}
+			return new NOT(p);
+		}
+		public boolean isSyntacticPredicate() {
+			return ctx.isSyntacticPredicate();
+		}
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			ctx.trackUseOfSyntacticPredicates(g);
+		}
+
+		public boolean equals(Object object) {
+			if ( !(object instanceof NOT) ) {
+				return false;
+			}
+			return this.ctx.equals(((NOT)object).ctx);
+		}
+
+		public int hashCode() {
+			// must accompany equals(): NOT operands are stored in hashed
+			// collections (OR keeps its operands in a HashSet), so equal
+			// NOT objects must hash identically or dedup/lookup fails.
+			// Invert the wrapped context's hash so NOT(p) != hash of p.
+			return ~ctx.hashCode();
+		}
+
+		public String toString() {
+			return "!("+ctx+")";
+		}
+	}
+
+	public static SemanticContext and(SemanticContext a, SemanticContext b) {
+		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null ) {
+			return b;
+		}
+		if ( b==EMPTY_SEMANTIC_CONTEXT || b==null ) {
+			return a;
+		}
+		if ( a.equals(b) ) {
+			return a; // if same, just return left one
+		}
+		return new AND(a,b);
+	}
+
+	public static SemanticContext or(SemanticContext a, SemanticContext b) {
+		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null ) {
+			return b;
+		}
+		if ( b==EMPTY_SEMANTIC_CONTEXT || b==null ) {
+			return a;
+		}
+		if ( a instanceof TruePredicate ) {
+			return a;
+		}
+		if ( b instanceof TruePredicate ) {
+			return b;
+		}
+		if ( a instanceof NOT && b instanceof Predicate ) {
+			NOT n = (NOT)a;
+			// check for !p||p
+			if ( n.ctx.equals(b) ) {
+				return new TruePredicate();
+			}
+		}
+		else if ( b instanceof NOT && a instanceof Predicate ) {
+			NOT n = (NOT)b;
+			// check for p||!p
+			if ( n.ctx.equals(a) ) {
+				return new TruePredicate();
+			}
+		}
+		else if ( a.equals(b) ) {
+			return a;
+		}
+		return new OR(a,b);
+	}
+
+	public static SemanticContext not(SemanticContext a) {
+		return new NOT(a);
+	}
+
+}
diff --git a/src/org/antlr/analysis/State.java b/src/org/antlr/analysis/State.java
new file mode 100644
index 0000000..9c56124
--- /dev/null
+++ b/src/org/antlr/analysis/State.java
@@ -0,0 +1,54 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+/** A generic state machine state. */
+public abstract class State {
+    /** Sentinel meaning the state has not been assigned a number yet */
+    public static final int INVALID_STATE_NUMBER = -1;
+
+    public int stateNumber = INVALID_STATE_NUMBER;
+
+    /** An accept state is an end of rule state for lexers and
+     *  parser grammar rules.
+	 */
+	protected boolean acceptState = false;
+
+    public abstract int getNumberOfTransitions();
+
+    public abstract void addTransition(Transition e);
+
+    /** Return the ith outgoing transition of this state. */
+    public abstract Transition transition(int i);
+
+	public boolean isAcceptState() {
+		return acceptState;
+	}
+
+	public void setAcceptState(boolean acceptState) {
+		this.acceptState = acceptState;
+	}
+}
diff --git a/src/org/antlr/analysis/StateCluster.java b/src/org/antlr/analysis/StateCluster.java
new file mode 100644
index 0000000..c31e9e2
--- /dev/null
+++ b/src/org/antlr/analysis/StateCluster.java
@@ -0,0 +1,41 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+/** A Cluster object points to the left/right (start and end) states of a
+ *  state machine.  Used to build NFAs.
+ */
+public class StateCluster {
+    /** Entry (start) state of the machine fragment */
+    public NFAState left;
+    /** Exit (end) state of the machine fragment */
+    public NFAState right;
+
+    public StateCluster(NFAState left, NFAState right) {
+        this.left = left;
+        this.right = right;
+    }
+}
diff --git a/src/org/antlr/analysis/Transition.java b/src/org/antlr/analysis/Transition.java
new file mode 100644
index 0000000..041f2e5
--- /dev/null
+++ b/src/org/antlr/analysis/Transition.java
@@ -0,0 +1,80 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+/** A generic transition between any two state machine states.  It defines
+ *  some special labels that indicate things like epsilon transitions and
+ *  that the label is actually a set of labels or a semantic predicate.
+ *  This is a one way link.  It emanates from a state (usually via a list of
+ *  transitions) and has a label/target pair.  I have abstracted the notion
+ *  of a Label to handle the various kinds of things it can be.
+ */
+public class Transition implements Comparable {
+    /** What label must be consumed to transition to target */
+    public Label label;
+
+    /** The target of this transition */
+    public State target;
+
+    /** Create a transition with an already-built Label. */
+    public Transition(Label label, State target) {
+        this.label = label;
+        this.target = target;
+    }
+
+    /** Convenience: wrap a single atom (token type / char code) in a Label. */
+    public Transition(int label, State target) {
+        this.label = new Label(label);
+        this.target = target;
+    }
+
+    /** True if this edge consumes no input (delegates to the label). */
+    public boolean isEpsilon() {
+        return label.isEpsilon();
+    }
+
+    /** True if the label is a semantic predicate rather than input. */
+    public boolean isSemanticPredicate() {
+        return label.isSemanticPredicate();
+    }
+
+    // Hash combines label hash with the target's state number so that
+    // transitions to different states hash differently.
+    public int hashCode() {
+        return label.hashCode() + target.stateNumber;
+    }
+
+    // NOTE(review): no null/instanceof guard -- throws ClassCastException if
+    // o is not a Transition; presumably transitions are only ever compared
+    // with other transitions. Confirm before reusing in generic collections.
+    public boolean equals(Object o) {
+        Transition other = (Transition)o;
+        return this.label.equals(other.label) &&
+               this.target.equals(other.target);
+    }
+
+    // Orders by label only, ignoring target, so compareTo()==0 does not imply
+    // equals() -- NOTE(review): fine for sorting edge lists, but not safe in
+    // sorted sets/maps that assume compareTo is consistent with equals.
+    public int compareTo(Object o) {
+        Transition other = (Transition)o;
+        return this.label.compareTo(other.label);
+    }
+
+    /** Render as "label->targetStateNumber" for debugging/DOT output. */
+    public String toString() {
+        return label+"->"+target.stateNumber;
+    }
+}
diff --git a/src/org/antlr/codegen/ACyclicDFACodeGenerator.java b/src/org/antlr/codegen/ACyclicDFACodeGenerator.java
new file mode 100644
index 0000000..ee58c9a
--- /dev/null
+++ b/src/org/antlr/codegen/ACyclicDFACodeGenerator.java
@@ -0,0 +1,186 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.Utils;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+
+import java.util.List;
+
+/** Emits code for a fixed-lookahead (acyclic) DFA decision by recursively
+ *  walking its states and instantiating StringTemplates for states and
+ *  edges.  Holds no state of its own beyond the owning CodeGenerator.
+ */
+public class ACyclicDFACodeGenerator {
+	/** Owning generator; supplies templates, the target, and the
+	 *  label/predicate expression generation used per edge.
+	 */
+	protected CodeGenerator parentGenerator;
+
+	public ACyclicDFACodeGenerator(CodeGenerator parent) {
+		this.parentGenerator = parent;
+	}
+
+	/** Entry point: generate the whole decision for dfa, starting at its
+	 *  start state with lookahead depth k=1.
+	 */
+	public StringTemplate genFixedLookaheadDecision(StringTemplateGroup templates,
+													DFA dfa)
+	{
+		return walkFixedDFAGeneratingStateMachine(templates, dfa, dfa.startState, 1);
+	}
+
+	/** Recursively build the template tree for state s at lookahead depth k.
+	 *  Accept states become "dfaAcceptState" templates carrying the uniquely
+	 *  predicted alt.  Interior states become if-then-else or switch state
+	 *  templates with one edge template per non-EOT transition; each edge's
+	 *  target is generated by recursing at depth k+1.  EOT edges are never
+	 *  emitted as real edges: either the alt they predict becomes the
+	 *  default/else case, or (if the EOT target splits on predicates) the
+	 *  predicate edges hoisted from the EOT target are emitted here.
+	 *  Recursion depth is bounded by the DFA's fixed lookahead depth.
+	 */
+	protected StringTemplate walkFixedDFAGeneratingStateMachine(
+			StringTemplateGroup templates,
+			DFA dfa,
+			DFAState s,
+			int k)
+	{
+		if ( s.isAcceptState() ) {
+			StringTemplate dfaST = templates.getInstanceOf("dfaAcceptState");
+			dfaST.setAttribute("alt", Utils.integer(s.getUniquelyPredictedAlt()));
+			return dfaST;
+		}
+
+		// the default templates for generating a state and its edges
+		// can be an if-then-else structure or a switch
+		String dfaStateName = "dfaState";
+		String dfaLoopbackStateName = "dfaLoopbackState";
+		String dfaOptionalBlockStateName = "dfaOptionalBlockState";
+		String dfaEdgeName = "dfaEdge";
+		if ( parentGenerator.canGenerateSwitch(s) ) {
+			dfaStateName = "dfaStateSwitch";
+			dfaLoopbackStateName = "dfaLoopbackStateSwitch";
+			dfaOptionalBlockStateName = "dfaOptionalBlockStateSwitch";
+			dfaEdgeName = "dfaEdgeSwitch";
+		}
+
+		// choose the state template variant from the kind of NFA decision
+		// state (plain, loopback, or optional block) that spawned this DFA
+		StringTemplate dfaST = templates.getInstanceOf(dfaStateName);
+		if ( dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK ) {
+			dfaST = templates.getInstanceOf(dfaLoopbackStateName);
+		}
+		else if ( dfa.getNFADecisionStartState().decisionStateType==NFAState.OPTIONAL_BLOCK_START ) {
+			dfaST = templates.getInstanceOf(dfaOptionalBlockStateName);
+		}
+		dfaST.setAttribute("k", Utils.integer(k));
+		dfaST.setAttribute("stateNumber", Utils.integer(s.stateNumber));
+		dfaST.setAttribute("semPredState",
+							Boolean.valueOf(s.isResolvedWithPredicates()));
+		// description may legitimately be null; only set when present
+		String description = dfa.getNFADecisionStartState().getDescription();
+		description = parentGenerator.target.getTargetStringLiteralFromString(description);
+		//System.out.println("DFA: "+description+" associated with AST "+decisionASTNode);
+		if ( description!=null ) {
+			dfaST.setAttribute("description", description);
+		}
+		int EOTPredicts = NFA.INVALID_ALT_NUMBER;
+		DFAState EOTTarget = null;
+		//System.out.println("DFA state "+s.stateNumber);
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) s.transition(i);
+			//System.out.println("edge label "+edge.label.toString());
+			if ( edge.label.getAtom()==Label.EOT ) {
+				// don't generate a real edge for EOT; track alt EOT predicts
+				// generate that prediction in the else clause as default case
+				EOTTarget = (DFAState)edge.target;
+				EOTPredicts = EOTTarget.getUniquelyPredictedAlt();
+				/*
+				System.out.println("DFA s"+s.stateNumber+" EOT goes to s"+
+								   edge.target.stateNumber+" predicates alt "+
+								   EOTPredicts);
+				*/
+				continue;
+			}
+			StringTemplate edgeST = templates.getInstanceOf(dfaEdgeName);
+			// If the template wants all the label values delineated, do that
+			if ( edgeST.getFormalArgument("labels")!=null ) {
+				List labels = edge.label.getSet().toList();
+				for (int j = 0; j < labels.size(); j++) {
+					Integer vI = (Integer) labels.get(j);
+					String label =
+						parentGenerator.getTokenTypeAsTargetLabel(vI.intValue());
+					labels.set(j, label); // rewrite List element to be name
+				}
+				edgeST.setAttribute("labels", labels);
+			}
+			else { // else create an expression to evaluate (the general case)
+				edgeST.setAttribute("labelExpr",
+								parentGenerator.genLabelExpr(templates,edge,k));
+			}
+
+			// stick in any gated predicates for any edge if not already a pred
+			if ( !edge.label.isSemanticPredicate() ) {
+				DFAState target = (DFAState)edge.target;
+				SemanticContext preds =
+					target.getGatedPredicatesInNFAConfigurations();
+				if ( preds!=null ) {
+					//System.out.println("preds="+target.getGatedPredicatesInNFAConfigurations());
+					StringTemplate predST = preds.genExpr(parentGenerator,
+														  parentGenerator.getTemplates(),
+														  dfa);
+					edgeST.setAttribute("predicates", predST.toString());
+				}
+			}
+
+			// recurse to generate this edge's target subtree at depth k+1
+			StringTemplate targetST =
+				walkFixedDFAGeneratingStateMachine(templates,
+												   dfa,
+												   (DFAState)edge.target,
+												   k+1);
+			edgeST.setAttribute("targetState", targetST);
+			dfaST.setAttribute("edges", edgeST);
+			/*
+			System.out.println("back to DFA "+
+							   dfa.decisionNumber+"."+s.stateNumber);
+							   */
+		}
+
+		// HANDLE EOT EDGE
+		if ( EOTPredicts!=NFA.INVALID_ALT_NUMBER ) {
+			// EOT unique predicts an alt
+			dfaST.setAttribute("eotPredictsAlt", Utils.integer(EOTPredicts));
+		}
+		else if ( EOTTarget!=null && EOTTarget.getNumberOfTransitions()>0 ) {
+			// EOT state has transitions so must split on predicates.
+			// Generate predicate else-if clauses and then generate
+			// NoViableAlt exception as else clause.
+			// Note: these predicates emanate from the EOT target state
+			// rather than the current DFAState s so the error message
+			// might be slightly misleading if you are looking at the
+			// state number.  Predicates emanating from EOT targets are
+			// hoisted up to the state that has the EOT edge.
+			for (int i = 0; i < EOTTarget.getNumberOfTransitions(); i++) {
+				Transition predEdge = (Transition)EOTTarget.transition(i);
+				StringTemplate edgeST = templates.getInstanceOf(dfaEdgeName);
+				edgeST.setAttribute("labelExpr",
+							parentGenerator.genSemanticPredicateExpr(templates,predEdge));
+				// the target must be an accept state
+				StringTemplate targetST =
+					walkFixedDFAGeneratingStateMachine(templates,
+													   dfa,
+													   (DFAState)predEdge.target,
+													   k+1);
+				edgeST.setAttribute("targetState", targetST);
+				dfaST.setAttribute("edges", edgeST);
+			}
+		}
+		return dfaST;
+	}
+}
+
diff --git a/src/org/antlr/codegen/ANTLRTokenTypes.txt b/src/org/antlr/codegen/ANTLRTokenTypes.txt
new file mode 100644
index 0000000..27eaa78
--- /dev/null
+++ b/src/org/antlr/codegen/ANTLRTokenTypes.txt
@@ -0,0 +1,95 @@
+// $ANTLR 2.7.7 (2006-01-29): antlr.g -> ANTLRTokenTypes.txt$
+ANTLR    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
diff --git a/src/org/antlr/codegen/ActionTranslator.g b/src/org/antlr/codegen/ActionTranslator.g
new file mode 100644
index 0000000..e9ccf3f
--- /dev/null
+++ b/src/org/antlr/codegen/ActionTranslator.g
@@ -0,0 +1,818 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+lexer grammar ActionTranslator;
+options {
+  filter=true;  // try all non-fragment rules in order specified
+  // output=template;  TODO: can we make tokens return templates somehow?
+}
+
+@header {
+package org.antlr.codegen;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.runtime.*;
+import org.antlr.tool.*;
+}
+
+@members {
+public List chunks = new ArrayList();
+Rule enclosingRule;
+int outerAltNum;
+Grammar grammar;
+CodeGenerator generator;
+antlr.Token actionToken;
+
+	public ActionTranslatorLexer(CodeGenerator generator,
+								 String ruleName,
+								 GrammarAST actionAST)
+	{
+		this(new ANTLRStringStream(actionAST.token.getText()));
+		this.generator = generator;
+		this.grammar = generator.grammar;
+	    this.enclosingRule = grammar.getRule(ruleName);
+	    this.actionToken = actionAST.token;
+	    this.outerAltNum = actionAST.outerAltNum;
+	}
+
+	public ActionTranslatorLexer(CodeGenerator generator,
+								 String ruleName,
+								 antlr.Token actionToken,
+								 int outerAltNum)
+	{
+		this(new ANTLRStringStream(actionToken.getText()));
+		this.generator = generator;
+		grammar = generator.grammar;
+	    this.enclosingRule = grammar.getRule(ruleName);
+	    this.actionToken = actionToken;
+		this.outerAltNum = outerAltNum;
+	}
+	// BACKWARD COMPATIBILITY UNTIL REGENERATING WITH 3.0b7
+	public Token emit(int tokenType,
+					  int line, int charPosition,
+					  int channel,
+					  int start, int stop)
+	{
+		Token t = new CommonToken(input, tokenType, channel, start, stop);
+		t.setLine(line);
+		t.setText(text);
+		t.setCharPositionInLine(charPosition);
+		emit(t);
+		return t;
+	}
+
+/*
+public ActionTranslatorLexer(CharStream input, CodeGenerator generator,
+                             Grammar grammar, Rule enclosingRule,
+                             antlr.Token actionToken, int outerAltNum)
+{
+    this(input);
+    this.grammar = grammar;
+    this.generator = generator;
+    this.enclosingRule = enclosingRule;
+    this.actionToken = actionToken;
+    this.outerAltNum = outerAltNum;
+}
+*/
+
+/** Return a list of strings and StringTemplate objects that
+ *  represent the translated action.
+ */
+public List translateToChunks() {
+	// System.out.println("###\naction="+action);
+	Token t;
+	do {
+		t = nextToken();
+	} while ( t.getType()!= Token.EOF );
+	return chunks;
+}
+
+public String translate() {
+	List theChunks = translateToChunks();
+	//System.out.println("chunks="+a.chunks);
+	StringBuffer buf = new StringBuffer();
+	for (int i = 0; i < theChunks.size(); i++) {
+		Object o = (Object) theChunks.get(i);
+		buf.append(o);
+	}
+	//System.out.println("translated: "+buf.toString());
+	return buf.toString();
+}
+
+public List translateAction(String action) {
+    ActionTranslatorLexer translator =
+        new ActionTranslatorLexer(generator,
+                                  enclosingRule.name,
+                                  new antlr.CommonToken(ANTLRParser.ACTION,action),outerAltNum);
+    return translator.translateToChunks();
+}
+
+public boolean isTokenRefInAlt(String id) {
+    return enclosingRule.getTokenRefsInAlt(id, outerAltNum)!=null;
+}
+public boolean isRuleRefInAlt(String id) {
+    return enclosingRule.getRuleRefsInAlt(id, outerAltNum)!=null;
+}
+public Grammar.LabelElementPair getElementLabel(String id) {
+    return enclosingRule.getLabel(id);
+}
+
+public void checkElementRefUniqueness(String ref, boolean isToken) {
+		List refs = null;
+		if ( isToken ) {
+		    refs = enclosingRule.getTokenRefsInAlt(ref, outerAltNum);
+		}
+		else {
+		    refs = enclosingRule.getRuleRefsInAlt(ref, outerAltNum);
+		}
+		if ( refs!=null && refs.size()>1 ) {
+			ErrorManager.grammarError(ErrorManager.MSG_NONUNIQUE_REF,
+									  grammar,
+									  actionToken,
+									  ref);
+		}
+}
+
+/** For \$rulelabel.name, return the Attribute found for name.  It
+ *  will be a predefined property or a return value.
+ */
+public Attribute getRuleLabelAttribute(String ruleName, String attrName) {
+	Rule r = grammar.getRule(ruleName);
+	AttributeScope scope = r.getLocalAttributeScope(attrName);
+	if ( scope!=null && !scope.isParameterScope ) {
+		return scope.getAttribute(attrName);
+	}
+	return null;
+}
+
+AttributeScope resolveDynamicScope(String scopeName) {
+	if ( grammar.getGlobalScope(scopeName)!=null ) {
+		return grammar.getGlobalScope(scopeName);
+	}
+	Rule scopeRule = grammar.getRule(scopeName);
+	if ( scopeRule!=null ) {
+		return scopeRule.ruleScope;
+	}
+	return null; // not a valid dynamic scope
+}
+
+protected StringTemplate template(String name) {
+	StringTemplate st = generator.getTemplates().getInstanceOf(name);
+	chunks.add(st);
+	return st;
+}
+
+
+}
+
+/**	$x.y	x is enclosing rule, y is a return value, parameter, or
+ * 			predefined property.
+ *
+ * 			r[int i] returns [int j]
+ * 				:	{$r.i, $r.j, $r.start, $r.stop, $r.st, $r.tree}
+ * 				;
+ */
+SET_ENCLOSING_RULE_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+							{enclosingRule!=null &&
+	                         $x.text.equals(enclosingRule.name) &&
+	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
+		//{System.out.println("found \$rule.attr");}
+		{
+		StringTemplate st = null;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
+		if ( scope.isPredefinedRuleScope ) {
+			if ( $y.text.equals("st") || $y.text.equals("tree") ) {
+				st = template("ruleSetPropertyRef_"+$y.text);
+				grammar.referenceRuleLabelPredefinedAttribute($x.text);
+				st.setAttribute("scope", $x.text);
+				st.setAttribute("attr", $y.text);
+				st.setAttribute("expr", translateAction($expr.text));
+			} else {
+				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+										  grammar,
+										  actionToken,
+										  $x.text,
+										  $y.text);
+			}
+		}
+	    else if ( scope.isPredefinedLexerRuleScope ) {
+	    	// this is a better message to emit than the previous one...
+			ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+									  grammar,
+									  actionToken,
+									  $x.text,
+									  $y.text);
+	    }
+		else if ( scope.isParameterScope ) {
+			st = template("parameterSetAttributeRef");
+			st.setAttribute("attr", scope.getAttribute($y.text));
+			st.setAttribute("expr", translateAction($expr.text));
+		}
+		else { // must be return value
+			st = template("returnSetAttributeRef");
+			st.setAttribute("ruleDescriptor", enclosingRule);
+			st.setAttribute("attr", scope.getAttribute($y.text));
+			st.setAttribute("expr", translateAction($expr.text));
+		}
+		}
+	;
+ENCLOSING_RULE_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID	{enclosingRule!=null &&
+	                         $x.text.equals(enclosingRule.name) &&
+	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
+		//{System.out.println("found \$rule.attr");}
+		{
+		StringTemplate st = null;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
+		if ( scope.isPredefinedRuleScope ) {
+			st = template("rulePropertyRef_"+$y.text);
+			grammar.referenceRuleLabelPredefinedAttribute($x.text);
+			st.setAttribute("scope", $x.text);
+			st.setAttribute("attr", $y.text);
+		}
+	    else if ( scope.isPredefinedLexerRuleScope ) {
+	    	// perhaps not the most precise error message to use, but...
+			ErrorManager.grammarError(ErrorManager.MSG_RULE_HAS_NO_ARGS,
+									  grammar,
+									  actionToken,
+									  $x.text);
+	    }
+		else if ( scope.isParameterScope ) {
+			st = template("parameterAttributeRef");
+			st.setAttribute("attr", scope.getAttribute($y.text));
+		}
+		else { // must be return value
+			st = template("returnAttributeRef");
+			st.setAttribute("ruleDescriptor", enclosingRule);
+			st.setAttribute("attr", scope.getAttribute($y.text));
+		}
+		}
+	;
+
+/** Setting $tokenlabel.attr or $tokenref.attr where attr is predefined property of a token is an error. */
+SET_TOKEN_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID WS? '='
+							 {enclosingRule!=null && input.LA(1)!='=' &&
+	                         (enclosingRule.getTokenLabel($x.text)!=null||
+	                          isTokenRefInAlt($x.text)) &&
+	                         AttributeScope.tokenScope.getAttribute($y.text)!=null}?
+		//{System.out.println("found \$tokenlabel.attr or \$tokenref.attr");}
+		{
+		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+								  grammar,
+								  actionToken,
+								  $x.text,
+								  $y.text);
+		}
+	;
+
+/** $tokenlabel.attr or $tokenref.attr where attr is predefined property of a token.
+ *  If in lexer grammar, only translate for strings and tokens (rule refs)
+ */
+TOKEN_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID	{enclosingRule!=null &&
+	                         (enclosingRule.getTokenLabel($x.text)!=null||
+	                          isTokenRefInAlt($x.text)) &&
+	                         AttributeScope.tokenScope.getAttribute($y.text)!=null &&
+	                         (grammar.type!=Grammar.LEXER ||
+	                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||
+	                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.STRING_LITERAL)}?
+		// {System.out.println("found \$tokenlabel.attr or \$tokenref.attr");}
+		{
+		String label = $x.text;
+		if ( enclosingRule.getTokenLabel($x.text)==null ) {
+			// \$tokenref.attr  gotta get old label or compute new one
+			checkElementRefUniqueness($x.text, true);
+			label = enclosingRule.getElementLabel($x.text, outerAltNum, generator);
+			if ( label==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+										  grammar,
+										  actionToken,
+										  "\$"+$x.text+"."+$y.text);
+				label = $x.text;
+			}
+		}
+		StringTemplate st = template("tokenLabelPropertyRef_"+$y.text);
+		st.setAttribute("scope", label);
+		st.setAttribute("attr", AttributeScope.tokenScope.getAttribute($y.text));
+		}
+	;
+
+/** Setting $rulelabel.attr or $ruleref.attr where attr is a predefined property is an error
+ *  This must also fail, if we try to access a local attribute's field, like $tree.scope = localObject
+ *  That must be handled by LOCAL_ATTR below. ANTLR only concerns itself with the top-level scope
+ *  attributes declared in scope {} or parameters, return values and the like.
+ */
+SET_RULE_SCOPE_ATTR
+ at init {
+Grammar.LabelElementPair pair=null;
+String refdRuleName=null;
+}
+	:	'$' x=ID '.' y=ID WS? '=' {enclosingRule!=null && input.LA(1)!='='}?
+		{
+		pair = enclosingRule.getRuleLabel($x.text);
+		refdRuleName = $x.text;
+		if ( pair!=null ) {
+			refdRuleName = pair.referencedRuleName;
+		}
+		}
+		// supercomplicated because I can't exec the above action.
+		// This asserts that if it's a label or a ref to a rule proceed but only if the attribute
+		// is valid for that rule's scope
+		{(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&
+	      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null}?
+		//{System.out.println("found set \$rulelabel.attr or \$ruleref.attr: "+$x.text+"."+$y.text);}
+		{
+		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+								  grammar,
+								  actionToken,
+								  $x.text,
+								  $y.text);
+		}
+	;
+
+/** $rulelabel.attr or $ruleref.attr where attr is a predefined property*/
+RULE_SCOPE_ATTR
+ at init {
+Grammar.LabelElementPair pair=null;
+String refdRuleName=null;
+}
+	:	'$' x=ID '.' y=ID {enclosingRule!=null}?
+		{
+		pair = enclosingRule.getRuleLabel($x.text);
+		refdRuleName = $x.text;
+		if ( pair!=null ) {
+			refdRuleName = pair.referencedRuleName;
+		}
+		}
+		// supercomplicated because I can't exec the above action.
+		// This asserts that if it's a label or a ref to a rule proceed but only if the attribute
+		// is valid for that rule's scope
+		{(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&
+	      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null}?
+		//{System.out.println("found \$rulelabel.attr or \$ruleref.attr: "+$x.text+"."+$y.text);}
+		{
+		String label = $x.text;
+		if ( pair==null ) {
+			// \$ruleref.attr  gotta get old label or compute new one
+			checkElementRefUniqueness($x.text, false);
+			label = enclosingRule.getElementLabel($x.text, outerAltNum, generator);
+			if ( label==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+										  grammar,
+										  actionToken,
+										  "\$"+$x.text+"."+$y.text);
+				label = $x.text;
+			}
+		}
+		StringTemplate st;
+		Rule refdRule = grammar.getRule(refdRuleName);
+		AttributeScope scope = refdRule.getLocalAttributeScope($y.text);
+		if ( scope.isPredefinedRuleScope ) {
+			st = template("ruleLabelPropertyRef_"+$y.text);
+			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+			st.setAttribute("scope", label);
+			st.setAttribute("attr", $y.text);
+		}
+		else if ( scope.isPredefinedLexerRuleScope ) {
+			st = template("lexerRuleLabelPropertyRef_"+$y.text);
+			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+			st.setAttribute("scope", label);
+			st.setAttribute("attr", $y.text);
+		}
+		else if ( scope.isParameterScope ) {
+			// TODO: error!
+		}
+		else {
+			st = template("ruleLabelRef");
+			st.setAttribute("referencedRule", refdRule);
+			st.setAttribute("scope", label);
+			st.setAttribute("attr", scope.getAttribute($y.text));
+		}
+		}
+	;
+
+
+/** $label	either a token label or token/rule list label like label+=expr */
+LABEL_REF
+	:	'$' ID {enclosingRule!=null &&
+	            getElementLabel($ID.text)!=null &&
+		        enclosingRule.getRuleLabel($ID.text)==null}?
+		// {System.out.println("found \$label");}
+		{
+		StringTemplate st;
+		Grammar.LabelElementPair pair = getElementLabel($ID.text);
+		if ( pair.type==Grammar.TOKEN_LABEL ||
+              pair.type==Grammar.CHAR_LABEL )
+        {
+			st = template("tokenLabelRef");
+		}
+		else {
+			st = template("listLabelRef");
+		}
+		st.setAttribute("label", $ID.text);
+		}
+	;
+
+/** $tokenref in a non-lexer grammar */
+ISOLATED_TOKEN_REF
+	:	'$' ID	{grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt($ID.text)}?
+		//{System.out.println("found \$tokenref");}
+		{
+		String label = enclosingRule.getElementLabel($ID.text, outerAltNum, generator);
+		checkElementRefUniqueness($ID.text, true);
+		if ( label==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+									  grammar,
+									  actionToken,
+									  $ID.text);
+		}
+		else {
+			StringTemplate st = template("tokenLabelRef");
+			st.setAttribute("label", label);
+		}
+		}
+	;
+
+/** $lexerruleref from within the lexer */
+ISOLATED_LEXER_RULE_REF
+	:	'$' ID	{grammar.type==Grammar.LEXER &&
+	             enclosingRule!=null &&
+	             isRuleRefInAlt($ID.text)}?
+		//{System.out.println("found \$lexerruleref");}
+		{
+		String label = enclosingRule.getElementLabel($ID.text, outerAltNum, generator);
+		checkElementRefUniqueness($ID.text, false);
+		if ( label==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+									  grammar,
+									  actionToken,
+									  $ID.text);
+		}
+		else {
+			StringTemplate st = template("lexerRuleLabel");
+			st.setAttribute("label", label);
+		}
+		}
+	;
+
+/**  $y 	return value, parameter, predefined rule property, or token/rule
+ *          reference within enclosing rule's outermost alt.
+ *          y must be a "local" reference; i.e., it must be referring to
+ *          something defined within the enclosing rule.
+ *
+ * 			r[int i] returns [int j]
+ * 				:	{$i, $j, $start, $stop, $st, $tree}
+ *              ;
+ *
+ *	TODO: this might get the dynamic scope's elements too.!!!!!!!!!
+ */
+SET_LOCAL_ATTR
+	:	'$' ID WS? '=' expr=ATTR_VALUE_EXPR ';' {enclosingRule!=null
+													&& enclosingRule.getLocalAttributeScope($ID.text)!=null
+													&& !enclosingRule.getLocalAttributeScope($ID.text).isPredefinedLexerRuleScope}?
+		//{System.out.println("found set \$localattr");}
+		{
+		StringTemplate st;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
+		if ( scope.isPredefinedRuleScope ) {
+			if ($ID.text.equals("tree") || $ID.text.equals("st")) {
+				st = template("ruleSetPropertyRef_"+$ID.text);
+				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+				st.setAttribute("scope", enclosingRule.name);
+				st.setAttribute("attr", $ID.text);
+				st.setAttribute("expr", translateAction($expr.text));
+			} else {
+				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+										 grammar,
+										 actionToken,
+										 $ID.text,
+										 "");
+			}
+		}
+		else if ( scope.isParameterScope ) {
+			st = template("parameterSetAttributeRef");
+			st.setAttribute("attr", scope.getAttribute($ID.text));
+			st.setAttribute("expr", translateAction($expr.text));
+		}
+		else {
+			st = template("returnSetAttributeRef");
+			st.setAttribute("ruleDescriptor", enclosingRule);
+			st.setAttribute("attr", scope.getAttribute($ID.text));
+			st.setAttribute("expr", translateAction($expr.text));
+			}
+		}
+	;
+LOCAL_ATTR
+	:	'$' ID {enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null}?
+		//{System.out.println("found \$localattr");}
+		{
+		StringTemplate st;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
+		if ( scope.isPredefinedRuleScope ) {
+			st = template("rulePropertyRef_"+$ID.text);
+			grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+			st.setAttribute("scope", enclosingRule.name);
+			st.setAttribute("attr", $ID.text);
+		}
+		else if ( scope.isPredefinedLexerRuleScope ) {
+			st = template("lexerRulePropertyRef_"+$ID.text);
+			st.setAttribute("scope", enclosingRule.name);
+			st.setAttribute("attr", $ID.text);
+		}
+		else if ( scope.isParameterScope ) {
+			st = template("parameterAttributeRef");
+			st.setAttribute("attr", scope.getAttribute($ID.text));
+		}
+		else {
+			st = template("returnAttributeRef");
+			st.setAttribute("ruleDescriptor", enclosingRule);
+			st.setAttribute("attr", scope.getAttribute($ID.text));
+		}
+		}
+	;
+
+/**	$x::y	the only way to access the attributes within a dynamic scope
+ * 			regardless of whether or not you are in the defining rule.
+ *
+ * 			scope Symbols { List names; }
+ * 			r
+ * 			scope {int i;}
+ * 			scope Symbols;
+ * 				:	{$r::i=3;} s {$Symbols::names;}
+ * 				;
+ * 			s	:	{$r::i; $Symbols::names;}
+ * 				;
+ */
+SET_DYNAMIC_SCOPE_ATTR
+	:	'$' x=ID '::' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+						   {resolveDynamicScope($x.text)!=null &&
+						     resolveDynamicScope($x.text).getAttribute($y.text)!=null}?
+		//{System.out.println("found set \$scope::attr "+ $x.text + "::" + $y.text + " to " + $expr.text);}
+		{
+		AttributeScope scope = resolveDynamicScope($x.text);
+		if ( scope!=null ) {
+			StringTemplate st = template("scopeSetAttributeRef");
+			st.setAttribute("scope", $x.text);
+			st.setAttribute("attr",  scope.getAttribute($y.text));
+			st.setAttribute("expr",  translateAction($expr.text));
+		}
+		else {
+			// error: invalid dynamic attribute
+		}
+		}
+	;
+
+DYNAMIC_SCOPE_ATTR
+	:	'$' x=ID '::' y=ID
+						   {resolveDynamicScope($x.text)!=null &&
+						     resolveDynamicScope($x.text).getAttribute($y.text)!=null}?
+		//{System.out.println("found \$scope::attr "+ $x.text + "::" + $y.text);}
+		{
+		AttributeScope scope = resolveDynamicScope($x.text);
+		if ( scope!=null ) {
+			StringTemplate st = template("scopeAttributeRef");
+			st.setAttribute("scope", $x.text);
+			st.setAttribute("attr",  scope.getAttribute($y.text));
+		}
+		else {
+			// error: invalid dynamic attribute
+		}
+		}
+	;
+
+
+ERROR_SCOPED_XY
+	:	'$' x=ID '::' y=ID
+		{
+		chunks.add(getText());
+		generator.issueInvalidScopeError($x.text,$y.text,
+		                                 enclosingRule,actionToken,
+		                                 outerAltNum);		
+		}
+	;
+	
+/**		To access deeper (than top of stack) scopes, use the notation:
+ *
+ * 		$x[-1]::y previous (just under top of stack)
+ * 		$x[-i]::y top of stack - i where the '-' MUST BE PRESENT;
+ * 				  i.e., i cannot simply be negative without the '-' sign!
+ * 		$x[i]::y  absolute index i (0..size-1)
+ * 		$x[0]::y  is the absolute 0 indexed element (bottom of the stack)
+ */
+DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
+	:	'$' x=ID '[' '-' expr=SCOPE_INDEX_EXPR ']' '::' y=ID
+		// {System.out.println("found \$scope[-...]::attr");}
+		{
+		StringTemplate st = template("scopeAttributeRef");
+		st.setAttribute("scope",    $x.text);
+		st.setAttribute("attr",     resolveDynamicScope($x.text).getAttribute($y.text));
+		st.setAttribute("negIndex", $expr.text);
+		}		
+	;
+
+DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
+	:	'$' x=ID '[' expr=SCOPE_INDEX_EXPR ']' '::' y=ID 
+		// {System.out.println("found \$scope[...]::attr");}
+		{
+		StringTemplate st = template("scopeAttributeRef");
+		st.setAttribute("scope", $x.text);
+		st.setAttribute("attr",  resolveDynamicScope($x.text).getAttribute($y.text));
+		st.setAttribute("index", $expr.text);
+		}		
+	;
+
+fragment
+SCOPE_INDEX_EXPR
+	:	(~']')+
+	;
+	
+/** $r		y is a rule's dynamic scope or a global shared scope.
+ * 			Isolated $rulename is not allowed unless it has a dynamic scope *and*
+ * 			there is no reference to rulename in the enclosing alternative,
+ * 			which would be ambiguous.  See TestAttributes.testAmbiguousRuleRef()
+ */
+ISOLATED_DYNAMIC_SCOPE
+	:	'$' ID {resolveDynamicScope($ID.text)!=null}?
+		// {System.out.println("found isolated \$scope where scope is a dynamic scope");}
+		{
+		StringTemplate st = template("isolatedDynamicScopeRef");
+		st.setAttribute("scope", $ID.text);
+		}		
+	;
+	
+// antlr.g then codegen.g does these first two currently.
+// don't want to duplicate that code.
+
+/** %foo(a={},b={},...) ctor */
+TEMPLATE_INSTANCE
+	:	'%' ID '(' ( WS? ARG (',' WS? ARG)* WS? )? ')'
+		// {System.out.println("found \%foo(args)");}
+		{
+		String action = getText().substring(1,getText().length());
+		String ruleName = "<outside-of-rule>";
+		if ( enclosingRule!=null ) {
+			ruleName = enclosingRule.name;
+		}
+		StringTemplate st =
+			generator.translateTemplateConstructor(ruleName,
+												   outerAltNum,
+												   actionToken,
+												   action);
+		if ( st!=null ) {
+			chunks.add(st);
+		}
+		}
+	;
+
+/** %({name-expr})(a={},...) indirect template ctor reference */
+INDIRECT_TEMPLATE_INSTANCE
+	:	'%' '(' ACTION ')' '(' ( WS? ARG (',' WS? ARG)* WS? )? ')'
+		// {System.out.println("found \%({...})(args)");}
+		{
+		String action = getText().substring(1,getText().length());
+		StringTemplate st =
+			generator.translateTemplateConstructor(enclosingRule.name,
+												   outerAltNum,
+												   actionToken,
+												   action);
+		chunks.add(st);
+		}
+	;
+
+fragment
+ARG	:	ID '=' ACTION
+	;
+
+/**	%{expr}.y = z; template attribute y of StringTemplate-typed expr to z */
+SET_EXPR_ATTRIBUTE
+	:	'%' a=ACTION '.' ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+		// {System.out.println("found \%{expr}.y = z;");}
+		{
+		StringTemplate st = template("actionSetAttribute");
+		String action = $a.text;
+		action = action.substring(1,action.length()-1); // stuff inside {...}
+		st.setAttribute("st", translateAction(action));
+		st.setAttribute("attrName", $ID.text);
+		st.setAttribute("expr", translateAction($expr.text));
+		}
+	;
+	
+/*    %x.y = z; set template attribute y of x (always set never get attr)
+ *              to z [languages like python without ';' must still use the
+ *              ';' which the code generator is free to remove during code gen]
+ */
+SET_ATTRIBUTE
+	:	'%' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+		// {System.out.println("found \%x.y = z;");}
+		{
+		StringTemplate st = template("actionSetAttribute");
+		st.setAttribute("st", $x.text);
+		st.setAttribute("attrName", $y.text);
+		st.setAttribute("expr", translateAction($expr.text));
+		}
+	;
+
+/** Don't allow an = as first char to prevent $x == 3; kind of stuff. */
+fragment
+ATTR_VALUE_EXPR
+	:	~'=' (~';')*
+	;
+	
+/** %{string-expr} anonymous template from string expr */
+TEMPLATE_EXPR
+	:	'%' a=ACTION
+		// {System.out.println("found \%{expr}");}
+		{
+		StringTemplate st = template("actionStringConstructor");
+		String action = $a.text;
+		action = action.substring(1,action.length()-1); // stuff inside {...}
+		st.setAttribute("stringExpr", translateAction(action));
+		}
+	;
+	
+fragment
+ACTION
+	:	'{' (options {greedy=false;}:.)* '}'
+	;
+	
+ESC :   '\\' '$' {chunks.add("\$");}
+	|	'\\' '%' {chunks.add("\%");}
+	|	'\\' ~('$'|'%') {chunks.add(getText());}
+    ;       
+
+ERROR_XY
+	:	'$' x=ID '.' y=ID
+		{
+		chunks.add(getText());
+		generator.issueInvalidAttributeError($x.text,$y.text,
+		                                     enclosingRule,actionToken,
+		                                     outerAltNum);
+		}
+	;
+	
+ERROR_X
+	:	'$' x=ID
+		{
+		chunks.add(getText());
+		generator.issueInvalidAttributeError($x.text,
+		                                     enclosingRule,actionToken,
+		                                     outerAltNum);
+		}
+	;
+	
+UNKNOWN_SYNTAX
+	:	'$'
+		{
+		chunks.add(getText());
+		// shouldn't need an error here.  Just accept \$ if it doesn't look like anything
+		}
+	|	'%' (ID|'.'|'('|')'|','|'{'|'}'|'"')*
+		{
+		chunks.add(getText());
+		ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
+								  grammar,
+								  actionToken,
+								  getText());
+		}
+	;
+
+TEXT:	~('$'|'%'|'\\')+ {chunks.add(getText());}
+	;
+	
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
+
+fragment
+INT :	'0'..'9'+
+	;
+
+fragment
+WS	:	(' '|'\t'|'\n')+
+	;
diff --git a/src/org/antlr/codegen/ActionTranslator.tokens b/src/org/antlr/codegen/ActionTranslator.tokens
new file mode 100644
index 0000000..8923b6a
--- /dev/null
+++ b/src/org/antlr/codegen/ActionTranslator.tokens
@@ -0,0 +1,35 @@
+LOCAL_ATTR=17
+SET_DYNAMIC_SCOPE_ATTR=18
+ISOLATED_DYNAMIC_SCOPE=24
+WS=5
+UNKNOWN_SYNTAX=35
+DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR=23
+SCOPE_INDEX_EXPR=21
+DYNAMIC_SCOPE_ATTR=19
+ISOLATED_TOKEN_REF=14
+SET_ATTRIBUTE=30
+SET_EXPR_ATTRIBUTE=29
+ACTION=27
+ERROR_X=34
+TEMPLATE_INSTANCE=26
+TOKEN_SCOPE_ATTR=10
+ISOLATED_LEXER_RULE_REF=15
+ESC=32
+SET_ENCLOSING_RULE_SCOPE_ATTR=7
+ATTR_VALUE_EXPR=6
+RULE_SCOPE_ATTR=12
+LABEL_REF=13
+INT=37
+ARG=25
+SET_LOCAL_ATTR=16
+TEXT=36
+Tokens=38
+DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR=22
+SET_TOKEN_SCOPE_ATTR=9
+ERROR_SCOPED_XY=20
+SET_RULE_SCOPE_ATTR=11
+ENCLOSING_RULE_SCOPE_ATTR=8
+ERROR_XY=33
+TEMPLATE_EXPR=31
+INDIRECT_TEMPLATE_INSTANCE=28
+ID=4
diff --git a/src/org/antlr/codegen/ActionTranslatorLexer.java b/src/org/antlr/codegen/ActionTranslatorLexer.java
new file mode 100644
index 0000000..04bd530
--- /dev/null
+++ b/src/org/antlr/codegen/ActionTranslatorLexer.java
@@ -0,0 +1,3640 @@
+// $ANTLR 3.0b5 ActionTranslator.g 2006-11-23 01:51:22
+
+package org.antlr.codegen;
+import org.antlr.runtime.*;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.*;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+public class ActionTranslatorLexer extends Lexer {
+    public static final int LOCAL_ATTR=17;
+    public static final int SET_DYNAMIC_SCOPE_ATTR=18;
+    public static final int ISOLATED_DYNAMIC_SCOPE=24;
+    public static final int WS=5;
+    public static final int UNKNOWN_SYNTAX=35;
+    public static final int DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR=23;
+    public static final int SCOPE_INDEX_EXPR=21;
+    public static final int DYNAMIC_SCOPE_ATTR=19;
+    public static final int ISOLATED_TOKEN_REF=14;
+    public static final int SET_ATTRIBUTE=30;
+    public static final int SET_EXPR_ATTRIBUTE=29;
+    public static final int ACTION=27;
+    public static final int ERROR_X=34;
+    public static final int TEMPLATE_INSTANCE=26;
+    public static final int TOKEN_SCOPE_ATTR=10;
+    public static final int ISOLATED_LEXER_RULE_REF=15;
+    public static final int ESC=32;
+    public static final int SET_ENCLOSING_RULE_SCOPE_ATTR=7;
+    public static final int ATTR_VALUE_EXPR=6;
+    public static final int RULE_SCOPE_ATTR=12;
+    public static final int LABEL_REF=13;
+    public static final int INT=37;
+    public static final int ARG=25;
+    public static final int EOF=-1;
+    public static final int SET_LOCAL_ATTR=16;
+    public static final int TEXT=36;
+    public static final int Tokens=38;
+    public static final int DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR=22;
+    public static final int SET_TOKEN_SCOPE_ATTR=9;
+    public static final int ERROR_SCOPED_XY=20;
+    public static final int SET_RULE_SCOPE_ATTR=11;
+    public static final int ENCLOSING_RULE_SCOPE_ATTR=8;
+    public static final int ERROR_XY=33;
+    public static final int TEMPLATE_EXPR=31;
+    public static final int INDIRECT_TEMPLATE_INSTANCE=28;
+    public static final int ID=4;
+
+    public List chunks = new ArrayList();
+    Rule enclosingRule;
+    int outerAltNum;
+    Grammar grammar;
+    CodeGenerator generator;
+    antlr.Token actionToken;
+
+	int ruleNestingLevel = 0; 
+
+	public Token emit(int tokenType,
+					  int line, int charPosition,
+					  int channel,
+					  int start, int stop)
+	{
+		Token t = new CommonToken(input, tokenType, channel, start, stop);
+		t.setLine(line);
+		t.setText(text);
+		t.setCharPositionInLine(charPosition);
+		emit(t);
+		return t;
+	}
+
+		public ActionTranslatorLexer(CodeGenerator generator,
+    								 String ruleName,
+    								 GrammarAST actionAST)
+    	{
+    		this(new ANTLRStringStream(actionAST.token.getText()));
+    		this.generator = generator;
+    		this.grammar = generator.grammar;
+    	    this.enclosingRule = grammar.getRule(ruleName);
+    	    this.actionToken = actionAST.token;
+    	    this.outerAltNum = actionAST.outerAltNum;
+    	}
+
+    	public ActionTranslatorLexer(CodeGenerator generator,
+    								 String ruleName,
+    								 antlr.Token actionToken,
+    								 int outerAltNum)
+    	{
+    		this(new ANTLRStringStream(actionToken.getText()));
+    		this.generator = generator;
+    		grammar = generator.grammar;
+    	    this.enclosingRule = grammar.getRule(ruleName);
+    	    this.actionToken = actionToken;
+    		this.outerAltNum = outerAltNum;
+    	}
+
+    /*
+    public ActionTranslatorLexer(CharStream input, CodeGenerator generator,
+                                 Grammar grammar, Rule enclosingRule,
+                                 antlr.Token actionToken, int outerAltNum)
+    {
+        this(input);
+        this.grammar = grammar;
+        this.generator = generator;
+        this.enclosingRule = enclosingRule;
+        this.actionToken = actionToken;
+        this.outerAltNum = outerAltNum;
+    }
+    */
+
+    /** Return a list of strings and StringTemplate objects that
+     *  represent the translated action.
+     */
+    public List translateToChunks() {
+    	// System.out.println("###\naction="+action);
+    	Token t;
+    	do {
+    		t = nextToken();
+    	} while ( t.getType()!= Token.EOF );
+    	return chunks;
+    }
+
+    public String translate() {
+    	List theChunks = translateToChunks();
+    	//System.out.println("chunks="+a.chunks);
+    	StringBuffer buf = new StringBuffer();
+    	for (int i = 0; i < theChunks.size(); i++) {
+    		Object o = (Object) theChunks.get(i);
+    		buf.append(o);
+    	}
+    	//System.out.println("translated: "+buf.toString());
+    	return buf.toString();
+    }
+
+    public List translateAction(String action) {
+        ActionTranslatorLexer translator =
+            new ActionTranslatorLexer(generator,
+                                      enclosingRule.name,
+                                      new antlr.CommonToken(ANTLRParser.ACTION,action),outerAltNum);
+        return translator.translateToChunks();
+    }
+
+    public boolean isTokenRefInAlt(String id) {
+        return enclosingRule.getTokenRefsInAlt(id, outerAltNum)!=null;
+    }
+    public boolean isRuleRefInAlt(String id) {
+        return enclosingRule.getRuleRefsInAlt(id, outerAltNum)!=null;
+    }
+    public Grammar.LabelElementPair getElementLabel(String id) {
+        return enclosingRule.getLabel(id);
+    }
+
+    public void checkElementRefUniqueness(String ref, boolean isToken) {
+    		List refs = null;
+    		if ( isToken ) {
+    		    refs = enclosingRule.getTokenRefsInAlt(ref, outerAltNum);
+    		}
+    		else {
+    		    refs = enclosingRule.getRuleRefsInAlt(ref, outerAltNum);
+    		}
+    		if ( refs!=null && refs.size()>1 ) {
+    			ErrorManager.grammarError(ErrorManager.MSG_NONUNIQUE_REF,
+    									  grammar,
+    									  actionToken,
+    									  ref);
+    		}
+    }
+
+    /** For $rulelabel.name, return the Attribute found for name.  It
+     *  will be a predefined property or a return value.
+     */
+    public Attribute getRuleLabelAttribute(String ruleName, String attrName) {
+    	Rule r = grammar.getRule(ruleName);
+    	AttributeScope scope = r.getLocalAttributeScope(attrName);
+    	if ( scope!=null && !scope.isParameterScope ) {
+    		return scope.getAttribute(attrName);
+    	}
+    	return null;
+    }
+
+    AttributeScope resolveDynamicScope(String scopeName) {
+    	if ( grammar.getGlobalScope(scopeName)!=null ) {
+    		return grammar.getGlobalScope(scopeName);
+    	}
+    	Rule scopeRule = grammar.getRule(scopeName);
+    	if ( scopeRule!=null ) {
+    		return scopeRule.ruleScope;
+    	}
+    	return null; // not a valid dynamic scope
+    }
+
+    protected StringTemplate template(String name) {
+    	StringTemplate st = generator.getTemplates().getInstanceOf(name);
+    	chunks.add(st);
+    	return st;
+    }
+
+
+
+    public ActionTranslatorLexer() {;} 
+    public ActionTranslatorLexer(CharStream input) {
+        super(input);
+        ruleMemo = new HashMap[62+1];
+     }
+    public String getGrammarFileName() { return "ActionTranslator.g"; }
+
+    public Token nextToken() {
+        while (true) {
+            if ( input.LA(1)==CharStream.EOF ) {
+                return Token.EOF_TOKEN;
+            }
+            token = null;
+            tokenStartCharIndex = getCharIndex();
+    	text = null;
+            try {
+                int m = input.mark();
+                backtracking=1; 
+                failed=false;
+                mTokens();
+                backtracking=0;
+
+                if ( failed ) {
+                    input.rewind(m);
+                    input.consume(); 
+                }
+                else {
+                    return token;
+                }
+            }
+            catch (RecognitionException re) {
+                // shouldn't happen in backtracking mode, but...
+                reportError(re);
+                recover(re);
+            }
+        }
+    }
+
+    public void memoize(IntStream input,
+    		int ruleIndex,
+    		int ruleStartIndex)
+    {
+    if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+    }
+
+    public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+    if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+    return false;
+    }// $ANTLR start SET_ENCLOSING_RULE_SCOPE_ATTR
+    public void mSET_ENCLOSING_RULE_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_ENCLOSING_RULE_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:201:4: ( '$' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?)
+            // ActionTranslator.g:201:4: '$' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            // ActionTranslator.g:201:22: ( WS )?
+            int alt1=2;
+            int LA1_0 = input.LA(1);
+            if ( ((LA1_0>='\t' && LA1_0<='\n')||LA1_0==' ') ) {
+                alt1=1;
+            }
+            switch (alt1) {
+                case 1 :
+                    // ActionTranslator.g:201:22: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            int exprStart = getCharIndex();
+            mATTR_VALUE_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(';'); if (failed) return ;
+            if ( !(enclosingRule!=null &&
+            	                         x.getText().equals(enclosingRule.name) &&
+            	                         enclosingRule.getLocalAttributeScope(y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "SET_ENCLOSING_RULE_SCOPE_ATTR", "enclosingRule!=null &&\n\t                         $x.text.equals(enclosingRule.name) &&\n\t                         enclosingRule.getLocalAttributeScope($y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = null;
+              		AttributeScope scope = enclosingRule.getLocalAttributeScope(y.getText());
+              		if ( scope.isPredefinedRuleScope ) {
+              			if ( y.getText().equals("st") || y.getText().equals("tree") ) {
+              				st = template("ruleSetPropertyRef_"+y.getText());
+              				grammar.referenceRuleLabelPredefinedAttribute(x.getText());
+              				st.setAttribute("scope", x.getText());
+              				st.setAttribute("attr", y.getText());
+              				st.setAttribute("expr", translateAction(expr.getText()));
+              			} else {
+              				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+              										  grammar,
+              										  actionToken,
+              										  x.getText(),
+              										  y.getText());
+              			}
+              		}
+              	    else if ( scope.isPredefinedLexerRuleScope ) {
+              	    	// this is a better message to emit than the previous one...
+              			ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+              									  grammar,
+              									  actionToken,
+              									  x.getText(),
+              									  y.getText());
+              	    }
+              		else if ( scope.isParameterScope ) {
+              			st = template("parameterSetAttributeRef");
+              			st.setAttribute("attr", scope.getAttribute(y.getText()));
+              			st.setAttribute("expr", translateAction(expr.getText()));
+              		}
+              		else { // must be return value
+              			st = template("returnSetAttributeRef");
+              			st.setAttribute("ruleDescriptor", enclosingRule);
+              			st.setAttribute("attr", scope.getAttribute(y.getText()));
+              			st.setAttribute("expr", translateAction(expr.getText()));
+              		}
+              		
+            }
+
+            }
+
+
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_ENCLOSING_RULE_SCOPE_ATTR
+
+    // $ANTLR start ENCLOSING_RULE_SCOPE_ATTR
+    public void mENCLOSING_RULE_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ENCLOSING_RULE_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:246:4: ( '$' x= ID '.' y= ID {...}?)
+            // ActionTranslator.g:246:4: '$' x= ID '.' y= ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( !(enclosingRule!=null &&
+            	                         x.getText().equals(enclosingRule.name) &&
+            	                         enclosingRule.getLocalAttributeScope(y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "ENCLOSING_RULE_SCOPE_ATTR", "enclosingRule!=null &&\n\t                         $x.text.equals(enclosingRule.name) &&\n\t                         enclosingRule.getLocalAttributeScope($y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = null;
+              		AttributeScope scope = enclosingRule.getLocalAttributeScope(y.getText());
+              		if ( scope.isPredefinedRuleScope ) {
+              			st = template("rulePropertyRef_"+y.getText());
+              			grammar.referenceRuleLabelPredefinedAttribute(x.getText());
+              			st.setAttribute("scope", x.getText());
+              			st.setAttribute("attr", y.getText());
+              		}
+              	    else if ( scope.isPredefinedLexerRuleScope ) {
+              	    	// perhaps not the most precise error message to use, but...
+              			ErrorManager.grammarError(ErrorManager.MSG_RULE_HAS_NO_ARGS,
+              									  grammar,
+              									  actionToken,
+              									  x.getText());
+              	    }
+              		else if ( scope.isParameterScope ) {
+              			st = template("parameterAttributeRef");
+              			st.setAttribute("attr", scope.getAttribute(y.getText()));
+              		}
+              		else { // must be return value
+              			st = template("returnAttributeRef");
+              			st.setAttribute("ruleDescriptor", enclosingRule);
+              			st.setAttribute("attr", scope.getAttribute(y.getText()));
+              		}
+              		
+            }
+
+            }
+
+
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ENCLOSING_RULE_SCOPE_ATTR
+
+    // $ANTLR start SET_TOKEN_SCOPE_ATTR
+    public void mSET_TOKEN_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_TOKEN_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:280:4: ( '$' x= ID '.' y= ID ( WS )? '=' {...}?)
+            // ActionTranslator.g:280:4: '$' x= ID '.' y= ID ( WS )? '=' {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            // ActionTranslator.g:280:22: ( WS )?
+            int alt2=2;
+            int LA2_0 = input.LA(1);
+            if ( ((LA2_0>='\t' && LA2_0<='\n')||LA2_0==' ') ) {
+                alt2=1;
+            }
+            switch (alt2) {
+                case 1 :
+                    // ActionTranslator.g:280:22: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            if ( !(enclosingRule!=null && input.LA(1)!='=' &&
+            	                         (enclosingRule.getTokenLabel(x.getText())!=null||
+            	                          isTokenRefInAlt(x.getText())) &&
+            	                         AttributeScope.tokenScope.getAttribute(y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "SET_TOKEN_SCOPE_ATTR", "enclosingRule!=null && input.LA(1)!='=' &&\n\t                         (enclosingRule.getTokenLabel($x.text)!=null||\n\t                          isTokenRefInAlt($x.text)) &&\n\t                         AttributeScope.tokenScope.getAttribute($y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+              								  grammar,
+              								  actionToken,
+              								  x.getText(),
+              								  y.getText());
+              		
+            }
+
+            }
+
+
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_TOKEN_SCOPE_ATTR
+
+    // $ANTLR start TOKEN_SCOPE_ATTR
+    public void mTOKEN_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = TOKEN_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:299:4: ( '$' x= ID '.' y= ID {...}?)
+            // ActionTranslator.g:299:4: '$' x= ID '.' y= ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( !(enclosingRule!=null &&
+            	                         (enclosingRule.getTokenLabel(x.getText())!=null||
+            	                          isTokenRefInAlt(x.getText())) &&
+            	                         AttributeScope.tokenScope.getAttribute(y.getText())!=null &&
+            	                         (grammar.type!=Grammar.LEXER ||
+            	                         getElementLabel(x.getText()).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||
+            	                         getElementLabel(x.getText()).elementRef.token.getType()==ANTLRParser.STRING_LITERAL)) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "TOKEN_SCOPE_ATTR", "enclosingRule!=null &&\n\t                         (enclosingRule.getTokenLabel($x.text)!=null||\n\t                          isTokenRefInAlt($x.text)) &&\n\t                         AttributeScope.tokenScope.getAttribute($y.text)!=null &&\n\t                         (grammar.type!=Grammar.LEXER ||\n\t                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||\n\t     [...]
+            }
+            if ( backtracking==1 ) {
+
+              		String label = x.getText();
+              		if ( enclosingRule.getTokenLabel(x.getText())==null ) {
+              			// $tokenref.attr  gotta get old label or compute new one
+              			checkElementRefUniqueness(x.getText(), true);
+              			label = enclosingRule.getElementLabel(x.getText(), outerAltNum, generator);
+              			if ( label==null ) {
+              				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+              										  grammar,
+              										  actionToken,
+              										  "$"+x.getText()+"."+y.getText());
+              				label = x.getText();
+              			}
+              		}
+              		StringTemplate st = template("tokenLabelPropertyRef_"+y.getText());
+              		st.setAttribute("scope", label);
+              		st.setAttribute("attr", AttributeScope.tokenScope.getAttribute(y.getText()));
+              		
+            }
+
+            }
+
+
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end TOKEN_SCOPE_ATTR
+
+    // $ANTLR start SET_RULE_SCOPE_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:337): matches "$x.y ( WS )? ="
+     * — an attempted assignment to a rule-scope attribute.  The predicates
+     * require an enclosing rule and that $x.y resolves to a known rule-label
+     * attribute; the action then reports MSG_WRITE_TO_READONLY_ATTR, because
+     * such attributes are read-only from actions.
+     */
+    public void mSET_RULE_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_RULE_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+
+            Grammar.LabelElementPair pair=null;
+            String refdRuleName=null;
+
+            // ActionTranslator.g:337:4: ( '$' x= ID '.' y= ID ( WS )? '=' {...}?{...}?)
+            // ActionTranslator.g:337:4: '$' x= ID '.' y= ID ( WS )? '=' {...}?{...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            // ActionTranslator.g:337:22: ( WS )?  -- optional whitespace before '='
+            int alt3=2;
+            int LA3_0 = input.LA(1);
+            if ( ((LA3_0>='\t' && LA3_0<='\n')||LA3_0==' ') ) {
+                alt3=1;
+            }
+            switch (alt3) {
+                case 1 :
+                    // ActionTranslator.g:337:22: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            // Semantic predicate: must be inside a rule and not a '==' comparison.
+            if ( !(enclosingRule!=null && input.LA(1)!='=') ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "SET_RULE_SCOPE_ATTR", "enclosingRule!=null && input.LA(1)!='='");
+            }
+            if ( backtracking==1 ) {
+
+              		pair = enclosingRule.getRuleLabel(x.getText());
+              		refdRuleName = x.getText();
+              		if ( pair!=null ) {
+              			refdRuleName = pair.referencedRuleName;
+              		}
+              		
+            }
+            // Second predicate: x must be a rule label or rule ref in this alt,
+            // and y must be a known attribute of the referenced rule.
+            if ( !((enclosingRule.getRuleLabel(x.getText())!=null || isRuleRefInAlt(x.getText())) &&
+            	      getRuleLabelAttribute(enclosingRule.getRuleLabel(x.getText())!=null?enclosingRule.getRuleLabel(x.getText()).referencedRuleName:x.getText(),y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "SET_RULE_SCOPE_ATTR", "(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&\n\t      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+              								  grammar,
+              								  actionToken,
+              								  x.getText(),
+              								  y.getText());
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_RULE_SCOPE_ATTR
+
+    // $ANTLR start RULE_SCOPE_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:366): matches "$x.y" — a read of
+     * attribute y through rule label or rule reference x.  The action resolves
+     * the referenced rule, looks up y's attribute scope, and fills one of the
+     * templates ruleLabelPropertyRef_<y>, lexerRuleLabelPropertyRef_<y>, or
+     * ruleLabelRef accordingly.  Parameter-scope refs are left unhandled
+     * (see the generated "TODO: error!" branch).
+     */
+    public void mRULE_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = RULE_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+
+            Grammar.LabelElementPair pair=null;
+            String refdRuleName=null;
+
+            // ActionTranslator.g:366:4: ( '$' x= ID '.' y= ID {...}?{...}?)
+            // ActionTranslator.g:366:4: '$' x= ID '.' y= ID {...}?{...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( !(enclosingRule!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "RULE_SCOPE_ATTR", "enclosingRule!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		pair = enclosingRule.getRuleLabel(x.getText());
+              		refdRuleName = x.getText();
+              		if ( pair!=null ) {
+              			refdRuleName = pair.referencedRuleName;
+              		}
+              		
+            }
+            // Predicate: x must be a rule label or a rule ref in this alt, and
+            // y must be a resolvable attribute of the referenced rule.
+            if ( !((enclosingRule.getRuleLabel(x.getText())!=null || isRuleRefInAlt(x.getText())) &&
+            	      getRuleLabelAttribute(enclosingRule.getRuleLabel(x.getText())!=null?enclosingRule.getRuleLabel(x.getText()).referencedRuleName:x.getText(),y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "RULE_SCOPE_ATTR", "(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&\n\t      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		String label = x.getText();
+              		if ( pair==null ) {
+              			// $ruleref.attr  gotta get old label or compute new one
+              			checkElementRefUniqueness(x.getText(), false);
+              			label = enclosingRule.getElementLabel(x.getText(), outerAltNum, generator);
+              			if ( label==null ) {
+              				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+              										  grammar,
+              										  actionToken,
+              										  "$"+x.getText()+"."+y.getText());
+              				label = x.getText();
+              			}
+              		}
+              		StringTemplate st;
+              		Rule refdRule = grammar.getRule(refdRuleName);
+              		AttributeScope scope = refdRule.getLocalAttributeScope(y.getText());
+              		if ( scope.isPredefinedRuleScope ) {
+              			st = template("ruleLabelPropertyRef_"+y.getText());
+              			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+              			st.setAttribute("scope", label);
+              			st.setAttribute("attr", y.getText());
+              		}
+              		else if ( scope.isPredefinedLexerRuleScope ) {
+              			st = template("lexerRuleLabelPropertyRef_"+y.getText());
+              			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+              			st.setAttribute("scope", label);
+              			st.setAttribute("attr", y.getText());
+              		}
+              		else if ( scope.isParameterScope ) {
+              			// TODO: error!
+					  }
+              		else {
+              			st = template("ruleLabelRef");
+              			st.setAttribute("referencedRule", refdRule);
+              			st.setAttribute("scope", label);
+              			st.setAttribute("attr", scope.getAttribute(y.getText()));
+              		}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end RULE_SCOPE_ATTR
+
+    // $ANTLR start LABEL_REF
+    /**
+     * Generated lexer rule (ActionTranslator.g:424): matches "$ID" where ID is
+     * an element label in the enclosing rule (but not a rule label).  The
+     * action translates it to template "tokenLabelRef" for token/char labels,
+     * otherwise "listLabelRef".
+     */
+    public void mLABEL_REF() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = LABEL_REF;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:424:4: ( '$' ID {...}?)
+            // ActionTranslator.g:424:4: '$' ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID1Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID1 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID1Start, getCharIndex()-1);
+            // Predicate: ID must be an element label and must NOT be a rule label.
+            if ( !(enclosingRule!=null &&
+            	            getElementLabel(ID1.getText())!=null &&
+            		        enclosingRule.getRuleLabel(ID1.getText())==null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "LABEL_REF", "enclosingRule!=null &&\n\t            getElementLabel($ID.text)!=null &&\n\t\t        enclosingRule.getRuleLabel($ID.text)==null");
+            }
+            if ( backtracking==1 ) {
+
+              		StringTemplate st;
+              		Grammar.LabelElementPair pair = getElementLabel(ID1.getText());
+              		if ( pair.type==Grammar.TOKEN_LABEL ||
+             			 pair.type==Grammar.CHAR_LABEL )
+					{
+              			st = template("tokenLabelRef");
+              		}
+              		else {
+              			st = template("listLabelRef");
+              		}
+              		st.setAttribute("label", ID1.getText());
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end LABEL_REF
+
+    // $ANTLR start ISOLATED_TOKEN_REF
+    /**
+     * Generated lexer rule (ActionTranslator.g:443): matches "$ID" where ID is
+     * an unlabeled token reference in the current alt of a non-lexer grammar.
+     * The action computes (or reuses) an implicit label and fills template
+     * "tokenLabelRef"; a forward reference raises MSG_FORWARD_ELEMENT_REF.
+     */
+    public void mISOLATED_TOKEN_REF() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ISOLATED_TOKEN_REF;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:443:4: ( '$' ID {...}?)
+            // ActionTranslator.g:443:4: '$' ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID2Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID2 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID2Start, getCharIndex()-1);
+            if ( !(grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt(ID2.getText())) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "ISOLATED_TOKEN_REF", "grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt($ID.text)");
+            }
+            if ( backtracking==1 ) {
+
+              		String label = enclosingRule.getElementLabel(ID2.getText(), outerAltNum, generator);
+              		checkElementRefUniqueness(ID2.getText(), true);
+              		if ( label==null ) {
+              			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+              									  grammar,
+              									  actionToken,
+              									  ID2.getText());
+              		}
+              		else {
+              			StringTemplate st = template("tokenLabelRef");
+              			st.setAttribute("label", label);
+              		}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ISOLATED_TOKEN_REF
+
+    // $ANTLR start ISOLATED_LEXER_RULE_REF
+    /**
+     * Generated lexer rule (ActionTranslator.g:463): matches "$ID" where ID is
+     * an unlabeled rule reference in the current alt of a LEXER grammar.  The
+     * action computes (or reuses) a label and fills template "lexerRuleLabel";
+     * a forward reference raises MSG_FORWARD_ELEMENT_REF.
+     */
+    public void mISOLATED_LEXER_RULE_REF() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ISOLATED_LEXER_RULE_REF;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:463:4: ( '$' ID {...}?)
+            // ActionTranslator.g:463:4: '$' ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID3Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID3 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID3Start, getCharIndex()-1);
+            if ( !(grammar.type==Grammar.LEXER &&
+            	             enclosingRule!=null &&
+            	             isRuleRefInAlt(ID3.getText())) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "ISOLATED_LEXER_RULE_REF", "grammar.type==Grammar.LEXER &&\n\t             enclosingRule!=null &&\n\t             isRuleRefInAlt($ID.text)");
+            }
+            if ( backtracking==1 ) {
+
+              		String label = enclosingRule.getElementLabel(ID3.getText(), outerAltNum, generator);
+              		checkElementRefUniqueness(ID3.getText(), false);
+              		if ( label==null ) {
+              			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+              									  grammar,
+              									  actionToken,
+              									  ID3.getText());
+              		}
+              		else {
+              			StringTemplate st = template("lexerRuleLabel");
+              			st.setAttribute("label", label);
+              		}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ISOLATED_LEXER_RULE_REF
+
+    // $ANTLR start SET_LOCAL_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:495): matches
+     * "$ID ( WS )? = expr ;" — an assignment to a local attribute of the
+     * enclosing rule.  Depending on the attribute's scope the action fills
+     * template ruleSetPropertyRef_<ID> (only "tree"/"st" are writable
+     * predefined attributes; others raise MSG_WRITE_TO_READONLY_ATTR),
+     * parameterSetAttributeRef, or returnSetAttributeRef.
+     */
+    public void mSET_LOCAL_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_LOCAL_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:495:4: ( '$' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?)
+            // ActionTranslator.g:495:4: '$' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID4Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID4 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID4Start, getCharIndex()-1);
+            // ActionTranslator.g:495:11: ( WS )?  -- optional whitespace before '='
+            int alt4=2;
+            int LA4_0 = input.LA(1);
+            if ( ((LA4_0>='\t' && LA4_0<='\n')||LA4_0==' ') ) {
+                alt4=1;
+            }
+            switch (alt4) {
+                case 1 :
+                    // ActionTranslator.g:495:11: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            int exprStart = getCharIndex();
+            mATTR_VALUE_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(';'); if (failed) return ;
+            // Predicate: ID must name a local attribute scope that is not a
+            // predefined lexer-rule scope.
+            if ( !(enclosingRule!=null
+            													&& enclosingRule.getLocalAttributeScope(ID4.getText())!=null
+            													&& !enclosingRule.getLocalAttributeScope(ID4.getText()).isPredefinedLexerRuleScope) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "SET_LOCAL_ATTR", "enclosingRule!=null\n\t\t\t\t\t\t\t\t\t\t\t\t\t&& enclosingRule.getLocalAttributeScope($ID.text)!=null\n\t\t\t\t\t\t\t\t\t\t\t\t\t&& !enclosingRule.getLocalAttributeScope($ID.text).isPredefinedLexerRuleScope");
+            }
+            if ( backtracking==1 ) {
+
+              		StringTemplate st;
+              		AttributeScope scope = enclosingRule.getLocalAttributeScope(ID4.getText());
+              		if ( scope.isPredefinedRuleScope ) {
+              			if (ID4.getText().equals("tree") || ID4.getText().equals("st")) {
+              				st = template("ruleSetPropertyRef_"+ID4.getText());
+              				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+              				st.setAttribute("scope", enclosingRule.name);
+              				st.setAttribute("attr", ID4.getText());
+              				st.setAttribute("expr", translateAction(expr.getText()));
+              			} else {
+              				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+              										 grammar,
+              										 actionToken,
+              										 ID4.getText(),
+              										 "");
+              			}
+              		}
+              		else if ( scope.isParameterScope ) {
+              			st = template("parameterSetAttributeRef");
+              			st.setAttribute("attr", scope.getAttribute(ID4.getText()));
+              			st.setAttribute("expr", translateAction(expr.getText()));
+              		}
+              		else {
+              			st = template("returnSetAttributeRef");
+              			st.setAttribute("ruleDescriptor", enclosingRule);
+              			st.setAttribute("attr", scope.getAttribute(ID4.getText()));
+              			st.setAttribute("expr", translateAction(expr.getText()));
+              			}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_LOCAL_ATTR
+
+    // $ANTLR start LOCAL_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:531): matches "$ID" — a read of
+     * a local attribute of the enclosing rule.  The action dispatches on the
+     * attribute's scope to one of the templates rulePropertyRef_<ID>,
+     * lexerRulePropertyRef_<ID>, parameterAttributeRef, or returnAttributeRef.
+     */
+    public void mLOCAL_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = LOCAL_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:531:4: ( '$' ID {...}?)
+            // ActionTranslator.g:531:4: '$' ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID5Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID5 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID5Start, getCharIndex()-1);
+            if ( !(enclosingRule!=null && enclosingRule.getLocalAttributeScope(ID5.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "LOCAL_ATTR", "enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		StringTemplate st;
+              		AttributeScope scope = enclosingRule.getLocalAttributeScope(ID5.getText());
+              		if ( scope.isPredefinedRuleScope ) {
+              			st = template("rulePropertyRef_"+ID5.getText());
+              			grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+              			st.setAttribute("scope", enclosingRule.name);
+              			st.setAttribute("attr", ID5.getText());
+              		}
+              		else if ( scope.isPredefinedLexerRuleScope ) {
+              			st = template("lexerRulePropertyRef_"+ID5.getText());
+              			st.setAttribute("scope", enclosingRule.name);
+              			st.setAttribute("attr", ID5.getText());
+              		}
+              		else if ( scope.isParameterScope ) {
+              			st = template("parameterAttributeRef");
+              			st.setAttribute("attr", scope.getAttribute(ID5.getText()));
+              		}
+              		else {
+              			st = template("returnAttributeRef");
+              			st.setAttribute("ruleDescriptor", enclosingRule);
+              			st.setAttribute("attr", scope.getAttribute(ID5.getText()));
+              		}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end LOCAL_ATTR
+
+    // $ANTLR start SET_DYNAMIC_SCOPE_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:572): matches
+     * "$x::y ( WS )? = expr ;" — an assignment to attribute y of dynamic
+     * scope x.  The predicate requires that x resolves to a dynamic scope
+     * containing y; the action fills template "scopeSetAttributeRef" with the
+     * translated right-hand-side expression.
+     */
+    public void mSET_DYNAMIC_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_DYNAMIC_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:572:4: ( '$' x= ID '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?)
+            // ActionTranslator.g:572:4: '$' x= ID '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match("::"); if (failed) return ;
+
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            // ActionTranslator.g:572:23: ( WS )?  -- optional whitespace before '='
+            int alt5=2;
+            int LA5_0 = input.LA(1);
+            if ( ((LA5_0>='\t' && LA5_0<='\n')||LA5_0==' ') ) {
+                alt5=1;
+            }
+            switch (alt5) {
+                case 1 :
+                    // ActionTranslator.g:572:23: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            int exprStart = getCharIndex();
+            mATTR_VALUE_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(';'); if (failed) return ;
+            if ( !(resolveDynamicScope(x.getText())!=null &&
+            						     resolveDynamicScope(x.getText()).getAttribute(y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "SET_DYNAMIC_SCOPE_ATTR", "resolveDynamicScope($x.text)!=null &&\n\t\t\t\t\t\t     resolveDynamicScope($x.text).getAttribute($y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		AttributeScope scope = resolveDynamicScope(x.getText());
+              		if ( scope!=null ) {
+              			StringTemplate st = template("scopeSetAttributeRef");
+              			st.setAttribute("scope", x.getText());
+              			st.setAttribute("attr",  scope.getAttribute(y.getText()));
+              			st.setAttribute("expr",  translateAction(expr.getText()));
+              		}
+              		else {
+              			// error: invalid dynamic attribute
+              		}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_DYNAMIC_SCOPE_ATTR
+
+    // $ANTLR start DYNAMIC_SCOPE_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:591): matches "$x::y" — a read
+     * of attribute y from dynamic scope x.  The predicate requires x to
+     * resolve to a dynamic scope containing y; the action fills template
+     * "scopeAttributeRef".
+     */
+    public void mDYNAMIC_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = DYNAMIC_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:591:4: ( '$' x= ID '::' y= ID {...}?)
+            // ActionTranslator.g:591:4: '$' x= ID '::' y= ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match("::"); if (failed) return ;
+
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( !(resolveDynamicScope(x.getText())!=null &&
+            						     resolveDynamicScope(x.getText()).getAttribute(y.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "DYNAMIC_SCOPE_ATTR", "resolveDynamicScope($x.text)!=null &&\n\t\t\t\t\t\t     resolveDynamicScope($x.text).getAttribute($y.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		AttributeScope scope = resolveDynamicScope(x.getText());
+              		if ( scope!=null ) {
+              			StringTemplate st = template("scopeAttributeRef");
+              			st.setAttribute("scope", x.getText());
+              			st.setAttribute("attr",  scope.getAttribute(y.getText()));
+              		}
+              		else {
+              			// error: invalid dynamic attribute
+              		}
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end DYNAMIC_SCOPE_ATTR
+
+    // $ANTLR start ERROR_SCOPED_XY
+    /**
+     * Generated lexer rule (ActionTranslator.g:610): fallback for "$x::y" when
+     * the scope did not validate (no predicate here).  The action keeps the
+     * matched text as a plain chunk and reports an invalid-scope error via
+     * generator.issueInvalidScopeError().
+     */
+    public void mERROR_SCOPED_XY() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ERROR_SCOPED_XY;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:610:4: ( '$' x= ID '::' y= ID )
+            // ActionTranslator.g:610:4: '$' x= ID '::' y= ID
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match("::"); if (failed) return ;
+
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( backtracking==1 ) {
+
+              		chunks.add(getText());
+              		generator.issueInvalidScopeError(x.getText(),y.getText(),
+              		                                 enclosingRule,actionToken,
+              		                                 outerAltNum);		
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ERROR_SCOPED_XY
+
+    // $ANTLR start DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
+    /**
+     * Generated lexer rule (ActionTranslator.g:628): matches
+     * "$x[-expr]::y" — a read of attribute y from a negatively-indexed entry
+     * of dynamic scope x.  The action fills template "scopeAttributeRef" and
+     * also sets "negIndex" to the index expression text.  NOTE(review): unlike
+     * DYNAMIC_SCOPE_ATTR there is no null check on resolveDynamicScope(x)
+     * here — an unresolvable scope would NPE; behavior preserved as generated.
+     */
+    public void mDYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:628:4: ( '$' x= ID '[' '-' expr= SCOPE_INDEX_EXPR ']' '::' y= ID )
+            // ActionTranslator.g:628:4: '$' x= ID '[' '-' expr= SCOPE_INDEX_EXPR ']' '::' y= ID
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('['); if (failed) return ;
+            match('-'); if (failed) return ;
+            int exprStart = getCharIndex();
+            mSCOPE_INDEX_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(']'); if (failed) return ;
+            match("::"); if (failed) return ;
+
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = template("scopeAttributeRef");
+              		st.setAttribute("scope",    x.getText());
+              		st.setAttribute("attr",     resolveDynamicScope(x.getText()).getAttribute(y.getText()));
+              		st.setAttribute("negIndex", expr.getText());
+              		
+            }
+
+            }
+
+
+            // Emit only from the outermost rule invocation, and only if no
+            // token was produced already (token presumably set by emit()).
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
+
+    // $ANTLR start DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
+    // ANTLR-generated lexer rule: matches '$' x=ID '[' expr=SCOPE_INDEX_EXPR ']' '::' y=ID
+    // (an absolutely-indexed dynamic-scope attribute reference, e.g. $r[i]::a).  Same shape
+    // as the negative-index rule above but without the '-' and using the "index" attribute.
+    public void mDYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:639:4: ( '$' x= ID '[' expr= SCOPE_INDEX_EXPR ']' '::' y= ID )
+            // ActionTranslator.g:639:4: '$' x= ID '[' expr= SCOPE_INDEX_EXPR ']' '::' y= ID
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('['); if (failed) return ;
+            int exprStart = getCharIndex();
+            mSCOPE_INDEX_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(']'); if (failed) return ;
+            match("::"); if (failed) return ;
+
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = template("scopeAttributeRef");
+              		st.setAttribute("scope", x.getText());
+              		st.setAttribute("attr",  resolveDynamicScope(x.getText()).getAttribute(y.getText()));
+              		st.setAttribute("index", expr.getText());
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
+
+    // $ANTLR start SCOPE_INDEX_EXPR
+    // ANTLR-generated helper rule: consumes one or more characters up to (but not
+    // including) the closing ']'.  No token is emitted; callers capture the matched
+    // text themselves via char indexes.
+    public void mSCOPE_INDEX_EXPR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:651:4: ( (~ ']' )+ )
+            // ActionTranslator.g:651:4: (~ ']' )+
+            {
+            // ActionTranslator.g:651:4: (~ ']' )+
+            int cnt6=0;
+            loop6:
+            do {
+                int alt6=2;
+                int LA6_0 = input.LA(1);
+                // any char except ']' ('\u005D'); upper bound \uFFFE is the ANTLR3 max char
+                if ( ((LA6_0>='\u0000' && LA6_0<='\\')||(LA6_0>='^' && LA6_0<='\uFFFE')) ) {
+                    alt6=1;
+                }
+
+
+                switch (alt6) {
+            	case 1 :
+            	    // ActionTranslator.g:651:5: ~ ']'
+            	    {
+            	    if ( (input.LA(1)>='\u0000' && input.LA(1)<='\\')||(input.LA(1)>='^' && input.LA(1)<='\uFFFE') ) {
+            	        input.consume();
+            	    failed=false;
+            	    }
+            	    else {
+            	        if (backtracking>0) {failed=true; return ;}
+            	        MismatchedSetException mse =
+            	            new MismatchedSetException(null,input);
+            	        recover(mse);    throw mse;
+            	    }
+
+
+            	    }
+            	    break;
+
+            	default :
+            	    // (+)-loop requires at least one iteration before exiting
+            	    if ( cnt6 >= 1 ) break loop6;
+            	    if (backtracking>0) {failed=true; return ;}
+                        EarlyExitException eee =
+                            new EarlyExitException(6, input);
+                        throw eee;
+                }
+                cnt6++;
+            } while (true);
+
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SCOPE_INDEX_EXPR
+
+    // $ANTLR start ISOLATED_DYNAMIC_SCOPE
+    // ANTLR-generated lexer rule: matches '$' ID guarded by the semantic predicate
+    // resolveDynamicScope($ID.text)!=null, i.e. a bare reference to a known dynamic
+    // scope.  Produces an "isolatedDynamicScopeRef" StringTemplate.
+    public void mISOLATED_DYNAMIC_SCOPE() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ISOLATED_DYNAMIC_SCOPE;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:660:4: ( '$' ID {...}?)
+            // ActionTranslator.g:660:4: '$' ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID6Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID6 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID6Start, getCharIndex()-1);
+            // semantic predicate: the ID must name a dynamic scope, else this rule fails
+            if ( !(resolveDynamicScope(ID6.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "ISOLATED_DYNAMIC_SCOPE", "resolveDynamicScope($ID.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = template("isolatedDynamicScopeRef");
+              		st.setAttribute("scope", ID6.getText());
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ISOLATED_DYNAMIC_SCOPE
+
+    // $ANTLR start TEMPLATE_INSTANCE
+    // ANTLR-generated lexer rule: matches a template constructor by name,
+    // '%' ID '(' (WS? ARG (',' WS? ARG)* WS?)? ')', e.g. %foo(a={...}, b={...}).
+    // On a successful (non-backtracking) match it hands the text after '%' to
+    // generator.translateTemplateConstructor and appends the resulting template chunk.
+    public void mTEMPLATE_INSTANCE() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = TEMPLATE_INSTANCE;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:673:4: ( '%' ID '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')' )
+            // ActionTranslator.g:673:4: '%' ID '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')'
+            {
+            match('%'); if (failed) return ;
+            mID(); if (failed) return ;
+            match('('); if (failed) return ;
+            // optional argument list: ( WS? ARG (',' WS? ARG)* WS? )?
+            // ActionTranslator.g:673:15: ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )?
+            int alt11=2;
+            int LA11_0 = input.LA(1);
+            if ( ((LA11_0>='\t' && LA11_0<='\n')||LA11_0==' '||(LA11_0>='A' && LA11_0<='Z')||LA11_0=='_'||(LA11_0>='a' && LA11_0<='z')) ) {
+                alt11=1;
+            }
+            switch (alt11) {
+                case 1 :
+                    // ActionTranslator.g:673:17: ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )?
+                    {
+                    // ActionTranslator.g:673:17: ( WS )?
+                    int alt7=2;
+                    int LA7_0 = input.LA(1);
+                    if ( ((LA7_0>='\t' && LA7_0<='\n')||LA7_0==' ') ) {
+                        alt7=1;
+                    }
+                    switch (alt7) {
+                        case 1 :
+                            // ActionTranslator.g:673:17: WS
+                            {
+                            mWS(); if (failed) return ;
+
+                            }
+                            break;
+
+                    }
+
+                    mARG(); if (failed) return ;
+                    // zero or more ',' WS? ARG repetitions
+                    // ActionTranslator.g:673:25: ( ',' ( WS )? ARG )*
+                    loop9:
+                    do {
+                        int alt9=2;
+                        int LA9_0 = input.LA(1);
+                        if ( (LA9_0==',') ) {
+                            alt9=1;
+                        }
+
+
+                        switch (alt9) {
+                    	case 1 :
+                    	    // ActionTranslator.g:673:26: ',' ( WS )? ARG
+                    	    {
+                    	    match(','); if (failed) return ;
+                    	    // ActionTranslator.g:673:30: ( WS )?
+                    	    int alt8=2;
+                    	    int LA8_0 = input.LA(1);
+                    	    if ( ((LA8_0>='\t' && LA8_0<='\n')||LA8_0==' ') ) {
+                    	        alt8=1;
+                    	    }
+                    	    switch (alt8) {
+                    	        case 1 :
+                    	            // ActionTranslator.g:673:30: WS
+                    	            {
+                    	            mWS(); if (failed) return ;
+
+                    	            }
+                    	            break;
+
+                    	    }
+
+                    	    mARG(); if (failed) return ;
+
+                    	    }
+                    	    break;
+
+                    	default :
+                    	    break loop9;
+                        }
+                    } while (true);
+
+                    // ActionTranslator.g:673:40: ( WS )?
+                    int alt10=2;
+                    int LA10_0 = input.LA(1);
+                    if ( ((LA10_0>='\t' && LA10_0<='\n')||LA10_0==' ') ) {
+                        alt10=1;
+                    }
+                    switch (alt10) {
+                        case 1 :
+                            // ActionTranslator.g:673:40: WS
+                            {
+                            mWS(); if (failed) return ;
+
+                            }
+                            break;
+
+                    }
+
+
+                    }
+                    break;
+
+            }
+
+            match(')'); if (failed) return ;
+            if ( backtracking==1 ) {
+
+              		// strip the leading '%' from the matched text
+              		String action = getText().substring(1,getText().length());
+              		String ruleName = "<outside-of-rule>";
+              		if ( enclosingRule!=null ) {
+              			ruleName = enclosingRule.name;
+              		}
+              		StringTemplate st =
+              			generator.translateTemplateConstructor(ruleName,
+              												   outerAltNum,
+              												   actionToken,
+              												   action);
+              		if ( st!=null ) {
+              			chunks.add(st);
+              		}
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end TEMPLATE_INSTANCE
+
+    // $ANTLR start INDIRECT_TEMPLATE_INSTANCE
+    // ANTLR-generated lexer rule: matches an indirect template constructor,
+    // '%' '(' ACTION ')' '(' (WS? ARG (',' WS? ARG)* WS?)? ')', where the template
+    // name comes from the ACTION expression.  Structure parallels TEMPLATE_INSTANCE.
+    public void mINDIRECT_TEMPLATE_INSTANCE() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = INDIRECT_TEMPLATE_INSTANCE;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:694:4: ( '%' '(' ACTION ')' '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')' )
+            // ActionTranslator.g:694:4: '%' '(' ACTION ')' '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')'
+            {
+            match('%'); if (failed) return ;
+            match('('); if (failed) return ;
+            mACTION(); if (failed) return ;
+            match(')'); if (failed) return ;
+            match('('); if (failed) return ;
+            // optional argument list: ( WS? ARG (',' WS? ARG)* WS? )?
+            // ActionTranslator.g:694:27: ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )?
+            int alt16=2;
+            int LA16_0 = input.LA(1);
+            if ( ((LA16_0>='\t' && LA16_0<='\n')||LA16_0==' '||(LA16_0>='A' && LA16_0<='Z')||LA16_0=='_'||(LA16_0>='a' && LA16_0<='z')) ) {
+                alt16=1;
+            }
+            switch (alt16) {
+                case 1 :
+                    // ActionTranslator.g:694:29: ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )?
+                    {
+                    // ActionTranslator.g:694:29: ( WS )?
+                    int alt12=2;
+                    int LA12_0 = input.LA(1);
+                    if ( ((LA12_0>='\t' && LA12_0<='\n')||LA12_0==' ') ) {
+                        alt12=1;
+                    }
+                    switch (alt12) {
+                        case 1 :
+                            // ActionTranslator.g:694:29: WS
+                            {
+                            mWS(); if (failed) return ;
+
+                            }
+                            break;
+
+                    }
+
+                    mARG(); if (failed) return ;
+                    // zero or more ',' WS? ARG repetitions
+                    // ActionTranslator.g:694:37: ( ',' ( WS )? ARG )*
+                    loop14:
+                    do {
+                        int alt14=2;
+                        int LA14_0 = input.LA(1);
+                        if ( (LA14_0==',') ) {
+                            alt14=1;
+                        }
+
+
+                        switch (alt14) {
+                    	case 1 :
+                    	    // ActionTranslator.g:694:38: ',' ( WS )? ARG
+                    	    {
+                    	    match(','); if (failed) return ;
+                    	    // ActionTranslator.g:694:42: ( WS )?
+                    	    int alt13=2;
+                    	    int LA13_0 = input.LA(1);
+                    	    if ( ((LA13_0>='\t' && LA13_0<='\n')||LA13_0==' ') ) {
+                    	        alt13=1;
+                    	    }
+                    	    switch (alt13) {
+                    	        case 1 :
+                    	            // ActionTranslator.g:694:42: WS
+                    	            {
+                    	            mWS(); if (failed) return ;
+
+                    	            }
+                    	            break;
+
+                    	    }
+
+                    	    mARG(); if (failed) return ;
+
+                    	    }
+                    	    break;
+
+                    	default :
+                    	    break loop14;
+                        }
+                    } while (true);
+
+                    // ActionTranslator.g:694:52: ( WS )?
+                    int alt15=2;
+                    int LA15_0 = input.LA(1);
+                    if ( ((LA15_0>='\t' && LA15_0<='\n')||LA15_0==' ') ) {
+                        alt15=1;
+                    }
+                    switch (alt15) {
+                        case 1 :
+                            // ActionTranslator.g:694:52: WS
+                            {
+                            mWS(); if (failed) return ;
+
+                            }
+                            break;
+
+                    }
+
+
+                    }
+                    break;
+
+            }
+
+            match(')'); if (failed) return ;
+            if ( backtracking==1 ) {
+
+              		// strip the leading '%' from the matched text
+              		// NOTE(review): unlike TEMPLATE_INSTANCE, enclosingRule is dereferenced
+              		// without a null guard here — presumably this rule only fires inside a
+              		// rule context; confirm before relying on it outside one.
+              		String action = getText().substring(1,getText().length());
+              		StringTemplate st =
+              			generator.translateTemplateConstructor(enclosingRule.name,
+              												   outerAltNum,
+              												   actionToken,
+              												   action);
+              		chunks.add(st);
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end INDIRECT_TEMPLATE_INSTANCE
+
+    // $ANTLR start ARG
+    // ANTLR-generated helper rule: one template-constructor argument, ID '=' ACTION.
+    // No token is emitted; used only by the TEMPLATE_INSTANCE rules above.
+    public void mARG() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:708:7: ( ID '=' ACTION )
+            // ActionTranslator.g:708:7: ID '=' ACTION
+            {
+            mID(); if (failed) return ;
+            match('='); if (failed) return ;
+            mACTION(); if (failed) return ;
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ARG
+
+    // $ANTLR start SET_EXPR_ATTRIBUTE
+    // ANTLR-generated lexer rule: matches '%' a=ACTION '.' ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+    // (setting an attribute on a template-valued expression, e.g. %{expr}.attr = value;).
+    // Builds an "actionSetAttribute" StringTemplate from the translated pieces.
+    public void mSET_EXPR_ATTRIBUTE() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_EXPR_ATTRIBUTE;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:713:4: ( '%' a= ACTION '.' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' )
+            // ActionTranslator.g:713:4: '%' a= ACTION '.' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
+            {
+            match('%'); if (failed) return ;
+            int aStart = getCharIndex();
+            mACTION(); if (failed) return ;
+            Token a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int ID7Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID7 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID7Start, getCharIndex()-1);
+            // optional whitespace before '='
+            // ActionTranslator.g:713:24: ( WS )?
+            int alt17=2;
+            int LA17_0 = input.LA(1);
+            if ( ((LA17_0>='\t' && LA17_0<='\n')||LA17_0==' ') ) {
+                alt17=1;
+            }
+            switch (alt17) {
+                case 1 :
+                    // ActionTranslator.g:713:24: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            int exprStart = getCharIndex();
+            mATTR_VALUE_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(';'); if (failed) return ;
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = template("actionSetAttribute");
+              		String action = a.getText();
+              		action = action.substring(1,action.length()-1); // stuff inside {...}
+              		st.setAttribute("st", translateAction(action));
+              		st.setAttribute("attrName", ID7.getText());
+              		st.setAttribute("expr", translateAction(expr.getText()));
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_EXPR_ATTRIBUTE
+
+    // $ANTLR start SET_ATTRIBUTE
+    // ANTLR-generated lexer rule: matches '%' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+    // (setting an attribute on a named template, e.g. %st.attr = value;).  Builds an
+    // "actionSetAttribute" StringTemplate; only the value expression is translated.
+    public void mSET_ATTRIBUTE() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = SET_ATTRIBUTE;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:730:4: ( '%' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' )
+            // ActionTranslator.g:730:4: '%' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
+            {
+            match('%'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            // optional whitespace before '='
+            // ActionTranslator.g:730:22: ( WS )?
+            int alt18=2;
+            int LA18_0 = input.LA(1);
+            if ( ((LA18_0>='\t' && LA18_0<='\n')||LA18_0==' ') ) {
+                alt18=1;
+            }
+            switch (alt18) {
+                case 1 :
+                    // ActionTranslator.g:730:22: WS
+                    {
+                    mWS(); if (failed) return ;
+
+                    }
+                    break;
+
+            }
+
+            match('='); if (failed) return ;
+            int exprStart = getCharIndex();
+            mATTR_VALUE_EXPR(); if (failed) return ;
+            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
+            match(';'); if (failed) return ;
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = template("actionSetAttribute");
+              		st.setAttribute("st", x.getText());
+              		st.setAttribute("attrName", y.getText());
+              		st.setAttribute("expr", translateAction(expr.getText()));
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end SET_ATTRIBUTE
+
+    // $ANTLR start ATTR_VALUE_EXPR
+    // ANTLR-generated helper rule: the right-hand side of an attribute assignment,
+    // ~'=' (~';')* — one non-'=' character followed by any run of non-';' characters.
+    // No token is emitted; callers capture the matched text via char indexes.
+    public void mATTR_VALUE_EXPR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:743:4: (~ '=' (~ ';' )* )
+            // ActionTranslator.g:743:4: ~ '=' (~ ';' )*
+            {
+            // first char: anything but '=' ('\u003D')
+            if ( (input.LA(1)>='\u0000' && input.LA(1)<='<')||(input.LA(1)>='>' && input.LA(1)<='\uFFFE') ) {
+                input.consume();
+            failed=false;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                MismatchedSetException mse =
+                    new MismatchedSetException(null,input);
+                recover(mse);    throw mse;
+            }
+
+            // remaining chars: anything but ';' ('\u003B')
+            // ActionTranslator.g:743:9: (~ ';' )*
+            loop19:
+            do {
+                int alt19=2;
+                int LA19_0 = input.LA(1);
+                if ( ((LA19_0>='\u0000' && LA19_0<=':')||(LA19_0>='<' && LA19_0<='\uFFFE')) ) {
+                    alt19=1;
+                }
+
+
+                switch (alt19) {
+            	case 1 :
+            	    // ActionTranslator.g:743:10: ~ ';'
+            	    {
+            	    if ( (input.LA(1)>='\u0000' && input.LA(1)<=':')||(input.LA(1)>='<' && input.LA(1)<='\uFFFE') ) {
+            	        input.consume();
+            	    failed=false;
+            	    }
+            	    else {
+            	        if (backtracking>0) {failed=true; return ;}
+            	        MismatchedSetException mse =
+            	            new MismatchedSetException(null,input);
+            	        recover(mse);    throw mse;
+            	    }
+
+
+            	    }
+            	    break;
+
+            	default :
+            	    break loop19;
+                }
+            } while (true);
+
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ATTR_VALUE_EXPR
+
+    // $ANTLR start TEMPLATE_EXPR
+    // ANTLR-generated lexer rule: matches '%' a=ACTION (a template string-constructor
+    // expression, e.g. %{"..."}).  Builds an "actionStringConstructor" StringTemplate
+    // from the translated contents of the {...} action.
+    public void mTEMPLATE_EXPR() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = TEMPLATE_EXPR;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:748:4: ( '%' a= ACTION )
+            // ActionTranslator.g:748:4: '%' a= ACTION
+            {
+            match('%'); if (failed) return ;
+            int aStart = getCharIndex();
+            mACTION(); if (failed) return ;
+            Token a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart, getCharIndex()-1);
+            if ( backtracking==1 ) {
+
+              		StringTemplate st = template("actionStringConstructor");
+              		String action = a.getText();
+              		action = action.substring(1,action.length()-1); // stuff inside {...}
+              		st.setAttribute("stringExpr", translateAction(action));
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end TEMPLATE_EXPR
+
+    // $ANTLR start ACTION
+    // ANTLR-generated helper rule: matches a brace-delimited action block,
+    // '{' (greedy=false: .)* '}'.  The non-greedy loop stops at the FIRST '}',
+    // so nested braces are not balanced here.  No token is emitted.
+    public void mACTION() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:760:4: ( '{' ( options {greedy=false; } : . )* '}' )
+            // ActionTranslator.g:760:4: '{' ( options {greedy=false; } : . )* '}'
+            {
+            match('{'); if (failed) return ;
+            // ActionTranslator.g:760:8: ( options {greedy=false; } : . )*
+            loop20:
+            do {
+                int alt20=2;
+                int LA20_0 = input.LA(1);
+                // non-greedy: exit as soon as '}' is seen
+                if ( (LA20_0=='}') ) {
+                    alt20=2;
+                }
+                else if ( ((LA20_0>='\u0000' && LA20_0<='|')||(LA20_0>='~' && LA20_0<='\uFFFE')) ) {
+                    alt20=1;
+                }
+
+
+                switch (alt20) {
+            	case 1 :
+            	    // ActionTranslator.g:760:33: .
+            	    {
+            	    matchAny(); if (failed) return ;
+
+            	    }
+            	    break;
+
+            	default :
+            	    break loop20;
+                }
+            } while (true);
+
+            match('}'); if (failed) return ;
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ACTION
+
+    // $ANTLR start ESC
+    // ANTLR-generated lexer rule: backslash escapes inside actions.
+    // '\\$' yields a literal "$" chunk, '\\%' a literal "%" chunk; any other
+    // '\\' + char passes the whole matched text through unchanged.
+    public void mESC() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ESC;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:763:9: ( '\\\\' '$' | '\\\\' '%' | '\\\\' ~ ('$'|'%'))
+            // two-char lookahead decides among the three alternatives
+            int alt21=3;
+            int LA21_0 = input.LA(1);
+            if ( (LA21_0=='\\') ) {
+                int LA21_1 = input.LA(2);
+                if ( (LA21_1=='%') ) {
+                    alt21=2;
+                }
+                else if ( (LA21_1=='$') ) {
+                    alt21=1;
+                }
+                else if ( ((LA21_1>='\u0000' && LA21_1<='#')||(LA21_1>='&' && LA21_1<='\uFFFE')) ) {
+                    alt21=3;
+                }
+                else {
+                    if (backtracking>0) {failed=true; return ;}
+                    NoViableAltException nvae =
+                        new NoViableAltException("763:1: ESC : ( '\\\\' '$' | '\\\\' '%' | '\\\\' ~ ('$'|'%'));", 21, 1, input);
+
+                    throw nvae;
+                }
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                NoViableAltException nvae =
+                    new NoViableAltException("763:1: ESC : ( '\\\\' '$' | '\\\\' '%' | '\\\\' ~ ('$'|'%'));", 21, 0, input);
+
+                throw nvae;
+            }
+            switch (alt21) {
+                case 1 :
+                    // ActionTranslator.g:763:9: '\\\\' '$'
+                    {
+                    match('\\'); if (failed) return ;
+                    match('$'); if (failed) return ;
+                    if ( backtracking==1 ) {
+                      chunks.add("$");
+                    }
+
+                    }
+                    break;
+                case 2 :
+                    // ActionTranslator.g:764:4: '\\\\' '%'
+                    {
+                    match('\\'); if (failed) return ;
+                    match('%'); if (failed) return ;
+                    if ( backtracking==1 ) {
+                      chunks.add("%");
+                    }
+
+                    }
+                    break;
+                case 3 :
+                    // ActionTranslator.g:765:4: '\\\\' ~ ('$'|'%')
+                    {
+                    match('\\'); if (failed) return ;
+                    if ( (input.LA(1)>='\u0000' && input.LA(1)<='#')||(input.LA(1)>='&' && input.LA(1)<='\uFFFE') ) {
+                        input.consume();
+                    failed=false;
+                    }
+                    else {
+                        if (backtracking>0) {failed=true; return ;}
+                        MismatchedSetException mse =
+                            new MismatchedSetException(null,input);
+                        recover(mse);    throw mse;
+                    }
+
+                    if ( backtracking==1 ) {
+                      chunks.add(getText());
+                    }
+
+                    }
+                    break;
+
+            }
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ESC
+
+    // $ANTLR start ERROR_XY
+    // ANTLR-generated fallback rule: matches '$' x=ID '.' y=ID that no earlier rule
+    // claimed.  Passes the raw text through as a chunk and reports an invalid
+    // attribute error via the code generator.
+    public void mERROR_XY() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ERROR_XY;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:769:4: ( '$' x= ID '.' y= ID )
+            // ActionTranslator.g:769:4: '$' x= ID '.' y= ID
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( backtracking==1 ) {
+
+              		chunks.add(getText());
+              		generator.issueInvalidAttributeError(x.getText(),y.getText(),
+              		                                     enclosingRule,actionToken,
+              		                                     outerAltNum);
+              		
+            }
+
+            }
+
+
+            // emit the token only for the outermost rule invocation, and only once
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ERROR_XY
+
+    // $ANTLR start ERROR_X
+    // Lexer rule ERROR_X: matches '$' ID — a $x attribute reference that no
+    // earlier, more specific rule accepted. Keeps the raw text as a chunk and
+    // reports an invalid-attribute error for x.
+    public void mERROR_X() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = ERROR_X;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:779:4: ( '$' x= ID )
+            // ActionTranslator.g:779:4: '$' x= ID
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            // capture the matched ID text as a throwaway token (never emitted)
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            // embedded action: executes only on the actual (level-1) match pass
+            if ( backtracking==1 ) {
+
+              		chunks.add(getText());
+              		generator.issueInvalidAttributeError(x.getText(),
+              		                                     enclosingRule,actionToken,
+              		                                     outerAltNum);
+              		
+            }
+
+            }
+
+
+            // manual emit: only if nothing was emitted yet and this rule is the
+            // outermost one on the invocation stack
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ERROR_X
+
+    // $ANTLR start UNKNOWN_SYNTAX
+    // Lexer rule UNKNOWN_SYNTAX: last-resort rule for a lone '$' (accepted
+    // silently) or '%' followed by template-like characters (ID, '.', parens,
+    // comma, braces, '"'), which is reported as an invalid template action.
+    public void mUNKNOWN_SYNTAX() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = UNKNOWN_SYNTAX;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:789:4: ( '$' | '%' ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )* )
+            // decision 23: pick alternative by the first character ('$' vs '%')
+            int alt23=2;
+            int LA23_0 = input.LA(1);
+            if ( (LA23_0=='$') ) {
+                alt23=1;
+            }
+            else if ( (LA23_0=='%') ) {
+                alt23=2;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                NoViableAltException nvae =
+                    new NoViableAltException("788:1: UNKNOWN_SYNTAX : ( '$' | '%' ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )* );", 23, 0, input);
+
+                throw nvae;
+            }
+            switch (alt23) {
+                case 1 :
+                    // ActionTranslator.g:789:4: '$'
+                    {
+                    match('$'); if (failed) return ;
+                    if ( backtracking==1 ) {
+
+                      		chunks.add(getText());
+                      		// shouldn't need an error here.  Just accept $ if it doesn't look like anything
+                      		
+                    }
+
+                    }
+                    break;
+                case 2 :
+                    // ActionTranslator.g:794:4: '%' ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )*
+                    {
+                    match('%'); if (failed) return ;
+                    // ActionTranslator.g:794:8: ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )*
+                    // loop 22: greedily consume the template-like tail after '%'
+                    loop22:
+                    do {
+                        int alt22=9;
+                        switch ( input.LA(1) ) {
+                        case 'A':
+                        case 'B':
+                        case 'C':
+                        case 'D':
+                        case 'E':
+                        case 'F':
+                        case 'G':
+                        case 'H':
+                        case 'I':
+                        case 'J':
+                        case 'K':
+                        case 'L':
+                        case 'M':
+                        case 'N':
+                        case 'O':
+                        case 'P':
+                        case 'Q':
+                        case 'R':
+                        case 'S':
+                        case 'T':
+                        case 'U':
+                        case 'V':
+                        case 'W':
+                        case 'X':
+                        case 'Y':
+                        case 'Z':
+                        case '_':
+                        case 'a':
+                        case 'b':
+                        case 'c':
+                        case 'd':
+                        case 'e':
+                        case 'f':
+                        case 'g':
+                        case 'h':
+                        case 'i':
+                        case 'j':
+                        case 'k':
+                        case 'l':
+                        case 'm':
+                        case 'n':
+                        case 'o':
+                        case 'p':
+                        case 'q':
+                        case 'r':
+                        case 's':
+                        case 't':
+                        case 'u':
+                        case 'v':
+                        case 'w':
+                        case 'x':
+                        case 'y':
+                        case 'z':
+                            alt22=1;
+                            break;
+                        case '.':
+                            alt22=2;
+                            break;
+                        case '(':
+                            alt22=3;
+                            break;
+                        case ')':
+                            alt22=4;
+                            break;
+                        case ',':
+                            alt22=5;
+                            break;
+                        case '{':
+                            alt22=6;
+                            break;
+                        case '}':
+                            alt22=7;
+                            break;
+                        case '\"':
+                            alt22=8;
+                            break;
+
+                        }
+
+                        switch (alt22) {
+                    	case 1 :
+                    	    // ActionTranslator.g:794:9: ID
+                    	    {
+                    	    mID(); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 2 :
+                    	    // ActionTranslator.g:794:12: '.'
+                    	    {
+                    	    match('.'); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 3 :
+                    	    // ActionTranslator.g:794:16: '('
+                    	    {
+                    	    match('('); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 4 :
+                    	    // ActionTranslator.g:794:20: ')'
+                    	    {
+                    	    match(')'); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 5 :
+                    	    // ActionTranslator.g:794:24: ','
+                    	    {
+                    	    match(','); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 6 :
+                    	    // ActionTranslator.g:794:28: '{'
+                    	    {
+                    	    match('{'); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 7 :
+                    	    // ActionTranslator.g:794:32: '}'
+                    	    {
+                    	    match('}'); if (failed) return ;
+
+                    	    }
+                    	    break;
+                    	case 8 :
+                    	    // ActionTranslator.g:794:36: '\"'
+                    	    {
+                    	    match('\"'); if (failed) return ;
+
+                    	    }
+                    	    break;
+
+                    	default :
+                    	    break loop22;
+                        }
+                    } while (true);
+
+                    // embedded action (actual pass only): keep the raw text and
+                    // flag the '%'-form as an invalid template action
+                    if ( backtracking==1 ) {
+
+                      		chunks.add(getText());
+                      		ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
+                      								  grammar,
+                      								  actionToken,
+                      								  getText());
+                      		
+                    }
+
+                    }
+                    break;
+
+            }
+
+            // manual emit: only if nothing was emitted yet and this rule is the
+            // outermost one on the invocation stack
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end UNKNOWN_SYNTAX
+
+    // $ANTLR start TEXT
+    // Lexer rule TEXT: one or more characters that are not '$', '%', or '\\'
+    // (the action-translation trigger characters); the run is kept verbatim as
+    // a plain-text chunk.
+    public void mTEXT() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            int _type = TEXT;
+            int _start = getCharIndex();
+            int _line = getLine();
+            int _charPosition = getCharPositionInLine();
+            int _channel = Token.DEFAULT_CHANNEL;
+            // ActionTranslator.g:804:7: ( (~ ('$'|'%'|'\\\\'))+ )
+            // ActionTranslator.g:804:7: (~ ('$'|'%'|'\\\\'))+
+            {
+            // ActionTranslator.g:804:7: (~ ('$'|'%'|'\\\\'))+
+            // (...)+ loop 24: must match at least once (cnt24 tracks iterations)
+            int cnt24=0;
+            loop24:
+            do {
+                int alt24=2;
+                int LA24_0 = input.LA(1);
+                // upper bound \uFFFE — \uFFFF is excluded (presumably ANTLR's
+                // EOF sentinel; TODO confirm against CharStream)
+                if ( ((LA24_0>='\u0000' && LA24_0<='#')||(LA24_0>='&' && LA24_0<='[')||(LA24_0>=']' && LA24_0<='\uFFFE')) ) {
+                    alt24=1;
+                }
+
+
+                switch (alt24) {
+            	case 1 :
+            	    // ActionTranslator.g:804:7: ~ ('$'|'%'|'\\\\')
+            	    {
+            	    if ( (input.LA(1)>='\u0000' && input.LA(1)<='#')||(input.LA(1)>='&' && input.LA(1)<='[')||(input.LA(1)>=']' && input.LA(1)<='\uFFFE') ) {
+            	        input.consume();
+            	    failed=false;
+            	    }
+            	    else {
+            	        if (backtracking>0) {failed=true; return ;}
+            	        MismatchedSetException mse =
+            	            new MismatchedSetException(null,input);
+            	        recover(mse);    throw mse;
+            	    }
+
+
+            	    }
+            	    break;
+
+            	default :
+            	    if ( cnt24 >= 1 ) break loop24;
+            	    if (backtracking>0) {failed=true; return ;}
+                        EarlyExitException eee =
+                            new EarlyExitException(24, input);
+                        throw eee;
+                }
+                cnt24++;
+            } while (true);
+
+            // embedded action: executes only on the actual (level-1) match pass
+            if ( backtracking==1 ) {
+              chunks.add(getText());
+            }
+
+            }
+
+
+            // manual emit: only if nothing was emitted yet and this rule is the
+            // outermost one on the invocation stack
+            if ( backtracking==1 ) {
+
+                      if ( token==null && ruleNestingLevel==1 ) {
+                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
+                      }
+
+                      
+            }    }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end TEXT
+
+    // $ANTLR start ID
+    // Helper rule ID: matches an identifier — a letter or '_' followed by any
+    // number of letters, digits, or '_'. Unlike the token rules above it sets
+    // no _type and emits nothing; callers (mERROR_X, mERROR_XY, ...) read the
+    // matched text themselves.
+    public void mID() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:808:9: ( ('a'..'z'|'A'..'Z'|'_') ( ('a'..'z'|'A'..'Z'|'_'|'0'..'9'))* )
+            // ActionTranslator.g:808:9: ('a'..'z'|'A'..'Z'|'_') ( ('a'..'z'|'A'..'Z'|'_'|'0'..'9'))*
+            {
+            // first character: letter or underscore (no digit)
+            if ( (input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
+                input.consume();
+            failed=false;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                MismatchedSetException mse =
+                    new MismatchedSetException(null,input);
+                recover(mse);    throw mse;
+            }
+
+            // ActionTranslator.g:808:33: ( ('a'..'z'|'A'..'Z'|'_'|'0'..'9'))*
+            loop25:
+            do {
+                int alt25=2;
+                int LA25_0 = input.LA(1);
+                if ( ((LA25_0>='0' && LA25_0<='9')||(LA25_0>='A' && LA25_0<='Z')||LA25_0=='_'||(LA25_0>='a' && LA25_0<='z')) ) {
+                    alt25=1;
+                }
+
+
+                switch (alt25) {
+            	case 1 :
+            	    // ActionTranslator.g:808:34: ('a'..'z'|'A'..'Z'|'_'|'0'..'9')
+            	    {
+            	    if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
+            	        input.consume();
+            	    failed=false;
+            	    }
+            	    else {
+            	        if (backtracking>0) {failed=true; return ;}
+            	        MismatchedSetException mse =
+            	            new MismatchedSetException(null,input);
+            	        recover(mse);    throw mse;
+            	    }
+
+
+            	    }
+            	    break;
+
+            	default :
+            	    break loop25;
+                }
+            } while (true);
+
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end ID
+
+    // $ANTLR start INT
+    // Helper rule INT: matches one or more decimal digits. Sets no _type and
+    // emits nothing — callers consume the matched text.
+    public void mINT() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:812:7: ( ( '0' .. '9' )+ )
+            // ActionTranslator.g:812:7: ( '0' .. '9' )+
+            {
+            // ActionTranslator.g:812:7: ( '0' .. '9' )+
+            // (...)+ loop 26: must match at least one digit
+            int cnt26=0;
+            loop26:
+            do {
+                int alt26=2;
+                int LA26_0 = input.LA(1);
+                if ( ((LA26_0>='0' && LA26_0<='9')) ) {
+                    alt26=1;
+                }
+
+
+                switch (alt26) {
+            	case 1 :
+            	    // ActionTranslator.g:812:7: '0' .. '9'
+            	    {
+            	    matchRange('0','9'); if (failed) return ;
+
+            	    }
+            	    break;
+
+            	default :
+            	    if ( cnt26 >= 1 ) break loop26;
+            	    if (backtracking>0) {failed=true; return ;}
+                        EarlyExitException eee =
+                            new EarlyExitException(26, input);
+                        throw eee;
+                }
+                cnt26++;
+            } while (true);
+
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end INT
+
+    // $ANTLR start WS
+    // Helper rule WS: matches one or more whitespace characters (space, tab,
+    // or newline). Sets no _type and emits nothing.
+    public void mWS() throws RecognitionException {
+        try {
+            ruleNestingLevel++;
+            // ActionTranslator.g:816:6: ( ( (' '|'\\t'|'\\n'))+ )
+            // ActionTranslator.g:816:6: ( (' '|'\\t'|'\\n'))+
+            {
+            // ActionTranslator.g:816:6: ( (' '|'\\t'|'\\n'))+
+            // (...)+ loop 27: must match at least one whitespace char
+            int cnt27=0;
+            loop27:
+            do {
+                int alt27=2;
+                int LA27_0 = input.LA(1);
+                if ( ((LA27_0>='\t' && LA27_0<='\n')||LA27_0==' ') ) {
+                    alt27=1;
+                }
+
+
+                switch (alt27) {
+            	case 1 :
+            	    // ActionTranslator.g:816:7: (' '|'\\t'|'\\n')
+            	    {
+            	    if ( (input.LA(1)>='\t' && input.LA(1)<='\n')||input.LA(1)==' ' ) {
+            	        input.consume();
+            	    failed=false;
+            	    }
+            	    else {
+            	        if (backtracking>0) {failed=true; return ;}
+            	        MismatchedSetException mse =
+            	            new MismatchedSetException(null,input);
+            	        recover(mse);    throw mse;
+            	    }
+
+
+            	    }
+            	    break;
+
+            	default :
+            	    if ( cnt27 >= 1 ) break loop27;
+            	    if (backtracking>0) {failed=true; return ;}
+                        EarlyExitException eee =
+                            new EarlyExitException(27, input);
+                        throw eee;
+                }
+                cnt27++;
+            } while (true);
+
+
+            }
+
+        }
+        finally {
+            ruleNestingLevel--;
+        }
+    }
+    // $ANTLR end WS
+
+    public void mTokens() throws RecognitionException {
+        // ActionTranslator.g:1:25: ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> ISOLATED_LEXER_RULE_REF | ( SET_LOCAL_ATTR [...]
+        int alt28=27;
+        int LA28_0 = input.LA(1);
+        if ( (LA28_0=='$') ) {
+            if ( (synpred1()) ) {
+                alt28=1;
+            }
+            else if ( (synpred2()) ) {
+                alt28=2;
+            }
+            else if ( (synpred3()) ) {
+                alt28=3;
+            }
+            else if ( (synpred4()) ) {
+                alt28=4;
+            }
+            else if ( (synpred5()) ) {
+                alt28=5;
+            }
+            else if ( (synpred6()) ) {
+                alt28=6;
+            }
+            else if ( (synpred7()) ) {
+                alt28=7;
+            }
+            else if ( (synpred8()) ) {
+                alt28=8;
+            }
+            else if ( (synpred9()) ) {
+                alt28=9;
+            }
+            else if ( (synpred10()) ) {
+                alt28=10;
+            }
+            else if ( (synpred11()) ) {
+                alt28=11;
+            }
+            else if ( (synpred12()) ) {
+                alt28=12;
+            }
+            else if ( (synpred13()) ) {
+                alt28=13;
+            }
+            else if ( (synpred14()) ) {
+                alt28=14;
+            }
+            else if ( (synpred15()) ) {
+                alt28=15;
+            }
+            else if ( (synpred16()) ) {
+                alt28=16;
+            }
+            else if ( (synpred17()) ) {
+                alt28=17;
+            }
+            else if ( (synpred24()) ) {
+                alt28=24;
+            }
+            else if ( (synpred25()) ) {
+                alt28=25;
+            }
+            else if ( (synpred26()) ) {
+                alt28=26;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                NoViableAltException nvae =
+                    new NoViableAltException("1:1: Tokens options {k=1; } : ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> IS [...]
+
+                throw nvae;
+            }
+        }
+        else if ( (LA28_0=='%') ) {
+            if ( (synpred18()) ) {
+                alt28=18;
+            }
+            else if ( (synpred19()) ) {
+                alt28=19;
+            }
+            else if ( (synpred20()) ) {
+                alt28=20;
+            }
+            else if ( (synpred21()) ) {
+                alt28=21;
+            }
+            else if ( (synpred22()) ) {
+                alt28=22;
+            }
+            else if ( (synpred26()) ) {
+                alt28=26;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                NoViableAltException nvae =
+                    new NoViableAltException("1:1: Tokens options {k=1; } : ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> IS [...]
+
+                throw nvae;
+            }
+        }
+        else if ( (LA28_0=='\\') ) {
+            alt28=23;
+        }
+        else if ( ((LA28_0>='\u0000' && LA28_0<='#')||(LA28_0>='&' && LA28_0<='[')||(LA28_0>=']' && LA28_0<='\uFFFE')) ) {
+            alt28=27;
+        }
+        else {
+            if (backtracking>0) {failed=true; return ;}
+            NoViableAltException nvae =
+                new NoViableAltException("1:1: Tokens options {k=1; } : ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> ISOLAT [...]
+
+            throw nvae;
+        }
+        switch (alt28) {
+            case 1 :
+                // ActionTranslator.g:1:25: ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR
+                {
+                mSET_ENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 2 :
+                // ActionTranslator.g:1:88: ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR
+                {
+                mENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 3 :
+                // ActionTranslator.g:1:143: ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR
+                {
+                mSET_TOKEN_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 4 :
+                // ActionTranslator.g:1:188: ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR
+                {
+                mTOKEN_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 5 :
+                // ActionTranslator.g:1:225: ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR
+                {
+                mSET_RULE_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 6 :
+                // ActionTranslator.g:1:268: ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR
+                {
+                mRULE_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 7 :
+                // ActionTranslator.g:1:303: ( LABEL_REF )=> LABEL_REF
+                {
+                mLABEL_REF(); if (failed) return ;
+
+                }
+                break;
+            case 8 :
+                // ActionTranslator.g:1:326: ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF
+                {
+                mISOLATED_TOKEN_REF(); if (failed) return ;
+
+                }
+                break;
+            case 9 :
+                // ActionTranslator.g:1:367: ( ISOLATED_LEXER_RULE_REF )=> ISOLATED_LEXER_RULE_REF
+                {
+                mISOLATED_LEXER_RULE_REF(); if (failed) return ;
+
+                }
+                break;
+            case 10 :
+                // ActionTranslator.g:1:418: ( SET_LOCAL_ATTR )=> SET_LOCAL_ATTR
+                {
+                mSET_LOCAL_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 11 :
+                // ActionTranslator.g:1:451: ( LOCAL_ATTR )=> LOCAL_ATTR
+                {
+                mLOCAL_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 12 :
+                // ActionTranslator.g:1:476: ( SET_DYNAMIC_SCOPE_ATTR )=> SET_DYNAMIC_SCOPE_ATTR
+                {
+                mSET_DYNAMIC_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 13 :
+                // ActionTranslator.g:1:525: ( DYNAMIC_SCOPE_ATTR )=> DYNAMIC_SCOPE_ATTR
+                {
+                mDYNAMIC_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 14 :
+                // ActionTranslator.g:1:566: ( ERROR_SCOPED_XY )=> ERROR_SCOPED_XY
+                {
+                mERROR_SCOPED_XY(); if (failed) return ;
+
+                }
+                break;
+            case 15 :
+                // ActionTranslator.g:1:601: ( DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR )=> DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
+                {
+                mDYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 16 :
+                // ActionTranslator.g:1:676: ( DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR )=> DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
+                {
+                mDYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR(); if (failed) return ;
+
+                }
+                break;
+            case 17 :
+                // ActionTranslator.g:1:751: ( ISOLATED_DYNAMIC_SCOPE )=> ISOLATED_DYNAMIC_SCOPE
+                {
+                mISOLATED_DYNAMIC_SCOPE(); if (failed) return ;
+
+                }
+                break;
+            case 18 :
+                // ActionTranslator.g:1:800: ( TEMPLATE_INSTANCE )=> TEMPLATE_INSTANCE
+                {
+                mTEMPLATE_INSTANCE(); if (failed) return ;
+
+                }
+                break;
+            case 19 :
+                // ActionTranslator.g:1:839: ( INDIRECT_TEMPLATE_INSTANCE )=> INDIRECT_TEMPLATE_INSTANCE
+                {
+                mINDIRECT_TEMPLATE_INSTANCE(); if (failed) return ;
+
+                }
+                break;
+            case 20 :
+                // ActionTranslator.g:1:896: ( SET_EXPR_ATTRIBUTE )=> SET_EXPR_ATTRIBUTE
+                {
+                mSET_EXPR_ATTRIBUTE(); if (failed) return ;
+
+                }
+                break;
+            case 21 :
+                // ActionTranslator.g:1:937: ( SET_ATTRIBUTE )=> SET_ATTRIBUTE
+                {
+                mSET_ATTRIBUTE(); if (failed) return ;
+
+                }
+                break;
+            case 22 :
+                // ActionTranslator.g:1:968: ( TEMPLATE_EXPR )=> TEMPLATE_EXPR
+                {
+                mTEMPLATE_EXPR(); if (failed) return ;
+
+                }
+                break;
+            case 23 :
+                // ActionTranslator.g:1:999: ( ESC )=> ESC
+                {
+                mESC(); if (failed) return ;
+
+                }
+                break;
+            case 24 :
+                // ActionTranslator.g:1:1010: ( ERROR_XY )=> ERROR_XY
+                {
+                mERROR_XY(); if (failed) return ;
+
+                }
+                break;
+            case 25 :
+                // ActionTranslator.g:1:1031: ( ERROR_X )=> ERROR_X
+                {
+                mERROR_X(); if (failed) return ;
+
+                }
+                break;
+            case 26 :
+                // ActionTranslator.g:1:1050: ( UNKNOWN_SYNTAX )=> UNKNOWN_SYNTAX
+                {
+                mUNKNOWN_SYNTAX(); if (failed) return ;
+
+                }
+                break;
+            case 27 :
+                // ActionTranslator.g:1:1083: ( TEXT )=> TEXT
+                {
+                mTEXT(); if (failed) return ;
+
+                }
+                break;
+
+        }
+
+    }
+
+    // Syntactic-predicate fragments synpred1..synpred15: each speculatively
+    // invokes one token rule, relying on the failed/backtracking protocol
+    // instead of throwing. mTokens calls these through synpredN() wrappers
+    // (presumably defined elsewhere in this file — not visible here) to decide
+    // which '$'/'%' rule to commit to.
+    // $ANTLR start synpred1
+    public void synpred1_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:25: ( SET_ENCLOSING_RULE_SCOPE_ATTR )
+        // ActionTranslator.g:1:26: SET_ENCLOSING_RULE_SCOPE_ATTR
+        {
+        mSET_ENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred1
+
+    // $ANTLR start synpred2
+    public void synpred2_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:88: ( ENCLOSING_RULE_SCOPE_ATTR )
+        // ActionTranslator.g:1:89: ENCLOSING_RULE_SCOPE_ATTR
+        {
+        mENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred2
+
+    // $ANTLR start synpred3
+    public void synpred3_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:143: ( SET_TOKEN_SCOPE_ATTR )
+        // ActionTranslator.g:1:144: SET_TOKEN_SCOPE_ATTR
+        {
+        mSET_TOKEN_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred3
+
+    // $ANTLR start synpred4
+    public void synpred4_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:188: ( TOKEN_SCOPE_ATTR )
+        // ActionTranslator.g:1:189: TOKEN_SCOPE_ATTR
+        {
+        mTOKEN_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred4
+
+    // $ANTLR start synpred5
+    public void synpred5_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:225: ( SET_RULE_SCOPE_ATTR )
+        // ActionTranslator.g:1:226: SET_RULE_SCOPE_ATTR
+        {
+        mSET_RULE_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred5
+
+    // $ANTLR start synpred6
+    public void synpred6_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:268: ( RULE_SCOPE_ATTR )
+        // ActionTranslator.g:1:269: RULE_SCOPE_ATTR
+        {
+        mRULE_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred6
+
+    // $ANTLR start synpred7
+    public void synpred7_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:303: ( LABEL_REF )
+        // ActionTranslator.g:1:304: LABEL_REF
+        {
+        mLABEL_REF(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred7
+
+    // $ANTLR start synpred8
+    public void synpred8_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:326: ( ISOLATED_TOKEN_REF )
+        // ActionTranslator.g:1:327: ISOLATED_TOKEN_REF
+        {
+        mISOLATED_TOKEN_REF(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred8
+
+    // $ANTLR start synpred9
+    public void synpred9_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:367: ( ISOLATED_LEXER_RULE_REF )
+        // ActionTranslator.g:1:368: ISOLATED_LEXER_RULE_REF
+        {
+        mISOLATED_LEXER_RULE_REF(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred9
+
+    // $ANTLR start synpred10
+    public void synpred10_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:418: ( SET_LOCAL_ATTR )
+        // ActionTranslator.g:1:419: SET_LOCAL_ATTR
+        {
+        mSET_LOCAL_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred10
+
+    // $ANTLR start synpred11
+    public void synpred11_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:451: ( LOCAL_ATTR )
+        // ActionTranslator.g:1:452: LOCAL_ATTR
+        {
+        mLOCAL_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred11
+
+    // $ANTLR start synpred12
+    public void synpred12_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:476: ( SET_DYNAMIC_SCOPE_ATTR )
+        // ActionTranslator.g:1:477: SET_DYNAMIC_SCOPE_ATTR
+        {
+        mSET_DYNAMIC_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred12
+
+    // $ANTLR start synpred13
+    public void synpred13_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:525: ( DYNAMIC_SCOPE_ATTR )
+        // ActionTranslator.g:1:526: DYNAMIC_SCOPE_ATTR
+        {
+        mDYNAMIC_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred13
+
+    // $ANTLR start synpred14
+    public void synpred14_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:566: ( ERROR_SCOPED_XY )
+        // ActionTranslator.g:1:567: ERROR_SCOPED_XY
+        {
+        mERROR_SCOPED_XY(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred14
+
+    // $ANTLR start synpred15
+    public void synpred15_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:601: ( DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR )
+        // ActionTranslator.g:1:602: DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
+        {
+        mDYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred15
+
+    // $ANTLR start synpred16
+    public void synpred16_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:676: ( DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR )
+        // ActionTranslator.g:1:677: DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
+        {
+        mDYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred16
+
+    // $ANTLR start synpred17
+    public void synpred17_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:751: ( ISOLATED_DYNAMIC_SCOPE )
+        // ActionTranslator.g:1:752: ISOLATED_DYNAMIC_SCOPE
+        {
+        mISOLATED_DYNAMIC_SCOPE(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred17
+
+    // $ANTLR start synpred18
+    public void synpred18_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:800: ( TEMPLATE_INSTANCE )
+        // ActionTranslator.g:1:801: TEMPLATE_INSTANCE
+        {
+        mTEMPLATE_INSTANCE(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred18
+
+    // $ANTLR start synpred19
+    public void synpred19_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:839: ( INDIRECT_TEMPLATE_INSTANCE )
+        // ActionTranslator.g:1:840: INDIRECT_TEMPLATE_INSTANCE
+        {
+        mINDIRECT_TEMPLATE_INSTANCE(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred19
+
+    // $ANTLR start synpred20
+    public void synpred20_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:896: ( SET_EXPR_ATTRIBUTE )
+        // ActionTranslator.g:1:897: SET_EXPR_ATTRIBUTE
+        {
+        mSET_EXPR_ATTRIBUTE(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred20
+
+    // $ANTLR start synpred21
+    public void synpred21_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:937: ( SET_ATTRIBUTE )
+        // ActionTranslator.g:1:938: SET_ATTRIBUTE
+        {
+        mSET_ATTRIBUTE(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred21
+
+    // $ANTLR start synpred22
+    public void synpred22_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:968: ( TEMPLATE_EXPR )
+        // ActionTranslator.g:1:969: TEMPLATE_EXPR
+        {
+        mTEMPLATE_EXPR(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred22
+
+    // $ANTLR start synpred24
+    public void synpred24_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:1010: ( ERROR_XY )
+        // ActionTranslator.g:1:1011: ERROR_XY
+        {
+        mERROR_XY(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred24
+
+    // $ANTLR start synpred25
+    public void synpred25_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:1031: ( ERROR_X )
+        // ActionTranslator.g:1:1032: ERROR_X
+        {
+        mERROR_X(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred25
+
+    // $ANTLR start synpred26
+    public void synpred26_fragment() throws RecognitionException {   
+        // ActionTranslator.g:1:1050: ( UNKNOWN_SYNTAX )
+        // ActionTranslator.g:1:1051: UNKNOWN_SYNTAX
+        {
+        mUNKNOWN_SYNTAX(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred26
+
+    public boolean synpred25() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred25_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred7() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred7_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred14() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred14_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred15() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred15_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred22() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred22_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred12() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred12_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred4() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred4_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred9() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred9_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred1() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred1_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred20() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred20_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred17() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred17_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred2() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred2_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred18() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred18_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred3() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred3_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred11() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred11_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred26() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred26_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred8() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred8_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred10() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred10_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred21() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred21_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred16() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred16_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred19() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred19_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred24() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred24_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred5() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred5_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred6() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred6_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public boolean synpred13() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred13_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+
+
+ 
+
+}
\ No newline at end of file
diff --git a/src/org/antlr/codegen/CPPTarget.java b/src/org/antlr/codegen/CPPTarget.java
new file mode 100644
index 0000000..2bfafbd
--- /dev/null
+++ b/src/org/antlr/codegen/CPPTarget.java
@@ -0,0 +1,140 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.tool.Grammar;
+import org.antlr.Tool;
+
+import java.io.IOException;
+
+public class CPPTarget extends Target {
+	
+	public String escapeChar( int c ) {
+		// System.out.println("CPPTarget.escapeChar("+c+")");
+		switch (c) {
+		case '\n' : return "\\n";
+		case '\t' : return "\\t";
+		case '\r' : return "\\r";
+		case '\\' : return "\\\\";
+		case '\'' : return "\\'";
+		case '"' :  return "\\\"";
+		default :
+			if ( c < ' ' || c > 126 )
+			{
+				if (c > 255)
+				{
+					String s = Integer.toString(c,16);
+					// put leading zeroes in front of the thing..
+					while( s.length() < 4 )
+						s = '0' + s;
+					return "\\u" + s;
+				}
+				else {
+					return "\\" + Integer.toString(c,8);
+				}
+			}
+			else {
+				return String.valueOf((char)c);
+			}
+		}
+	}
+
+	/** Converts a String into a representation that can be use as a literal
+	 * when surrounded by double-quotes.
+	 *
+	 * Used for escaping semantic predicate strings for exceptions.
+	 *
+	 * @param s The String to be changed into a literal
+	 */
+	public String escapeString(String s)
+	{
+		StringBuffer retval = new StringBuffer();
+		for (int i = 0; i < s.length(); i++) {
+			retval.append(escapeChar(s.charAt(i)));
+		}
+
+		return retval.toString();
+	}
+
+	protected void genRecognizerHeaderFile(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   StringTemplate headerFileST,
+										   String extName)
+		throws IOException
+	{
+		StringTemplateGroup templates = generator.getTemplates();
+		generator.write(headerFileST, grammar.name+extName);
+	}
+
+	/** Convert from an ANTLR char literal found in a grammar file to
+	 *  an equivalent char literal in the target language.  For Java, this
+	 *  is the identify translation; i.e., '\n' -> '\n'.  Most languages
+	 *  will be able to use this 1-to-1 mapping.  Expect single quotes
+	 *  around the incoming literal.
+	 *  Depending on the charvocabulary the charliteral should be prefixed with a 'L'
+	 */
+	public String getTargetCharLiteralFromANTLRCharLiteral( CodeGenerator codegen, String literal) {
+		int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+		String prefix = "'";
+		if( codegen.grammar.getMaxCharValue() > 255 )
+			prefix = "L'";
+		else if( (c & 0x80) != 0 )	// if in char mode prevent sign extensions
+			return ""+c;
+		return prefix+escapeChar(c)+"'";
+	}
+
+	/** Convert from an ANTLR string literal found in a grammar file to
+	 *  an equivalent string literal in the target language.  For Java, this
+	 *  is the identify translation; i.e., "\"\n" -> "\"\n".  Most languages
+	 *  will be able to use this 1-to-1 mapping.  Expect double quotes 
+	 *  around the incoming literal.
+	 *  Depending on the charvocabulary the string should be prefixed with a 'L'
+	 */
+	public String getTargetStringLiteralFromANTLRStringLiteral( CodeGenerator codegen, String literal) {
+		StringBuffer buf = Grammar.getUnescapedStringFromGrammarStringLiteral(literal);
+		String prefix = "\"";
+		if( codegen.grammar.getMaxCharValue() > 255 )
+			prefix = "L\"";
+		return prefix+escapeString(buf.toString())+"\"";
+	}
+	/** Character constants get truncated to this value.
+	 * TODO: This should be derived from the charVocabulary. Depending on it
+	 * being 255 or 0xFFFF the templates should generate normal character
+	 * constants or multibyte ones.
+	 */
+	public int getMaxCharValue( CodeGenerator codegen ) {
+		int maxval = 255; // codegen.grammar.get????();
+		if ( maxval <= 255 )
+			return 255;
+		else
+			return maxval;
+	}
+}
diff --git a/src/org/antlr/codegen/CSharpTarget.java b/src/org/antlr/codegen/CSharpTarget.java
new file mode 100644
index 0000000..e1da9bd
--- /dev/null
+++ b/src/org/antlr/codegen/CSharpTarget.java
@@ -0,0 +1,46 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
+public class CSharpTarget extends Target 
+{
+	protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
+													 CodeGenerator generator,
+													 Grammar grammar,
+													 StringTemplate recognizerST,
+													 StringTemplate cyclicDFAST)
+	{
+		return recognizerST;
+	}
+}
+
diff --git a/src/org/antlr/codegen/CTarget.java b/src/org/antlr/codegen/CTarget.java
new file mode 100644
index 0000000..ac01022
--- /dev/null
+++ b/src/org/antlr/codegen/CTarget.java
@@ -0,0 +1,238 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+ 
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
+import java.io.IOException;
+import java.util.ArrayList;
+        
+public class CTarget extends Target {
+    
+        ArrayList strings = new ArrayList();
+ 
+        protected void genRecognizerFile(Tool tool,
+									CodeGenerator generator,
+									Grammar grammar,
+									StringTemplate outputFileST)
+		throws IOException
+	{
+                // Before we write this, and cause it to generate its string,
+                // we need to add all the string literals that we are going to match
+                //
+                outputFileST.setAttribute("literals", strings);
+                //System.out.println(outputFileST.toStructureString());
+		String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+		generator.write(outputFileST, fileName);
+	}
+                
+	protected void genRecognizerHeaderFile(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   StringTemplate headerFileST,
+										   String extName)
+		throws IOException
+	{
+            generator.write(headerFileST, grammar.name+ Grammar.grammarTypeToFileNameSuffix[grammar.type] +extName);
+	}
+        
+        protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   StringTemplate recognizerST,
+										   StringTemplate cyclicDFAST)
+	{
+		return recognizerST;
+	}
+        
+ 	/** Is scope in @scope::name {action} valid for this kind of grammar?
+	 *  Targets like C++ may want to allow new scopes like headerfile or
+	 *  some such.  The action names themselves are not policed at the
+	 *  moment so targets can add template actions w/o having to recompile
+	 *  ANTLR.
+	 */
+	public boolean isValidActionScope(int grammarType, String scope) {
+		switch (grammarType) {
+			case Grammar.LEXER :
+				if ( scope.equals("lexer") ) {return true;}
+                                if ( scope.equals("header") ) {return true;}
+                                if ( scope.equals("includes") ) {return true;}
+                                if ( scope.equals("preincludes") ) {return true;}
+                                if ( scope.equals("overrides") ) {return true;}
+				break;
+			case Grammar.PARSER :
+				if ( scope.equals("parser") ) {return true;}
+                                if ( scope.equals("header") ) {return true;}
+                                if ( scope.equals("includes") ) {return true;}
+                                if ( scope.equals("preincludes") ) {return true;}
+                                if ( scope.equals("overrides") ) {return true;}
+				break;
+			case Grammar.COMBINED :
+				if ( scope.equals("parser") ) {return true;}
+				if ( scope.equals("lexer") ) {return true;}
+                                if ( scope.equals("header") ) {return true;}
+                                if ( scope.equals("includes") ) {return true;}
+                                if ( scope.equals("preincludes") ) {return true;}
+                                if ( scope.equals("overrides") ) {return true;}
+				break;
+			case Grammar.TREE_PARSER :
+				if ( scope.equals("treeparser") ) {return true;}
+                                if ( scope.equals("header") ) {return true;}
+                                if ( scope.equals("includes") ) {return true;}
+                                if ( scope.equals("preincludes") ) {return true;}
+                                if ( scope.equals("overrides") ) {return true;}
+				break;
+		}
+		return false;
+	}
+        
+        public String getTargetCharLiteralFromANTLRCharLiteral(
+		CodeGenerator generator,
+		String literal)
+	{
+                
+                if  (literal.startsWith("'\\u") )
+                {
+                    literal = "0x" +literal.substring(3, 7);
+                }
+                else
+                {
+                    int c = literal.charAt(1);
+                      
+                    if  (c < 32 || c > 127) {
+                        literal  =  "0x" + Integer.toHexString(c);
+                    }
+                }
+                
+                return literal;
+	}
+        
+	/** Convert from an ANTLR string literal found in a grammar file to
+	 *  an equivalent string literal in the C target.
+         *  Because we msut support Unicode character sets and have chosen
+         *  to have the lexer match UTF32 characters, then we must encode
+         *  string matches to use 32 bit character arrays. Here then we
+         *  must produce the C array and cater for the case where the 
+         *  lexer has been eoncded with a string such as "xyz\n", which looks
+         *  slightly incogrous to me but is not incorrect.
+	 */
+	public String getTargetStringLiteralFromANTLRStringLiteral(
+		CodeGenerator generator,
+		String literal)
+	{
+            int             index;
+            int             outc;
+            String          bytes;
+            StringBuffer    buf     = new StringBuffer();
+            
+            buf.append("{ ");
+            
+            // We need ot lose any escaped characters of the form \x and just
+            // replace them with their actual values as well as lose the surrounding
+            // quote marks.
+            //
+            for (int i = 1; i< literal.length()-1; i++)
+            {
+                buf.append("0x");
+                                
+                if  (literal.charAt(i) == '\\') 
+                {
+                    i++; // Assume that there is a next character, this will just yield
+                         // invalid strings if not, which is what the input would be of course - invalid
+                    switch (literal.charAt(i))
+                    {
+                        case 'u':
+                        case 'U':
+                            buf.append(literal.substring(i+1, i+5));  // Already a hex string
+                            i = i + 5;                                // Move to next string/char/escape
+                            break;
+                            
+                        case    'n':
+                        case    'N':
+                            
+                            buf.append("0A");
+                            break;
+                            
+                        case    'r':
+                        case    'R':
+                            
+                            buf.append("0D");
+                            break;
+                            
+                        case    't':
+                        case    'T':
+                            
+                            buf.append("09");
+                            break;
+                        
+                        case    'b':
+                        case    'B':
+                            
+                            buf.append("08");
+                            break;
+                            
+                        case    'f':
+                        case    'F':
+                            
+                            buf.append("0C");
+                            break;
+                            
+                        default:
+                            
+                            // Anything else is what it is!
+                            //
+                            buf.append(Integer.toHexString((int)literal.charAt(i)).toUpperCase());
+                            break;
+                    }
+                }
+                else
+                {
+                    buf.append(Integer.toHexString((int)literal.charAt(i)).toUpperCase());
+                }
+                buf.append(", ");               
+            }
+            buf.append(" ANTLR3_STRING_TERMINATOR}");
+            
+            bytes   = buf.toString();            
+            index   = strings.indexOf(bytes);
+            
+            if  (index == -1)
+            {
+                strings.add(bytes);
+                index = strings.indexOf(bytes);
+            }
+             
+            String strref = "lit_" + String.valueOf(index+1);
+
+            return strref;
+	}
+       
+}
+
diff --git a/src/org/antlr/codegen/CodeGenTreeWalker.java b/src/org/antlr/codegen/CodeGenTreeWalker.java
new file mode 100644
index 0000000..35be1e2
--- /dev/null
+++ b/src/org/antlr/codegen/CodeGenTreeWalker.java
@@ -0,0 +1,3132 @@
+// $ANTLR 2.7.7 (2006-01-29): "codegen.g" -> "CodeGenTreeWalker.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.codegen;
+    import org.antlr.tool.*;
+    import org.antlr.analysis.*;
+    import org.antlr.misc.*;
+	import java.util.*;
+	import org.antlr.stringtemplate.*;
+    import antlr.TokenWithIndex;
+    import antlr.CommonToken;
+
+import antlr.TreeParser;
+import antlr.Token;
+import antlr.collections.AST;
+import antlr.RecognitionException;
+import antlr.ANTLRException;
+import antlr.NoViableAltException;
+import antlr.MismatchedTokenException;
+import antlr.SemanticException;
+import antlr.collections.impl.BitSet;
+import antlr.ASTPair;
+import antlr.collections.impl.ASTArray;
+
+
+/** Walk a grammar and generate code by gradually building up
+ *  a bigger and bigger StringTemplate.
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  June 15, 2004
+ */
+public class CodeGenTreeWalker extends antlr.TreeParser       implements CodeGenTreeWalkerTokenTypes
+ {
+
+	protected static final int RULE_BLOCK_NESTING_LEVEL = 0;
+	protected static final int OUTER_REWRITE_NESTING_LEVEL = 0;
+
+    protected String currentRuleName = null;
+    protected int blockNestingLevel = 0;
+    protected int rewriteBlockNestingLevel = 0;
+	protected int outerAltNum = 0;
+    protected StringTemplate currentBlockST = null;
+    protected boolean currentAltHasASTRewrite = false;
+    protected int rewriteTreeNestingLevel = 0;
+    protected Set rewriteRuleRefs = null;
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "codegen: "+ex.toString(),
+            ex);
+    }
+
+    public void reportError(String s) {
+        System.out.println("codegen: error: " + s);
+    }
+
+    protected CodeGenerator generator;
+    protected Grammar grammar;
+    protected StringTemplateGroup templates;
+
+    /** The overall lexer/parser template; simulate dynamically scoped
+     *  attributes by making this an instance var of the walker.
+     */
+    protected StringTemplate recognizerST;
+
+    protected StringTemplate outputFileST;
+    protected StringTemplate headerFileST;
+
+    protected String outputOption = "";
+
+	protected StringTemplate getWildcardST(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
+		String name = "wildcard";
+		if ( grammar.type==Grammar.LEXER ) {
+			name = "wildcardChar";
+		}
+		return getTokenElementST(name, name, elementAST, ast_suffix, label);
+	}
+
+	/** Instantiate the template for a rule-reference element.
+	 *  The base template name gets the suffix computed by getSTSuffix
+	 *  (AST operator part, rewrite tracking, list-label part).  Side effect:
+	 *  when tree building or tracking requires a label and the element has
+	 *  none (and we are not inside a synpred rule), a unique label is
+	 *  synthesized and registered with the grammar as a rule-ref label.
+	 */
+	protected StringTemplate getRuleElementST(String name,
+										      String elementName,
+											  GrammarAST elementAST,
+    										  GrammarAST ast_suffix,
+    										  String label)
+	{
+		String suffix = getSTSuffix(ast_suffix,label);
+		name += suffix;
+		// if we're building trees and there is no label, gen a label
+		// unless we're in a synpred rule.
+		Rule r = grammar.getRule(currentRuleName);
+		if ( (grammar.buildAST()||suffix.length()>0) && label==null &&
+		     (r==null || !r.isSynPred) )
+		{
+			// we will need a label to do the AST or tracking, make one
+			label = generator.createUniqueLabel(elementName);
+			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
+			grammar.defineRuleRefLabel(currentRuleName, labelTok, elementAST);
+		}
+		StringTemplate elementST = templates.getInstanceOf(name);
+		if ( label!=null ) {
+			elementST.setAttribute("label", label);
+		}
+		return elementST;
+	}
+
+	/** Instantiate the template for a token-reference element.
+	 *  Mirrors getRuleElementST, but a synthesized label (needed when tree
+	 *  building or tracking requires one and none was supplied, outside of
+	 *  synpred rules) is registered as a token-ref label instead.
+	 */
+	protected StringTemplate getTokenElementST(String name,
+											   String elementName,
+											   GrammarAST elementAST,
+											   GrammarAST ast_suffix,
+											   String label)
+	{
+		String suffix = getSTSuffix(ast_suffix,label);
+		name += suffix;
+		// if we're building trees and there is no label, gen a label
+		// unless we're in a synpred rule.
+		Rule r = grammar.getRule(currentRuleName);
+		if ( (grammar.buildAST()||suffix.length()>0) && label==null &&
+		     (r==null || !r.isSynPred) )
+		{
+			label = generator.createUniqueLabel(elementName);
+			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
+			grammar.defineTokenRefLabel(currentRuleName, labelTok, elementAST);
+		}
+		StringTemplate elementST = templates.getInstanceOf(name);
+		if ( label!=null ) {
+			elementST.setAttribute("label", label);
+		}
+		return elementST;
+	}
+
+    /** Is the given label a list label (+= style) in the current rule?
+     *  Returns false for a null label or when the current rule is unknown.
+     */
+    public boolean isListLabel(String label) {
+		if ( label==null ) {
+			return false;
+		}
+		Rule r = grammar.getRule(currentRuleName);
+		if ( r==null ) {
+			return false;
+		}
+		Grammar.LabelElementPair pair = r.getLabel(label);
+		return pair!=null &&
+			   (pair.type==Grammar.TOKEN_LIST_LABEL ||
+			    pair.type==Grammar.RULE_LIST_LABEL);
+    }
+
+	/** Return a non-empty template name suffix if the token is to be
+	 *  tracked, added to a tree, or both.
+	 *
+	 *  The suffix is composed, in order, of: an operator part ("RuleRoot"
+	 *  for ^, "Bang" for !), "Track" when the current outer alt has an AST
+	 *  rewrite, and "AndListLabel" when label is a list label.  Lexer
+	 *  grammars always get the empty suffix.  Note the computed astPart is
+	 *  not folded into the returned suffix here.
+	 */
+	protected String getSTSuffix(GrammarAST ast_suffix, String label) {
+		if ( grammar.type==Grammar.LEXER ) {
+			return "";
+		}
+		// handle list label stuff; make element use "Track"
+
+		String astPart = "";
+		String operatorPart = "";
+		String rewritePart = "";
+		String listLabelPart = "";
+		if ( grammar.buildAST() ) {
+			astPart = "AST";
+		}
+		if ( ast_suffix!=null ) {
+			if ( ast_suffix.getType()==ANTLRParser.ROOT ) {
+    			operatorPart = "RuleRoot";
+    		}
+    		else if ( ast_suffix.getType()==ANTLRParser.BANG ) {
+    			operatorPart = "Bang";
+    		}
+   		}
+		if ( currentAltHasASTRewrite ) {
+			rewritePart = "Track";
+		}
+		if ( isListLabel(label) ) {
+			listLabelPart = "AndListLabel";
+		}
+		String STsuffix = operatorPart+rewritePart+listLabelPart;
+		//System.out.println("suffix = "+STsuffix);
+
+    	return STsuffix;
+	}
+
+    /** Convert rewrite AST lists to target labels list */
+    protected List<String> getTokenTypesAsTargetLabels(Set<GrammarAST> refs) {
+        if ( refs==null || refs.size()==0 ) {
+            return null;
+        }
+        List<String> labels = new ArrayList<String>(refs.size());
+        for (GrammarAST t : refs) {
+            String label;
+            if ( t.getType()==ANTLRParser.RULE_REF ) {
+                label = t.getText();
+            }
+            else if ( t.getType()==ANTLRParser.LABEL ) {
+                label = t.getText();
+            }
+            else {
+                // must be char or string literal
+                label = generator.getTokenTypeAsTargetLabel(
+                            grammar.getTokenType(t.getText()));
+            }
+            labels.add(label);
+        }
+        return labels;
+    }
+
+    /** Bind this walker to a grammar and cache its code generator and
+     *  template group for the duration of the walk.
+     */
+    protected void init(Grammar g) {
+        this.grammar = g;
+        this.generator = grammar.getCodeGenerator();
+        this.templates = generator.getTemplates();
+    }
+/** Default constructor; installs the generated token-name table used by
+ *  the antlr runtime for error reporting.
+ */
+public CodeGenTreeWalker() {
+	tokenNames = _tokenNames;
+}
+
+	/** Entry point of the walk (generated by ANTLR 2.7.7 from codegen.g;
+	 *  do not hand-edit the match logic).  Binds the walker to grammar g and
+	 *  the three output templates, pushes grammar-level options (superClass,
+	 *  output, ASTLabelType, TokenLabelType, numRules) into the templates,
+	 *  warns on a tree grammar missing ASTLabelType, then dispatches on the
+	 *  grammar-type root node and walks its grammarSpec child.
+	 */
+	public final void grammar(AST _t,
+		Grammar g,
+        StringTemplate recognizerST,
+        StringTemplate outputFileST,
+        StringTemplate headerFileST
+	) throws RecognitionException {
+		
+		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		init(g);
+		this.recognizerST = recognizerST;
+		this.outputFileST = outputFileST;
+		this.headerFileST = headerFileST;
+		String superClass = (String)g.getOption("superClass");
+		outputOption = (String)g.getOption("output");
+		recognizerST.setAttribute("superClass", superClass);
+		if ( g.type!=Grammar.LEXER ) {
+				recognizerST.setAttribute("ASTLabelType", g.getOption("ASTLabelType"));
+			}
+		if ( g.type==Grammar.TREE_PARSER && g.getOption("ASTLabelType")==null ) {
+				ErrorManager.grammarWarning(ErrorManager.MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR,
+										   g,
+										   null,
+										   g.name);
+			}
+		if ( g.type!=Grammar.TREE_PARSER ) {
+				recognizerST.setAttribute("labelType", g.getOption("TokenLabelType"));
+			}
+			recognizerST.setAttribute("numRules", grammar.getRules().size());
+			outputFileST.setAttribute("numRules", grammar.getRules().size());
+			headerFileST.setAttribute("numRules", grammar.getRules().size());
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			// generated decision: one case per grammar type, each just
+			// descends into the grammarSpec child
+			switch ( _t.getType()) {
+			case LEXER_GRAMMAR:
+			{
+				AST __t3 = _t;
+				GrammarAST tmp1_AST_in = (GrammarAST)_t;
+				match(_t,LEXER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t3;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PARSER_GRAMMAR:
+			{
+				AST __t4 = _t;
+				GrammarAST tmp2_AST_in = (GrammarAST)_t;
+				match(_t,PARSER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t4;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TREE_GRAMMAR:
+			{
+				AST __t5 = _t;
+				GrammarAST tmp3_AST_in = (GrammarAST)_t;
+				match(_t,TREE_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t5;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case COMBINED_GRAMMAR:
+			{
+				AST __t6 = _t;
+				GrammarAST tmp4_AST_in = (GrammarAST)_t;
+				match(_t,COMBINED_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t6;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated match for a grammar body: ID, optional DOC_COMMENT,
+	 *  optional OPTIONS and TOKENS subtrees, attribute scopes, grammar-level
+	 *  actions (AMPERSAND), then the rules.  Action code records the doc
+	 *  comment, the recognizer name (grammar name + type-specific file
+	 *  suffix) and the global scopes on the output templates before walking
+	 *  the rules.
+	 */
+	public final void grammarSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST name = null;
+		GrammarAST cmt = null;
+		
+		try {      // for error handling
+			name = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case DOC_COMMENT:
+			{
+				cmt = (GrammarAST)_t;
+				match(_t,DOC_COMMENT);
+				_t = _t.getNextSibling();
+				
+						 outputFileST.setAttribute("docComment", cmt.getText());
+						 headerFileST.setAttribute("docComment", cmt.getText());
+						
+				break;
+			}
+			case OPTIONS:
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			
+					String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
+			String n = name.getText()+suffix;
+					recognizerST.setAttribute("name", n);
+					outputFileST.setAttribute("name", n);
+					headerFileST.setAttribute("name", n);
+					recognizerST.setAttribute("scopes", grammar.getGlobalScopes());
+					headerFileST.setAttribute("scopes", grammar.getGlobalScopes());
+					
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				// OPTIONS subtree is consumed without action code
+				AST __t12 = _t;
+				GrammarAST tmp5_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getFirstChild();
+				GrammarAST tmp6_AST_in = (GrammarAST)_t;
+				if ( _t==null ) throw new MismatchedTokenException();
+				_t = _t.getNextSibling();
+				_t = __t12;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKENS:
+			{
+				// TOKENS subtree is consumed without action code
+				AST __t14 = _t;
+				GrammarAST tmp7_AST_in = (GrammarAST)_t;
+				match(_t,TOKENS);
+				_t = _t.getFirstChild();
+				GrammarAST tmp8_AST_in = (GrammarAST)_t;
+				if ( _t==null ) throw new MismatchedTokenException();
+				_t = _t.getNextSibling();
+				_t = __t14;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop16:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==SCOPE)) {
+					attrScope(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop16;
+				}
+				
+			} while (true);
+			}
+			{
+			_loop18:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp9_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop18;
+				}
+				
+			} while (true);
+			}
+			rules(_t,recognizerST);
+			_t = _retTree;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated match for a global attribute scope: SCOPE ID ACTION.
+	 *  No code-generation action is attached here; the subtree is merely
+	 *  consumed so the walk can continue.
+	 */
+	public final void attrScope(AST _t) throws RecognitionException {
+		
+		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t8 = _t;
+			GrammarAST tmp10_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			GrammarAST tmp11_AST_in = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			GrammarAST tmp12_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t8;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated loop over the RULE subtrees.  For each rule, generates its
+	 *  template via rule() — unless the rule is a synpred whose name never
+	 *  ended up in a DFA, in which case the subtree is skipped — and attaches
+	 *  the result to the recognizer, output-file and header-file templates.
+	 */
+	public final void rules(AST _t,
+		StringTemplate recognizerST
+	) throws RecognitionException {
+		
+		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StringTemplate rST;
+		
+		
+		try {      // for error handling
+			{
+			int _cnt22=0;
+			_loop22:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==RULE)) {
+					{
+					
+								String ruleName = _t.getFirstChild().getText();
+								Rule r = grammar.getRule(ruleName);
+								
+					if (_t==null) _t=ASTNULL;
+					// semantic predicate: only generate non-synpreds, or
+					// synpreds that some DFA actually uses
+					if (((_t.getType()==RULE))&&(!r.isSynPred || grammar.synPredNamesUsedInDFA.contains(ruleName))) {
+						rST=rule(_t);
+						_t = _retTree;
+						
+										if ( rST!=null ) {
+											recognizerST.setAttribute("rules", rST);
+											outputFileST.setAttribute("rules", rST);
+											headerFileST.setAttribute("rules", rST);
+										}
+										
+					}
+					else if ((_t.getType()==RULE)) {
+						GrammarAST tmp13_AST_in = (GrammarAST)_t;
+						match(_t,RULE);
+						_t = _t.getNextSibling();
+					}
+					else {
+						throw new NoViableAltException(_t);
+					}
+					
+					}
+				}
+				else {
+					if ( _cnt22>=1 ) { break _loop22; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt22++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated match for a single RULE subtree; returns the filled-in
+	 *  StringTemplate for the rule.  Action code picks the template name
+	 *  ("synpredRule", "tokensRule", "lexerRule" or "rule"), wires in the
+	 *  rule descriptor, description, memoization flag, the block template
+	 *  and any exception handlers.  For synpreds the base (non-debug)
+	 *  template group is swapped in and restored at the end.
+	 */
+	public final StringTemplate  rule(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST mod = null;
+		
+		String r;
+		String initAction = null;
+		StringTemplate b;
+			// get the dfa for the BLOCK
+		GrammarAST block=rule_AST_in.getFirstChildWithType(BLOCK);
+		DFA dfa=block.getLookaheadDFA();
+			// init blockNestingLevel so it's block level RULE_BLOCK_NESTING_LEVEL
+			// for alts of rule
+			blockNestingLevel = RULE_BLOCK_NESTING_LEVEL-1;
+			Rule ruleDescr = grammar.getRule(rule_AST_in.getFirstChild().getText());
+		
+			// For syn preds, we don't want any AST code etc... in there.
+			// Save old templates ptr and restore later.  Base templates include Dbg.
+			StringTemplateGroup saveGroup = templates;
+			if ( ruleDescr.isSynPred ) {
+				templates = generator.getBaseTemplates();
+			}
+		
+		
+		try {      // for error handling
+			AST __t24 = _t;
+			GrammarAST tmp14_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			r=id.getText(); currentRuleName = r;
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				mod = _t==ASTNULL ? null : (GrammarAST)_t;
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			AST __t26 = _t;
+			GrammarAST tmp15_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				GrammarAST tmp16_AST_in = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t26;
+			_t = _t.getNextSibling();
+			AST __t28 = _t;
+			GrammarAST tmp17_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				GrammarAST tmp18_AST_in = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t28;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				AST __t31 = _t;
+				GrammarAST tmp19_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getFirstChild();
+				GrammarAST tmp20_AST_in = (GrammarAST)_t;
+				if ( _t==null ) throw new MismatchedTokenException();
+				_t = _t.getNextSibling();
+				_t = __t31;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop34:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp21_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop34;
+				}
+				
+			} while (true);
+			}
+			b=block(_t,"ruleBlock", dfa);
+			_t = _retTree;
+			
+						String description =
+							grammar.grammarTreeToString(rule_AST_in.getFirstChildWithType(BLOCK),
+			false);
+						description =
+			generator.target.getTargetStringLiteralFromString(description);
+						b.setAttribute("description", description);
+						// do not generate lexer rules in combined grammar
+						String stName = null;
+						if ( ruleDescr.isSynPred ) {
+							stName = "synpredRule";
+						}
+						else if ( grammar.type==Grammar.LEXER ) {
+							if ( r.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
+								stName = "tokensRule";
+							}
+							else {
+								stName = "lexerRule";
+							}
+						}
+						else {
+							if ( !(grammar.type==Grammar.COMBINED &&
+								 Character.isUpperCase(r.charAt(0))) )
+							{
+								stName = "rule";
+							}
+						}
+						// NOTE(review): stName can remain null here (upper-case
+						// rule in a COMBINED grammar); presumably
+						// getInstanceOf(null) yields no template — confirm
+						code = templates.getInstanceOf(stName);
+						if ( code.getName().equals("rule") ) {
+							code.setAttribute("emptyRule",
+								Boolean.valueOf(grammar.isEmptyRule(block)));
+						}
+						code.setAttribute("ruleDescriptor", ruleDescr);
+						String memo = (String)rule_AST_in.getOption("memoize");
+						if ( memo==null ) {
+							memo = (String)grammar.getOption("memoize");
+						}
+						// the guard already ensures memo.equals("true"), so the
+						// Boolean recomputed below is always TRUE
+						if ( memo!=null && memo.equals("true") &&
+						     (stName.equals("rule")||stName.equals("lexerRule")) )
+						{
+				code.setAttribute("memoize",
+					Boolean.valueOf(memo!=null && memo.equals("true")));
+			}
+						
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t,code);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp22_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			_t = __t24;
+			_t = _t.getNextSibling();
+			
+			if ( code!=null ) {
+						if ( grammar.type==Grammar.LEXER ) {
+					    	boolean naked =
+					    		r.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
+					    	    (mod!=null&&mod.getText().equals(Grammar.FRAGMENT_RULE_MODIFIER));
+					    	code.setAttribute("nakedBlock", Boolean.valueOf(naked));
+						}
+						else {
+							description =
+								grammar.grammarTreeToString(rule_AST_in,false);
+							description =
+							    generator.target.getTargetStringLiteralFromString(description);
+							code.setAttribute("description", description);
+						}
+						Rule theRule = grammar.getRule(r);
+						generator.translateActionAttributeReferencesForSingleScope(
+							theRule,
+							theRule.getActions()
+						);
+						code.setAttribute("ruleName", r);
+						code.setAttribute("block", b);
+						if ( initAction!=null ) {
+							code.setAttribute("initAction", initAction);
+						}
+			}
+					templates = saveGroup;
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	/** Generated match for a rule modifier: one of protected, public,
+	 *  private or fragment.  The node is consumed; callers that care (see
+	 *  rule()) capture the node itself and inspect its text.
+	 */
+	public final void modifier(AST _t) throws RecognitionException {
+		
+		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_protected:
+			{
+				GrammarAST tmp23_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_protected);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_public:
+			{
+				GrammarAST tmp24_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_public);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_private:
+			{
+				GrammarAST tmp25_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_private);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case FRAGMENT:
+			{
+				GrammarAST tmp26_AST_in = (GrammarAST)_t;
+				match(_t,FRAGMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated match for a rule's scope specification:
+	 *  SCOPE [ACTION] ID* .  The subtree is consumed without any
+	 *  code-generation action here.
+	 */
+	public final void ruleScopeSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t38 = _t;
+			GrammarAST tmp27_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ACTION:
+			{
+				GrammarAST tmp28_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			case ID:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop41:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ID)) {
+					GrammarAST tmp29_AST_in = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop41;
+				}
+				
+			} while (true);
+			}
+			_t = __t38;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated match for a BLOCK subtree; returns the block's template.
+	 *  Action code instantiates blockTemplateName (adding the lookahead
+	 *  decision when dfa!=null) or the "...SingleAlt" variant, tracks the
+	 *  block nesting level, and adds one "alts" attribute per alternative.
+	 *  A block collapsed to a token set is handled by setBlock instead.
+	 *  Rewrite code, when present, is appended as the final element of its
+	 *  alternative.
+	 */
+	public final StringTemplate  block(AST _t,
+		String blockTemplateName, DFA dfa
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StringTemplate decision = null;
+		if ( dfa!=null ) {
+		code = templates.getInstanceOf(blockTemplateName);
+		decision = generator.genLookaheadDecision(recognizerST,dfa);
+		code.setAttribute("decision", decision);
+		code.setAttribute("decisionNumber", dfa.getDecisionNumber());
+				code.setAttribute("maxK",dfa.getMaxLookaheadDepth());
+				code.setAttribute("maxAlt",dfa.getNumberOfAlts());
+		}
+		else {
+		code = templates.getInstanceOf(blockTemplateName+"SingleAlt");
+		}
+		blockNestingLevel++;
+		code.setAttribute("blockLevel", blockNestingLevel);
+		code.setAttribute("enclosingBlockLevel", blockNestingLevel-1);
+		StringTemplate alt = null;
+		StringTemplate rew = null;
+		StringTemplate sb = null;
+		GrammarAST r = null;
+		int altNum = 1;
+			if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+		this.outerAltNum=1;
+		}
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			// semantic predicate: a block carrying a set value is emitted as
+			// a single set-match alternative
+			if (((_t.getType()==BLOCK))&&(block_AST_in.getSetValue()!=null)) {
+				sb=setBlock(_t);
+				_t = _retTree;
+				
+				code.setAttribute("alts",sb);
+				blockNestingLevel--;
+				
+			}
+			else if ((_t.getType()==BLOCK)) {
+				AST __t43 = _t;
+				GrammarAST tmp30_AST_in = (GrammarAST)_t;
+				match(_t,BLOCK);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case OPTIONS:
+				{
+					GrammarAST tmp31_AST_in = (GrammarAST)_t;
+					match(_t,OPTIONS);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case ALT:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				{
+				int _cnt46=0;
+				_loop46:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==ALT)) {
+						alt=alternative(_t);
+						_t = _retTree;
+						r=(GrammarAST)_t;
+						rew=rewrite(_t);
+						_t = _retTree;
+						
+						if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+							this.outerAltNum++;
+						}
+						// add the rewrite code as just another element in the alt :)
+								  if ( rew!=null ) {
+								  	alt.setAttribute("elements.{el,line,pos}",
+								  		rew, Utils.integer(r.getLine()), Utils.integer(r.getColumn()));
+								  }
+								  // add this alt to the list of alts for this block
+						code.setAttribute("alts",alt);
+						alt.setAttribute("altNum", Utils.integer(altNum));
+						alt.setAttribute("outerAlt",
+						Boolean.valueOf(blockNestingLevel==RULE_BLOCK_NESTING_LEVEL));
+						altNum++;
+						
+					}
+					else {
+						if ( _cnt46>=1 ) { break _loop46; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt46++;
+				} while (true);
+				}
+				GrammarAST tmp32_AST_in = (GrammarAST)_t;
+				match(_t,EOB);
+				_t = _t.getNextSibling();
+				_t = __t43;
+				_t = _t.getNextSibling();
+				blockNestingLevel--;
+			}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	/** Generated match for a rule's exception handlers: one or more catch
+	 *  clauses optionally followed by a finally clause, or a lone finally
+	 *  clause.  Delegates each clause to exceptionHandler/finallyClause,
+	 *  passing along the rule's template ruleST.
+	 */
+	public final void exceptionGroup(AST _t,
+		StringTemplate ruleST
+	) throws RecognitionException {
+		
+		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			{
+				{
+				int _cnt50=0;
+				_loop50:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==LITERAL_catch)) {
+						exceptionHandler(_t,ruleST);
+						_t = _retTree;
+					}
+					else {
+						if ( _cnt50>=1 ) { break _loop50; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt50++;
+				} while (true);
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case LITERAL_finally:
+				{
+					finallyClause(_t,ruleST);
+					_t = _retTree;
+					break;
+				}
+				case EOR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				break;
+			}
+			case LITERAL_finally:
+			{
+				finallyClause(_t,ruleST);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Generated match for a BLOCK that was collapsed to a token set.
+	 *  Builds a "matchSet" element template (with element index and, for
+	 *  non-lexers, a local FOLLOW set) wrapped in a single "alt" template
+	 *  and returns it.  When the outer alt has an AST rewrite, the set
+	 *  reference is also tracked on the rule for rewrite generation.
+	 */
+	public final StringTemplate  setBlock(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST setBlock_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST s = null;
+		
+		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
+		Rule r = grammar.getRule(currentRuleName);
+		currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
+		if ( currentAltHasASTRewrite ) {
+		r.trackTokenReferenceInAlt(setBlock_AST_in, outerAltNum);
+		}
+		}
+		
+		
+		try {      // for error handling
+			s = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getNextSibling();
+			
+			StringTemplate setcode =
+			getTokenElementST("matchSet", "set", s, null, null);
+			int i = ((TokenWithIndex)s.getToken()).getIndex();
+					setcode.setAttribute("elementIndex", i);
+					if ( grammar.type!=Grammar.LEXER ) {
+						generator.generateLocalFOLLOW(s,"set",currentRuleName,i);
+			}
+			setcode.setAttribute("s",
+			generator.genSetExpr(templates,s.getSetValue(),1,false));
+			StringTemplate altcode=templates.getInstanceOf("alt");
+					altcode.setAttribute("elements.{el,line,pos}",
+									     setcode,
+			Utils.integer(s.getLine()),
+			Utils.integer(s.getColumn())
+			);
+			altcode.setAttribute("altNum", Utils.integer(1));
+			altcode.setAttribute("outerAlt",
+			Boolean.valueOf(blockNestingLevel==RULE_BLOCK_NESTING_LEVEL));
+			if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
+			altcode.setAttribute("autoAST", Boolean.valueOf(true));
+			}
+			code = altcode;
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "alternative": walks one ALT subtree and collects the
+	// code templates of its elements into an "alt" StringTemplate
+	// ("elements.{el,line,pos}" aggregate attribute).  Returns the populated
+	// "alt" template; _retTree follows the walker protocol.
+	public final StringTemplate  alternative(AST _t) throws RecognitionException {
+		StringTemplate code=templates.getInstanceOf("alt");
+		
+		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST a = null;
+		
+		/*
+		// TODO: can we use Rule.altsWithRewrites???
+		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+			GrammarAST aRewriteNode = #alternative.findFirstType(REWRITE);
+			if ( grammar.buildAST() &&
+				 (aRewriteNode!=null||
+				 (#alternative.getNextSibling()!=null &&
+				  #alternative.getNextSibling().getType()==REWRITE)) )
+			{
+				currentAltHasASTRewrite = true;
+			}
+			else {
+				currentAltHasASTRewrite = false;
+			}
+		}
+		*/
+		// At the outermost rule level, note whether this outer alt has a
+		// rewrite; that flag suppresses automatic AST construction below.
+		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
+		Rule r = grammar.getRule(currentRuleName);
+		currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
+		}
+		// Human-readable description of the alt, escaped for the target
+		// language, used in generated comments/error messages.
+		String description = grammar.grammarTreeToString(alternative_AST_in, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+		code.setAttribute("description", description);
+		if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
+			code.setAttribute("autoAST", Boolean.valueOf(true));
+		}
+		StringTemplate e;
+		
+		
+		try {      // for error handling
+			AST __t57 = _t;
+			a = _t==ASTNULL ? null :(GrammarAST)_t;
+			match(_t,ALT);
+			_t = _t.getFirstChild();
+			{
+			// (element)+ loop: one iteration per element of the alternative.
+			int _cnt59=0;
+			_loop59:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.getType()==ROOT||_t.getType()==RULE_REF||_t.g [...]
+					GrammarAST elAST=(GrammarAST)_t;
+					e=element(_t,null,null);
+					_t = _retTree;
+					
+								// element() may return null (e.g. pure syntactic
+								// predicates); only record real code templates.
+								if ( e!=null ) {
+										code.setAttribute("elements.{el,line,pos}",
+														  e,
+														  Utils.integer(elAST.getLine()),
+														  Utils.integer(elAST.getColumn())
+														 );
+								}
+								
+				}
+				else {
+					if ( _cnt59>=1 ) { break _loop59; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt59++;
+			} while (true);
+			}
+			GrammarAST tmp33_AST_in = (GrammarAST)_t;
+			match(_t,EOA);
+			_t = _t.getNextSibling();
+			_t = __t57;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "rewrite": handles a (possibly empty) chain of
+	// "-> ..." REWRITE subtrees attached to an alternative.  Chooses the
+	// "rewriteTemplate" or "rewriteCode" template depending on the grammar's
+	// output option, records which labels/elements the rewrite references,
+	// and adds one {pred,alt,description} entry per REWRITE (supporting
+	// semantic-predicate-gated rewrites).  Returns null when there is no
+	// REWRITE node at the current position.
+	public final StringTemplate  rewrite(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST r = null;
+		GrammarAST pred = null;
+		
+		StringTemplate alt;
+		if ( rewrite_AST_in.getType()==REWRITE ) {
+			if ( generator.grammar.buildTemplate() ) {
+				code = templates.getInstanceOf("rewriteTemplate");
+			}
+			else {
+				// Tree-construction rewrite: seed the template with the label
+				// sets the rewrite refers to so the target code can create
+				// the right rewrite streams.
+				code = templates.getInstanceOf("rewriteCode");
+				code.setAttribute("treeLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
+				code.setAttribute("rewriteBlockLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
+		code.setAttribute("referencedElementsDeep",
+		getTokenTypesAsTargetLabels(rewrite_AST_in.rewriteRefsDeep));
+		Set<String> tokenLabels =
+		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.TOKEN_LABEL);
+		Set<String> tokenListLabels =
+		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.TOKEN_LIST_LABEL);
+		Set<String> ruleLabels =
+		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.RULE_LABEL);
+		Set<String> ruleListLabels =
+		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.RULE_LIST_LABEL);
+		// just in case they ref $r for "previous value", make a stream
+		// from retval.tree
+		StringTemplate retvalST = templates.getInstanceOf("prevRuleRootRef");
+		ruleLabels.add(retvalST.toString());
+		code.setAttribute("referencedTokenLabels", tokenLabels);
+		code.setAttribute("referencedTokenListLabels", tokenListLabels);
+		code.setAttribute("referencedRuleLabels", ruleLabels);
+		code.setAttribute("referencedRuleListLabels", ruleListLabels);
+			}
+		}
+		
+		
+		try {      // for error handling
+			{
+			// (REWRITE ...)* loop: each iteration consumes one rewrite alt,
+			// optionally preceded by a gating SEMPRED.
+			_loop95:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==REWRITE)) {
+					rewriteRuleRefs = new HashSet();
+					AST __t93 = _t;
+					r = _t==ASTNULL ? null :(GrammarAST)_t;
+					match(_t,REWRITE);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case SEMPRED:
+					{
+						pred = (GrammarAST)_t;
+						match(_t,SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case ALT:
+					case TEMPLATE:
+					case ACTION:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					alt=rewrite_alternative(_t);
+					_t = _retTree;
+					_t = __t93;
+					_t = _t.getNextSibling();
+					
+					// Reset nesting for the next rewrite and emit the
+					// {pred,alt,description} triple for this one.
+					rewriteBlockNestingLevel = OUTER_REWRITE_NESTING_LEVEL;
+								List predChunks = null;
+								if ( pred!=null ) {
+									//predText = #pred.getText();
+							predChunks = generator.translateAction(currentRuleName,pred);
+								}
+								String description =
+								    grammar.grammarTreeToString(r,false);
+								description = generator.target.getTargetStringLiteralFromString(description);
+								code.setAttribute("alts.{pred,alt,description}",
+												  predChunks,
+												  alt,
+												  description);
+								pred=null;
+								
+				}
+				else {
+					break _loop95;
+				}
+				
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "exceptionHandler": matches a
+	// ^("catch" ARG_ACTION ACTION) subtree and adds the translated handler
+	// to the enclosing rule's template as an
+	// "exceptions.{decl,action}" entry (decl = the catch declaration text,
+	// action = the translated action chunks).
+	public final void exceptionHandler(AST _t,
+		StringTemplate ruleST
+	) throws RecognitionException {
+		
+		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t53 = _t;
+			GrammarAST tmp34_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_catch);
+			_t = _t.getFirstChild();
+			GrammarAST tmp35_AST_in = (GrammarAST)_t;
+			match(_t,ARG_ACTION);
+			_t = _t.getNextSibling();
+			GrammarAST tmp36_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t53;
+			_t = _t.getNextSibling();
+			
+				// Translate $-references in the user action before emitting.
+				List chunks = generator.translateAction(currentRuleName,tmp36_AST_in);
+				ruleST.setAttribute("exceptions.{decl,action}",tmp35_AST_in.getText(),chunks);
+				
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	// Tree-grammar rule "finallyClause": matches a ^("finally" ACTION)
+	// subtree and installs the translated action chunks on the rule template
+	// as its "finally" attribute.
+	public final void finallyClause(AST _t,
+		StringTemplate ruleST
+	) throws RecognitionException {
+		
+		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t55 = _t;
+			GrammarAST tmp37_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_finally);
+			_t = _t.getFirstChild();
+			GrammarAST tmp38_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t55;
+			_t = _t.getNextSibling();
+			
+				// Translate $-references in the user action before emitting.
+				List chunks = generator.translateAction(currentRuleName,tmp38_AST_in);
+				ruleST.setAttribute("finally",chunks);
+				
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	// Tree-grammar rule "element": the central dispatcher for a single
+	// grammar element inside an alternative.  Switches on the node type and
+	// delegates to the appropriate sub-rule (notElement, tree, element_action,
+	// ebnf, atom, ...) or builds a template inline (char ranges, validating
+	// semantic predicates).  ROOT/BANG suffixes and ASSIGN/PLUS_ASSIGN labels
+	// are peeled off by recursing on the wrapped element with the suffix/label
+	// passed down.  May return null for elements that generate no code here
+	// (e.g. SYN_SEMPRED, BACKTRACK_SEMPRED, EPSILON).
+	public final StringTemplate  element(AST _t,
+		GrammarAST label, GrammarAST astSuffix
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST n = null;
+		GrammarAST alabel = null;
+		GrammarAST label2 = null;
+		GrammarAST a = null;
+		GrammarAST b = null;
+		GrammarAST sp = null;
+		GrammarAST gsp = null;
+		
+		IntSet elements=null;
+		GrammarAST ast = null;
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				// ^(ROOT element): recurse with the ROOT node as AST suffix.
+				AST __t61 = _t;
+				GrammarAST tmp39_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getFirstChild();
+				code=element(_t,label,tmp39_AST_in);
+				_t = _retTree;
+				_t = __t61;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				// ^(BANG element): recurse with the BANG node as AST suffix.
+				AST __t62 = _t;
+				GrammarAST tmp40_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getFirstChild();
+				code=element(_t,label,tmp40_AST_in);
+				_t = _retTree;
+				_t = __t62;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case NOT:
+			{
+				// ~x: delegate to notElement with the NOT node itself.
+				AST __t63 = _t;
+				n = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				code=notElement(_t,n, label, astSuffix);
+				_t = _retTree;
+				_t = __t63;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				// label=element: recurse with the ID as the label.
+				AST __t64 = _t;
+				GrammarAST tmp41_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				alabel = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				code=element(_t,alabel,astSuffix);
+				_t = _retTree;
+				_t = __t64;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PLUS_ASSIGN:
+			{
+				// label+=element: list-label form; same recursion as ASSIGN.
+				AST __t65 = _t;
+				GrammarAST tmp42_AST_in = (GrammarAST)_t;
+				match(_t,PLUS_ASSIGN);
+				_t = _t.getFirstChild();
+				label2 = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				code=element(_t,label2,astSuffix);
+				_t = _retTree;
+				_t = __t65;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				// 'a'..'z': build a "charRangeRef" template with target-encoded
+				// low/high char literals.
+				AST __t66 = _t;
+				GrammarAST tmp43_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				a = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				b = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t66;
+				_t = _t.getNextSibling();
+				code = templates.getInstanceOf("charRangeRef");
+						 String low =
+						 	generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,a.getText());
+						 String high =
+						 	generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,b.getText());
+				code.setAttribute("a", low);
+				code.setAttribute("b", high);
+				if ( label!=null ) {
+				code.setAttribute("label", label.getText());
+				}
+				
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				code=tree(_t);
+				_t = _retTree;
+				break;
+			}
+			case ACTION:
+			{
+				code=element_action(_t);
+				_t = _retTree;
+				break;
+			}
+			case GATED_SEMPRED:
+			case SEMPRED:
+			{
+				// Validating semantic predicate; a gated predicate is treated
+				// the same here (sp=gsp below).
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case SEMPRED:
+				{
+					sp = (GrammarAST)_t;
+					match(_t,SEMPRED);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case GATED_SEMPRED:
+				{
+					gsp = (GrammarAST)_t;
+					match(_t,GATED_SEMPRED);
+					_t = _t.getNextSibling();
+					sp=gsp;
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				
+				code = templates.getInstanceOf("validateSemanticPredicate");
+				code.setAttribute("pred", generator.translateAction(currentRuleName,sp));
+						String description =
+							generator.target.getTargetStringLiteralFromString(sp.getText());
+						code.setAttribute("description", description);
+				
+				break;
+			}
+			case SYN_SEMPRED:
+			{
+				// Syntactic predicate: consumed but generates no code here.
+				GrammarAST tmp44_AST_in = (GrammarAST)_t;
+				match(_t,SYN_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BACKTRACK_SEMPRED:
+			{
+				GrammarAST tmp45_AST_in = (GrammarAST)_t;
+				match(_t,BACKTRACK_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case EPSILON:
+			{
+				GrammarAST tmp46_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+				// BLOCK..POSITIVE_CLOSURE with no set value => real subblock
+				// (ebnf); otherwise a simple atom (or a BLOCK collapsed to a
+				// set, handled inside atom/set).
+				if (_t==null) _t=ASTNULL;
+				if ((((_t.getType() >= BLOCK && _t.getType() <= POSITIVE_CLOSURE)))&&(element_AST_in.getSetValue()==null)) {
+					code=ebnf(_t);
+					_t = _retTree;
+				}
+				else if ((_t.getType()==BLOCK||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==WILDCARD)) {
+					code=atom(_t,label, astSuffix);
+					_t = _retTree;
+				}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "notElement": generates a "matchSet" for ~x.
+	// Computes the complement token/char set for the negated element
+	// (char literal, string literal, token ref, or set-valued BLOCK) and
+	// emits a matchSet template carrying the set expression, element index,
+	// and (for parsers) a local FOLLOW for error recovery.
+	public final StringTemplate  notElement(AST _t,
+		GrammarAST n, GrammarAST label, GrammarAST astSuffix
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST notElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST assign_c = null;
+		GrammarAST assign_s = null;
+		GrammarAST assign_t = null;
+		GrammarAST assign_st = null;
+		
+		IntSet elements=null;
+		String labelText = null;
+		if ( label!=null ) {
+		labelText = label.getText();
+		}
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case CHAR_LITERAL:
+			{
+				assign_c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				
+				// In a lexer the literal is a char value; in a parser it is a
+				// token type.  Complement within the appropriate vocabulary.
+				int ttype=0;
+				if ( grammar.type==Grammar.LEXER ) {
+				ttype = Grammar.getCharValueFromGrammarCharLiteral(assign_c.getText());
+				}
+				else {
+				ttype = grammar.getTokenType(assign_c.getText());
+				}
+				elements = grammar.complement(ttype);
+				
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				assign_s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				
+				int ttype=0;
+				if ( grammar.type==Grammar.LEXER ) {
+				// TODO: error!
+				}
+				else {
+				ttype = grammar.getTokenType(assign_s.getText());
+				}
+				elements = grammar.complement(ttype);
+				
+				break;
+			}
+			case TOKEN_REF:
+			{
+				assign_t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				
+				int ttype = grammar.getTokenType(assign_t.getText());
+				elements = grammar.complement(ttype);
+				
+				break;
+			}
+			case BLOCK:
+			{
+				// ~(set): the BLOCK already carries a computed set value.
+				assign_st = (GrammarAST)_t;
+				match(_t,BLOCK);
+				_t = _t.getNextSibling();
+				
+				elements = assign_st.getSetValue();
+				elements = grammar.complement(elements);
+				
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			
+			// Build the matchSet element keyed off the NOT node's child so
+			// line/index info points at the negated element.
+			code = getTokenElementST("matchSet",
+			"set",
+			(GrammarAST)n.getFirstChild(),
+			astSuffix,
+			labelText);
+			code.setAttribute("s",generator.genSetExpr(templates,elements,1,false));
+			int i = ((TokenWithIndex)n.getToken()).getIndex();
+			code.setAttribute("elementIndex", i);
+			if ( grammar.type!=Grammar.LEXER ) {
+			generator.generateLocalFOLLOW(n,"set",currentRuleName,i);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "ebnf": generates code for a subrule — plain (...)
+	// BLOCK, (...)? OPTIONAL, (...)* CLOSURE, or (...)+ POSITIVE_CLOSURE.
+	// Picks the lookahead DFA (the subrule's own DFA for BLOCK/OPTIONAL,
+	// the end-of-block EOB DFA for the loop forms) and delegates to block()
+	// with the matching template name, then tacks on a description.
+	public final StringTemplate  ebnf(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		DFA dfa=null;
+		GrammarAST b = (GrammarAST)ebnf_AST_in.getFirstChild();
+		GrammarAST eob = (GrammarAST)b.getLastChild(); // loops will use EOB DFA
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case BLOCK:
+			{
+				dfa = ebnf_AST_in.getLookaheadDFA();
+				code=block(_t,"block", dfa);
+				_t = _retTree;
+				break;
+			}
+			case OPTIONAL:
+			{
+				dfa = ebnf_AST_in.getLookaheadDFA();
+				AST __t73 = _t;
+				GrammarAST tmp47_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				code=block(_t,"optionalBlock", dfa);
+				_t = _retTree;
+				_t = __t73;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CLOSURE:
+			{
+				// Loop subrules decide exit vs. iterate with the EOB DFA.
+				dfa = eob.getLookaheadDFA();
+				AST __t74 = _t;
+				GrammarAST tmp48_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				code=block(_t,"closureBlock", dfa);
+				_t = _retTree;
+				_t = __t74;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				dfa = eob.getLookaheadDFA();
+				AST __t75 = _t;
+				GrammarAST tmp49_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				code=block(_t,"positiveClosureBlock", dfa);
+				_t = _retTree;
+				_t = __t75;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			
+					String description = grammar.grammarTreeToString(ebnf_AST_in, false);
+					description = generator.target.getTargetStringLiteralFromString(description);
+				code.setAttribute("description", description);
+				
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "atom": generates code for a leaf reference —
+	// rule ref, token ref, char literal, string literal, wildcard, or a
+	// set-valued BLOCK (delegated to set()).  Each branch instantiates the
+	// matching template, translates any argument action, records the
+	// element's token index, and (for parsers) generates a local FOLLOW set
+	// for single-token error recovery.
+	public final StringTemplate  atom(AST _t,
+		GrammarAST label, GrammarAST astSuffix
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST r = null;
+		GrammarAST rarg = null;
+		GrammarAST t = null;
+		GrammarAST targ = null;
+		GrammarAST c = null;
+		GrammarAST s = null;
+		GrammarAST w = null;
+		
+		String labelText=null;
+		if ( label!=null ) {
+		labelText = label.getText();
+		}
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case RULE_REF:
+			{
+				// rule reference, optionally with an argument ARG_ACTION child.
+				AST __t83 = _t;
+				r = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,RULE_REF);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ARG_ACTION:
+				{
+					rarg = (GrammarAST)_t;
+					match(_t,ARG_ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case 3:
+				{
+					// token type 3 is the generated walker's subtree-end marker.
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t83;
+				_t = _t.getNextSibling();
+				
+				grammar.checkRuleReference(r, rarg, currentRuleName);
+				Rule rdef = grammar.getRule(r.getText());
+				// don't insert label=r() if $label.attr not used, no ret value, ...
+				if ( !rdef.getHasReturnValue() ) {
+				labelText = null;
+				}
+				code = getRuleElementST("ruleRef", r.getText(), r, astSuffix, labelText);
+						code.setAttribute("rule", r.getText());
+				
+						if ( rarg!=null ) {
+							List args = generator.translateAction(currentRuleName,rarg);
+							code.setAttribute("args", args);
+						}
+				int i = ((TokenWithIndex)r.getToken()).getIndex();
+						code.setAttribute("elementIndex", i);
+						generator.generateLocalFOLLOW(r,r.getText(),currentRuleName,i);
+						r.code = code;
+				
+				break;
+			}
+			case TOKEN_REF:
+			{
+				// token reference: in a lexer this is a call to another lexer
+				// rule (or EOF match); in a parser it is a token match.
+				AST __t85 = _t;
+				t = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ARG_ACTION:
+				{
+					targ = (GrammarAST)_t;
+					match(_t,ARG_ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t85;
+				_t = _t.getNextSibling();
+				
+				grammar.checkRuleReference(t, targ, currentRuleName);
+						   if ( grammar.type==Grammar.LEXER ) {
+								if ( grammar.getTokenType(t.getText())==Label.EOF ) {
+									code = templates.getInstanceOf("lexerMatchEOF");
+								}
+							    else {
+									code = templates.getInstanceOf("lexerRuleRef");
+				if ( isListLabel(labelText) ) {
+				code = templates.getInstanceOf("lexerRuleRefAndListLabel");
+				}
+									code.setAttribute("rule", t.getText());
+									if ( targ!=null ) {
+										List args = generator.translateAction(currentRuleName,targ);
+										code.setAttribute("args", args);
+									}
+								}
+				int i = ((TokenWithIndex)t.getToken()).getIndex();
+							    code.setAttribute("elementIndex", i);
+							    if ( label!=null ) code.setAttribute("label", labelText);
+						   }
+						   else {
+								code = getTokenElementST("tokenRef", t.getText(), t, astSuffix, labelText);
+								String tokenLabel =
+								   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
+								code.setAttribute("token",tokenLabel);
+				int i = ((TokenWithIndex)t.getToken()).getIndex();
+							    code.setAttribute("elementIndex", i);
+							    generator.generateLocalFOLLOW(t,tokenLabel,currentRuleName,i);
+						   }
+						   t.code = code;
+						
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				
+						// char match in a lexer; token-type match in a parser.
+						if ( grammar.type==Grammar.LEXER ) {
+							code = templates.getInstanceOf("charRef");
+							code.setAttribute("char",
+							   generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,c.getText()));
+							if ( label!=null ) {
+								code.setAttribute("label", labelText);
+							}
+						}
+						else { // else it's a token type reference
+							code = getTokenElementST("tokenRef", "char_literal", c, astSuffix, labelText);
+							String tokenLabel = generator.getTokenTypeAsTargetLabel(grammar.getTokenType(c.getText()));
+							code.setAttribute("token",tokenLabel);
+				int i = ((TokenWithIndex)c.getToken()).getIndex();
+							code.setAttribute("elementIndex", i);
+							generator.generateLocalFOLLOW(c,tokenLabel,currentRuleName,i);
+						}
+				
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				
+						// string match in a lexer; token-type match in a parser.
+						if ( grammar.type==Grammar.LEXER ) {
+							code = templates.getInstanceOf("lexerStringRef");
+							code.setAttribute("string",
+							   generator.target.getTargetStringLiteralFromANTLRStringLiteral(generator,s.getText()));
+							if ( label!=null ) {
+								code.setAttribute("label", labelText);
+							}
+						}
+						else { // else it's a token type reference
+							code = getTokenElementST("tokenRef", "string_literal", s, astSuffix, labelText);
+							String tokenLabel =
+							   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(s.getText()));
+							code.setAttribute("token",tokenLabel);
+				int i = ((TokenWithIndex)s.getToken()).getIndex();
+							code.setAttribute("elementIndex", i);
+							generator.generateLocalFOLLOW(s,tokenLabel,currentRuleName,i);
+						}
+						
+				break;
+			}
+			case WILDCARD:
+			{
+				w = (GrammarAST)_t;
+				match(_t,WILDCARD);
+				_t = _t.getNextSibling();
+				
+						code = getWildcardST(w,astSuffix,labelText);
+						code.setAttribute("elementIndex", ((TokenWithIndex)w.getToken()).getIndex());
+						
+				break;
+			}
+			case BLOCK:
+			{
+				// BLOCK reaching atom() is a set-valued block; see set().
+				code=set(_t,label,astSuffix);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "tree": generates code for a ^(root child...) tree
+	// pattern in a tree grammar.  Fills the "tree" template with the root
+	// element, any actions that follow the root, and the child elements.
+	// If the LOOK set after the imaginary DOWN contains UP, the child list
+	// is nullable and the template is told to gate it on input.LA(1)==DOWN.
+	public final StringTemplate  tree(AST _t) throws RecognitionException {
+		StringTemplate code=templates.getInstanceOf("tree");
+		
+		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StringTemplate el=null, act=null;
+		GrammarAST elAST=null, actAST=null;
+		NFAState afterDOWN = (NFAState)tree_AST_in.NFATreeDownState.transition(0).target;
+		LookaheadSet s = grammar.LOOK(afterDOWN);
+		if ( s.member(Label.UP) ) {
+			// nullable child list if we can see the UP as the next token
+			// we need an "if ( input.LA(1)==Token.DOWN )" gate around
+			// the child list.
+			code.setAttribute("nullableChildList", "true");
+		}
+		
+		
+		try {      // for error handling
+			AST __t77 = _t;
+			GrammarAST tmp50_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			// First child of TREE_BEGIN is the tree root element.
+			elAST=(GrammarAST)_t;
+			el=element(_t,null,null);
+			_t = _retTree;
+			
+			code.setAttribute("root.{el,line,pos}",
+										  el,
+										  Utils.integer(elAST.getLine()),
+										  Utils.integer(elAST.getColumn())
+										  );
+			
+			{
+			// (ACTION)* — actions placed immediately after the root.
+			_loop79:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ACTION)) {
+					actAST=(GrammarAST)_t;
+					act=element_action(_t);
+					_t = _retTree;
+					
+					code.setAttribute("actionsAfterRoot.{el,line,pos}",
+					act,
+					Utils.integer(actAST.getLine()),
+					Utils.integer(actAST.getColumn())
+					);
+					
+				}
+				else {
+					break _loop79;
+				}
+				
+			} while (true);
+			}
+			{
+			// (element)* — remaining children of the tree pattern.
+			_loop81:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.getType()==ROOT||_t.getType()==RULE_REF||_t.g [...]
+					elAST=(GrammarAST)_t;
+					el=element(_t,null,null);
+					_t = _retTree;
+					
+								 code.setAttribute("children.{el,line,pos}",
+												  el,
+												  Utils.integer(elAST.getLine()),
+												  Utils.integer(elAST.getColumn())
+												  );
+								
+				}
+				else {
+					break _loop81;
+				}
+				
+			} while (true);
+			}
+			_t = __t77;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "element_action": matches a single ACTION node and
+	// wraps its translated chunks (with $x references resolved) in an
+	// "execAction" template.
+	public final StringTemplate  element_action(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST element_action_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST act = null;
+		
+		try {      // for error handling
+			act = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			
+			code = templates.getInstanceOf("execAction");
+			code.setAttribute("action", generator.translateAction(currentRuleName,act));
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "set": generates a "matchSet" element for a BLOCK
+	// node carrying a precomputed set value, honoring any label and AST
+	// suffix, and emitting a local FOLLOW for parsers (same shape as
+	// setBlock but without the wrapping "alt" template).
+	public final StringTemplate  set(AST _t,
+		GrammarAST label, GrammarAST astSuffix
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST set_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST s = null;
+		
+		String labelText=null;
+		if ( label!=null ) {
+		labelText = label.getText();
+		}
+		
+		
+		try {      // for error handling
+			s = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getNextSibling();
+			
+			code = getTokenElementST("matchSet", "set", s, astSuffix, labelText);
+			int i = ((TokenWithIndex)s.getToken()).getIndex();
+					code.setAttribute("elementIndex", i);
+					if ( grammar.type!=Grammar.LEXER ) {
+						generator.generateLocalFOLLOW(s,"set",currentRuleName,i);
+			}
+			code.setAttribute("s", generator.genSetExpr(templates,s.getSetValue(),1,false));
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	// Tree-grammar rule "ast_suffix": consumes a ROOT (^) or BANG (!) node.
+	// Pure tree navigation — no code is generated here; the suffix nodes are
+	// interpreted where they wrap elements (see element()).
+	public final void ast_suffix(AST _t) throws RecognitionException {
+		
+		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				GrammarAST tmp51_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				GrammarAST tmp52_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	// Tree-grammar rule "setElement": consumes one member of a set —
+	// a char literal, token ref, string literal, or char range.  Navigation
+	// only; the actual set value is generated elsewhere from the BLOCK's
+	// precomputed IntSet.
+	public final void setElement(AST _t) throws RecognitionException {
+		
+		GrammarAST setElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST c = null;
+		GrammarAST t = null;
+		GrammarAST s = null;
+		GrammarAST c1 = null;
+		GrammarAST c2 = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TOKEN_REF:
+			{
+				t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t90 = _t;
+				GrammarAST tmp53_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				c1 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				c2 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t90;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	// Tree-grammar rule "rewrite_alternative": generates code for the
+	// right-hand side of a "->" rewrite.  For AST-building grammars it
+	// collects rewrite elements into a "rewriteElementList" (EPSILON maps
+	// to "rewriteEmptyAlt"); for template-output grammars it delegates to
+	// rewrite_template().  The two branches are disambiguated by semantic
+	// predicates on the grammar's output option.
+	public final StringTemplate  rewrite_alternative(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST a = null;
+		
+		StringTemplate el,st;
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			if (((_t.getType()==ALT))&&(generator.grammar.buildAST())) {
+				AST __t99 = _t;
+				a = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,ALT);
+				_t = _t.getFirstChild();
+				code=templates.getInstanceOf("rewriteElementList");
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case OPTIONAL:
+				case CLOSURE:
+				case POSITIVE_CLOSURE:
+				case LABEL:
+				case ACTION:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case TOKEN_REF:
+				case RULE_REF:
+				case TREE_BEGIN:
+				{
+					{
+					// (rewrite_element)+ — at least one element in the alt.
+					int _cnt102=0;
+					_loop102:
+					do {
+						if (_t==null) _t=ASTNULL;
+						if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
+							GrammarAST elAST=(GrammarAST)_t;
+							el=rewrite_element(_t);
+							_t = _retTree;
+							code.setAttribute("elements.{el,line,pos}",
+												 					el,
+														  		Utils.integer(elAST.getLine()),
+														  		Utils.integer(elAST.getColumn())
+												 					);
+												
+						}
+						else {
+							if ( _cnt102>=1 ) { break _loop102; } else {throw new NoViableAltException(_t);}
+						}
+						
+						_cnt102++;
+					} while (true);
+					}
+					break;
+				}
+				case EPSILON:
+				{
+					// "-> " with nothing on the right: emit an empty alt.
+					GrammarAST tmp54_AST_in = (GrammarAST)_t;
+					match(_t,EPSILON);
+					_t = _t.getNextSibling();
+					code.setAttribute("elements.{el,line,pos}",
+												   templates.getInstanceOf("rewriteEmptyAlt"),
+												   Utils.integer(a.getLine()),
+												   Utils.integer(a.getColumn())
+										 			   );
+									
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				GrammarAST tmp55_AST_in = (GrammarAST)_t;
+				match(_t,EOA);
+				_t = _t.getNextSibling();
+				_t = __t99;
+				_t = _t.getNextSibling();
+			}
+			else if (((_t.getType()==ALT||_t.getType()==TEMPLATE||_t.getType()==ACTION))&&(generator.grammar.buildTemplate())) {
+				code=rewrite_template(_t);
+				_t = _retTree;
+			}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
+	/**
+	 * Walk a rewrite BLOCK subtree (the body of an EBNF construct on the
+	 * right side of a -> rewrite) and return the StringTemplate generating
+	 * code for it.  blockTemplateName selects which output template to
+	 * instantiate (e.g. "rewriteOptionalBlock"); see rewrite_ebnf.
+	 * NOTE: generated by ANTLR from codegen.g -- do not edit by hand.
+	 */
+	public final StringTemplate  rewrite_block(AST _t,
+		String blockTemplateName
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		// Track how deeply rewrite blocks nest and make this block the
+		// "current" one for the duration of the walk; the previous value is
+		// restored below so nesting unwinds correctly.
+		rewriteBlockNestingLevel++;
+		code = templates.getInstanceOf(blockTemplateName);
+		StringTemplate save_currentBlockST = currentBlockST;
+		currentBlockST = code;
+		code.setAttribute("rewriteBlockLevel", rewriteBlockNestingLevel);
+		StringTemplate alt=null;
+		
+		
+		try {      // for error handling
+			AST __t97 = _t;
+			GrammarAST tmp56_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			
+			// Expose which grammar elements this rewrite references, both
+			// transitively (deep) and directly (shallow), as target labels.
+			currentBlockST.setAttribute("referencedElementsDeep",
+			getTokenTypesAsTargetLabels(tmp56_AST_in.rewriteRefsDeep));
+			currentBlockST.setAttribute("referencedElements",
+			getTokenTypesAsTargetLabels(tmp56_AST_in.rewriteRefsShallow));
+			
+			// A rewrite block holds exactly one alternative followed by EOB.
+			alt=rewrite_alternative(_t);
+			_t = _retTree;
+			GrammarAST tmp57_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t97;
+			_t = _t.getNextSibling();
+			
+				code.setAttribute("alt", alt);
+				rewriteBlockNestingLevel--;
+				currentBlockST = save_currentBlockST;
+				
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
+	
<![CDATA[
+	/**
+	 * Dispatch a single element of a rewrite alternative to the appropriate
+	 * sub-rule based on its node type: atoms (labels, actions, literals,
+	 * token/rule refs) go to rewrite_atom, EBNF operators (?, *, +) to
+	 * rewrite_ebnf, and ^( ... ) trees to rewrite_tree.  Returns the
+	 * StringTemplate produced by the chosen sub-rule.
+	 * NOTE: generated by ANTLR from codegen.g -- do not edit by hand.
+	 */
+	public final StringTemplate  rewrite_element(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		IntSet elements=null;
+		GrammarAST ast = null;
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			// simple atoms: not roots here, hence isRoot=false
+			case LABEL:
+			case ACTION:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			{
+				code=rewrite_atom(_t,false);
+				_t = _retTree;
+				break;
+			}
+			// EBNF subrules: (..)?, (..)*, (..)+
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			{
+				code=rewrite_ebnf(_t);
+				_t = _retTree;
+				break;
+			}
+			// nested tree construction: ^( root children... )
+			case TREE_BEGIN:
+			{
+				code=rewrite_tree(_t);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
]]>
+	
<![CDATA[
+	/**
+	 * Walk a template rewrite (-> template(...) "...", -> foo(...), or
+	 * -> {action}) and return the StringTemplate generating code for it.
+	 * Three shapes are handled: an empty ALT (epsilon) yields the
+	 * "rewriteEmptyTemplate"; a TEMPLATE node yields an inline, external
+	 * (named), or indirect (%({expr})(...)) template with its argument list
+	 * and optional literal body; a bare ACTION yields "rewriteAction".
+	 * NOTE: generated by ANTLR from codegen.g -- do not edit by hand.
+	 */
+	public final StringTemplate  rewrite_template(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_template_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST ind = null;
+		GrammarAST arg = null;
+		GrammarAST a = null;
+		GrammarAST act = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			// -> (nothing): ALT containing only EPSILON EOA
+			case ALT:
+			{
+				AST __t117 = _t;
+				GrammarAST tmp58_AST_in = (GrammarAST)_t;
+				match(_t,ALT);
+				_t = _t.getFirstChild();
+				GrammarAST tmp59_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				GrammarAST tmp60_AST_in = (GrammarAST)_t;
+				match(_t,EOA);
+				_t = _t.getNextSibling();
+				_t = __t117;
+				_t = _t.getNextSibling();
+				code=templates.getInstanceOf("rewriteEmptyTemplate");
+				break;
+			}
+			case TEMPLATE:
+			{
+				AST __t118 = _t;
+				GrammarAST tmp61_AST_in = (GrammarAST)_t;
+				match(_t,TEMPLATE);
+				_t = _t.getFirstChild();
+				// first child is either an ID (template name) or an ACTION
+				// (expression computing the template: indirect reference)
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ID:
+				{
+					id = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case ACTION:
+				{
+					ind = (GrammarAST)_t;
+					match(_t,ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				
+						   if ( id!=null && id.getText().equals("template") ) {
+						   		code = templates.getInstanceOf("rewriteInlineTemplate");
+						   }
+						   else if ( id!=null ) {
+						   		code = templates.getInstanceOf("rewriteExternalTemplate");
+						   		code.setAttribute("name", id.getText());
+						   }
+						   else if ( ind!=null ) { // must be %({expr})(args)
+						   		code = templates.getInstanceOf("rewriteIndirectTemplate");
+								List chunks=generator.translateAction(currentRuleName,ind);
+						   		code.setAttribute("expr", chunks);
+						   }
+						
+				// argument list: zero or more ARG(ID, ACTION) pairs
+				AST __t120 = _t;
+				GrammarAST tmp62_AST_in = (GrammarAST)_t;
+				match(_t,ARGLIST);
+				_t = _t.getFirstChild();
+				{
+				_loop123:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==ARG)) {
+						AST __t122 = _t;
+						GrammarAST tmp63_AST_in = (GrammarAST)_t;
+						match(_t,ARG);
+						_t = _t.getFirstChild();
+						arg = (GrammarAST)_t;
+						match(_t,ID);
+						_t = _t.getNextSibling();
+						a = (GrammarAST)_t;
+						match(_t,ACTION);
+						_t = _t.getNextSibling();
+						
+						// must set alt num here rather than in define.g
+						// because actions like %foo(name={$ID.text}) aren't
+						// broken up yet into trees.
+										   a.outerAltNum = this.outerAltNum;
+								   		   List chunks = generator.translateAction(currentRuleName,a);
+								   		   code.setAttribute("args.{name,value}", arg.getText(), chunks);
+								   		
+						_t = __t122;
+						_t = _t.getNextSibling();
+					}
+					else {
+						break _loop123;
+					}
+					
+				} while (true);
+				}
+				_t = __t120;
+				_t = _t.getNextSibling();
+				// optional literal template body after the argument list
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case DOUBLE_QUOTE_STRING_LITERAL:
+				{
+					GrammarAST tmp64_AST_in = (GrammarAST)_t;
+					match(_t,DOUBLE_QUOTE_STRING_LITERAL);
+					_t = _t.getNextSibling();
+					
+					String sl = tmp64_AST_in.getText();
+								 String t = sl.substring(1,sl.length()-1); // strip quotes
+								 t = generator.target.getTargetStringLiteralFromString(t);
+					code.setAttribute("template",t);
+					
+					break;
+				}
+				case DOUBLE_ANGLE_STRING_LITERAL:
+				{
+					GrammarAST tmp65_AST_in = (GrammarAST)_t;
+					match(_t,DOUBLE_ANGLE_STRING_LITERAL);
+					_t = _t.getNextSibling();
+					
+					String sl = tmp65_AST_in.getText();
+								 String t = sl.substring(2,sl.length()-2); // strip double angle quotes
+								 t = generator.target.getTargetStringLiteralFromString(t);
+					code.setAttribute("template",t);
+					
+					break;
+				}
+				// type 3 == NULL_TREE_LOOKAHEAD (see _tokenNames): no more
+				// children, i.e. no literal template body was supplied
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t118;
+				_t = _t.getNextSibling();
+				break;
+			}
+			// -> {action}: arbitrary code computing the result
+			case ACTION:
+			{
+				act = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				
+				// set alt num for same reason as ARGLIST above
+				act.outerAltNum = this.outerAltNum;
+						code=templates.getInstanceOf("rewriteAction");
+						code.setAttribute("action",
+										  generator.translateAction(currentRuleName,act));
+						
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
]]>
+	
<![CDATA[
+	/**
+	 * Walk a single atom on the right side of a tree rewrite -- a rule
+	 * reference, token/char/string literal, label reference, or action --
+	 * and return the StringTemplate generating code for it.  When isRoot
+	 * is true, the "...Root" variant of each output template is chosen so
+	 * the atom becomes the root of the tree under construction.  Grammar
+	 * errors (undefined rule/token/label, element absent from the alt's
+	 * left side) are reported and a blank template is returned so code
+	 * generation can continue.
+	 * NOTE: generated by ANTLR from codegen.g -- do not edit by hand.
+	 */
+	public final StringTemplate  rewrite_atom(AST _t,
+		boolean isRoot
+	) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST r = null;
+		GrammarAST arg = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case RULE_REF:
+			{
+				r = (GrammarAST)_t;
+				match(_t,RULE_REF);
+				_t = _t.getNextSibling();
+				
+					String ruleRefName = r.getText();
+					String stName = "rewriteRuleRef";
+					if ( isRoot ) {
+						stName += "Root";
+					}
+					code = templates.getInstanceOf(stName);
+					code.setAttribute("rule", ruleRefName);
+					// the referenced rule must exist and must appear on the
+					// left side of this alternative; otherwise report and
+					// emit nothing for this atom
+					if ( grammar.getRule(ruleRefName)==null ) {
+							ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
+													  grammar,
+													  ((GrammarAST)(r)).getToken(),
+													  ruleRefName);
+						code = new StringTemplate(); // blank; no code gen
+					}
+					else if ( grammar.getRule(currentRuleName)
+							     .getRuleRefsInAlt(ruleRefName,outerAltNum)==null )
+						{
+							ErrorManager.grammarError(ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS,
+													  grammar,
+													  ((GrammarAST)(r)).getToken(),
+													  ruleRefName);
+						code = new StringTemplate(); // blank; no code gen
+					}
+					else {
+						// track all rule refs as we must copy 2nd ref to rule and beyond
+						if ( !rewriteRuleRefs.contains(ruleRefName) ) {
+					    		rewriteRuleRefs.add(ruleRefName);
+						}
+						}
+					
+				break;
+			}
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			{
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				// TOKEN_REF may carry an ARG_ACTION child (constructor args
+				// for an imaginary token node)
+				case TOKEN_REF:
+				{
+					AST __t114 = _t;
+					GrammarAST tmp66_AST_in = (GrammarAST)_t;
+					match(_t,TOKEN_REF);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case ARG_ACTION:
+					{
+						arg = (GrammarAST)_t;
+						match(_t,ARG_ACTION);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t114;
+					_t = _t.getNextSibling();
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					GrammarAST tmp67_AST_in = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case STRING_LITERAL:
+				{
+					GrammarAST tmp68_AST_in = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				
+					String tokenName = rewrite_atom_AST_in.getText();
+					String stName = "rewriteTokenRef";
+					Rule rule = grammar.getRule(currentRuleName);
+					Set tokenRefsInAlt = rule.getTokenRefsInAlt(outerAltNum);
+					// "imaginary" = token not matched on the left side of
+					// this alt, so a fresh node must be created for it
+					boolean imaginary = !tokenRefsInAlt.contains(tokenName);
+					if ( imaginary ) {
+						stName = "rewriteImaginaryTokenRef";
+					}
+					if ( isRoot ) {
+						stName += "Root";
+					}
+					code = templates.getInstanceOf(stName);
+					if ( arg!=null ) {
+							List args = generator.translateAction(currentRuleName,arg);
+							code.setAttribute("args", args);
+					}
+						code.setAttribute("elementIndex", ((TokenWithIndex)rewrite_atom_AST_in.getToken()).getIndex());
+						int ttype = grammar.getTokenType(tokenName);
+						String tok = generator.getTokenTypeAsTargetLabel(ttype);
+					code.setAttribute("token", tok);
+					if ( grammar.getTokenType(tokenName)==Label.INVALID ) {
+							ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE,
+													  grammar,
+													  ((GrammarAST)(rewrite_atom_AST_in)).getToken(),
+													  tokenName);
+						code = new StringTemplate(); // blank; no code gen
+					}
+					
+				break;
+			}
+			case LABEL:
+			{
+				GrammarAST tmp69_AST_in = (GrammarAST)_t;
+				match(_t,LABEL);
+				_t = _t.getNextSibling();
+				
+					String labelName = tmp69_AST_in.getText();
+					Rule rule = grammar.getRule(currentRuleName);
+					Grammar.LabelElementPair pair = rule.getLabel(labelName);
+					if ( labelName.equals(currentRuleName) ) {
+						// special case; ref to old value via $rule
+						StringTemplate labelST = templates.getInstanceOf("prevRuleRootRef");
+						code = templates.getInstanceOf("rewriteRuleLabelRef"+(isRoot?"Root":""));
+						code.setAttribute("label", labelST);
+					}
+					else if ( pair==null ) {
+							ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE,
+													  grammar,
+													  ((GrammarAST)(tmp69_AST_in)).getToken(),
+													  labelName);
+							code = new StringTemplate();
+					}
+					else {
+							// choose the output template by label kind
+							String stName = null;
+							switch ( pair.type ) {
+								case Grammar.TOKEN_LABEL :
+									stName = "rewriteTokenLabelRef";
+									break;
+								case Grammar.RULE_LABEL :
+									stName = "rewriteRuleLabelRef";
+									break;
+								case Grammar.TOKEN_LIST_LABEL :
+									stName = "rewriteTokenListLabelRef";
+									break;
+								case Grammar.RULE_LIST_LABEL :
+									stName = "rewriteRuleListLabelRef";
+									break;
+							}
+							if ( isRoot ) {
+								stName += "Root";
+							}
+							code = templates.getInstanceOf(stName);
+							code.setAttribute("label", labelName);
+						}
+					
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp70_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				
+				// actions in rewrite rules yield a tree object
+				String actText = tmp70_AST_in.getText();
+				List chunks = generator.translateAction(currentRuleName,tmp70_AST_in);
+						code = templates.getInstanceOf("rewriteNodeAction"+(isRoot?"Root":""));
+						code.setAttribute("action", chunks);
+				
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
]]>
+	
<![CDATA[
+	/**
+	 * Walk an EBNF operator node in a rewrite -- (..)?, (..)*, or (..)+ --
+	 * and return the StringTemplate for its block, delegating the block
+	 * body to rewrite_block with the matching output-template name.  Each
+	 * branch also attaches a "description" attribute holding the original
+	 * grammar text of the subtree (for comments/error messages in the
+	 * generated code).
+	 * NOTE: generated by ANTLR from codegen.g -- do not edit by hand.
+	 */
+	public final StringTemplate  rewrite_ebnf(AST _t) throws RecognitionException {
+		StringTemplate code=null;
+		
+		GrammarAST rewrite_ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONAL:
+			{
+				AST __t105 = _t;
+				GrammarAST tmp71_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				code=rewrite_block(_t,"rewriteOptionalBlock");
+				_t = _retTree;
+				_t = __t105;
+				_t = _t.getNextSibling();
+				
+						String description = grammar.grammarTreeToString(rewrite_ebnf_AST_in, false);
+						description = generator.target.getTargetStringLiteralFromString(description);
+						code.setAttribute("description", description);
+						
+				break;
+			}
+			case CLOSURE:
+			{
+				AST __t106 = _t;
+				GrammarAST tmp72_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				code=rewrite_block(_t,"rewriteClosureBlock");
+				_t = _retTree;
+				_t = __t106;
+				_t = _t.getNextSibling();
+				
+						String description = grammar.grammarTreeToString(rewrite_ebnf_AST_in, false);
+						description = generator.target.getTargetStringLiteralFromString(description);
+						code.setAttribute("description", description);
+						
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				AST __t107 = _t;
+				GrammarAST tmp73_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				code=rewrite_block(_t,"rewritePositiveClosureBlock");
+				_t = _retTree;
+				_t = __t107;
+				_t = _t.getNextSibling();
+				
+						String description = grammar.grammarTreeToString(rewrite_ebnf_AST_in, false);
+						description = generator.target.getTargetStringLiteralFromString(description);
+						code.setAttribute("description", description);
+						
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
]]>
+	
<![CDATA[
+	/**
+	 * Walk a tree-construction element ^( root child* ) in a rewrite and
+	 * return the "rewriteTree" StringTemplate for it.  The first child of
+	 * TREE_BEGIN is the root atom (rewrite_atom with isRoot=true); each
+	 * following element becomes a child.  Line/column of every element are
+	 * recorded in the template for error reporting, and tree nesting depth
+	 * is tracked via rewriteTreeNestingLevel (restored on the way out).
+	 * NOTE: generated by ANTLR from codegen.g -- do not edit by hand.
+	 */
+	public final StringTemplate  rewrite_tree(AST _t) throws RecognitionException {
+		StringTemplate code=templates.getInstanceOf("rewriteTree");
+		
+		GrammarAST rewrite_tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		rewriteTreeNestingLevel++;
+		code.setAttribute("treeLevel", rewriteTreeNestingLevel);
+		code.setAttribute("enclosingTreeLevel", rewriteTreeNestingLevel-1);
+		StringTemplate r, el;
+		GrammarAST elAST=null;
+		
+		
+		try {      // for error handling
+			AST __t109 = _t;
+			GrammarAST tmp74_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			elAST=(GrammarAST)_t;
+			r=rewrite_atom(_t,true);
+			_t = _retTree;
+			code.setAttribute("root.{el,line,pos}",
+										   r,
+										   Utils.integer(elAST.getLine()),
+										   Utils.integer(elAST.getColumn())
+										  );
+						
+			{
+			_loop111:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
+					elAST=(GrammarAST)_t;
+					el=rewrite_element(_t);
+					_t = _retTree;
+					
+								  code.setAttribute("children.{el,line,pos}",
+												    el,
+												    Utils.integer(elAST.getLine()),
+												    Utils.integer(elAST.getColumn())
+												    );
+								
+				}
+				else {
+					break _loop111;
+				}
+				
+			} while (true);
+			}
+			_t = __t109;
+			_t = _t.getNextSibling();
+			
+					String description = grammar.grammarTreeToString(rewrite_tree_AST_in, false);
+					description = generator.target.getTargetStringLiteralFromString(description);
+					code.setAttribute("description", description);
+				rewriteTreeNestingLevel--;
+					
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return code;
+	}
]]>
+	
+	
<![CDATA[
+	// Display names for token types, indexed by token type number (so e.g.
+	// _tokenNames[3] == "NULL_TREE_LOOKAHEAD"); quoted entries are keyword
+	// literals from the grammar.  Generated by ANTLR -- order is significant,
+	// do not edit by hand.
+	public static final String[] _tokenNames = {
+		"<0>",
+		"EOF",
+		"<2>",
+		"NULL_TREE_LOOKAHEAD",
+		"\"options\"",
+		"\"tokens\"",
+		"\"parser\"",
+		"LEXER",
+		"RULE",
+		"BLOCK",
+		"OPTIONAL",
+		"CLOSURE",
+		"POSITIVE_CLOSURE",
+		"SYNPRED",
+		"RANGE",
+		"CHAR_RANGE",
+		"EPSILON",
+		"ALT",
+		"EOR",
+		"EOB",
+		"EOA",
+		"ID",
+		"ARG",
+		"ARGLIST",
+		"RET",
+		"LEXER_GRAMMAR",
+		"PARSER_GRAMMAR",
+		"TREE_GRAMMAR",
+		"COMBINED_GRAMMAR",
+		"INITACTION",
+		"LABEL",
+		"TEMPLATE",
+		"\"scope\"",
+		"GATED_SEMPRED",
+		"SYN_SEMPRED",
+		"BACKTRACK_SEMPRED",
+		"\"fragment\"",
+		"ACTION",
+		"DOC_COMMENT",
+		"SEMI",
+		"\"lexer\"",
+		"\"tree\"",
+		"\"grammar\"",
+		"AMPERSAND",
+		"COLON",
+		"RCURLY",
+		"ASSIGN",
+		"STRING_LITERAL",
+		"CHAR_LITERAL",
+		"INT",
+		"STAR",
+		"TOKEN_REF",
+		"\"protected\"",
+		"\"public\"",
+		"\"private\"",
+		"BANG",
+		"ARG_ACTION",
+		"\"returns\"",
+		"\"throws\"",
+		"COMMA",
+		"LPAREN",
+		"OR",
+		"RPAREN",
+		"\"catch\"",
+		"\"finally\"",
+		"PLUS_ASSIGN",
+		"SEMPRED",
+		"IMPLIES",
+		"ROOT",
+		"RULE_REF",
+		"NOT",
+		"TREE_BEGIN",
+		"QUESTION",
+		"PLUS",
+		"WILDCARD",
+		"REWRITE",
+		"DOLLAR",
+		"DOUBLE_QUOTE_STRING_LITERAL",
+		"DOUBLE_ANGLE_STRING_LITERAL",
+		"WS",
+		"COMMENT",
+		"SL_COMMENT",
+		"ML_COMMENT",
+		"OPEN_ELEMENT_OPTION",
+		"CLOSE_ELEMENT_OPTION",
+		"ESC",
+		"DIGIT",
+		"XDIGIT",
+		"NESTED_ARG_ACTION",
+		"NESTED_ACTION",
+		"ACTION_CHAR_LITERAL",
+		"ACTION_STRING_LITERAL",
+		"ACTION_ESC",
+		"WS_LOOP",
+		"INTERNAL_RULE_REF",
+		"WS_OPT",
+		"SRC"
+	};
]]>
+	
+	}
+	
diff --git a/src/org/antlr/codegen/CodeGenTreeWalker.smap b/src/org/antlr/codegen/CodeGenTreeWalker.smap
new file mode 100644
index 0000000..c2dbf69
--- /dev/null
+++ b/src/org/antlr/codegen/CodeGenTreeWalker.smap
@@ -0,0 +1,2419 @@
+SMAP
+CodeGenTreeWalker.java
+G
+*S G
+*F
++ 0 codegen.g
+codegen.g
+*L
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+1:32
+1:33
+1:34
+1:35
+1:36
+1:37
+58:62
+59:63
+61:65
+62:66
+63:67
+64:68
+65:69
+66:70
+67:71
+68:72
+70:74
+71:75
+72:76
+73:77
+74:78
+75:79
+76:80
+77:81
+78:82
+79:83
+80:84
+81:85
+82:86
+83:87
+84:88
+86:90
+87:91
+88:92
+90:94
+91:95
+92:96
+94:98
+95:99
+96:100
+97:101
+99:103
+100:104
+102:106
+104:108
+105:109
+106:110
+107:111
+108:112
+109:113
+110:114
+112:116
+113:117
+114:118
+115:119
+116:120
+117:121
+118:122
+119:123
+120:124
+121:125
+122:126
+123:127
+124:128
+125:129
+126:130
+127:131
+128:132
+129:133
+130:134
+131:135
+132:136
+133:137
+134:138
+135:139
+136:140
+138:142
+139:143
+140:144
+141:145
+142:146
+143:147
+144:148
+145:149
+146:150
+147:151
+148:152
+149:153
+150:154
+151:155
+152:156
+153:157
+154:158
+155:159
+156:160
+157:161
+158:162
+159:163
+160:164
+161:165
+163:167
+164:168
+165:169
+166:170
+167:171
+168:172
+169:173
+170:174
+171:175
+172:176
+173:177
+174:178
+175:179
+176:180
+177:181
+178:182
+179:183
+181:185
+182:186
+183:187
+184:188
+185:189
+186:190
+187:191
+188:192
+190:194
+191:195
+192:196
+193:197
+194:198
+195:199
+196:200
+197:201
+198:202
+199:203
+200:204
+201:205
+202:206
+203:207
+204:208
+205:209
+206:210
+207:211
+208:212
+209:213
+210:214
+211:215
+212:216
+214:218
+215:219
+217:221
+218:222
+219:223
+220:224
+221:225
+222:226
+223:227
+224:228
+225:229
+226:230
+227:231
+228:232
+229:233
+230:234
+231:235
+232:236
+233:237
+234:238
+235:239
+236:240
+237:241
+238:242
+239:243
+241:245
+242:246
+243:247
+244:248
+245:249
+248:254
+248:255
+248:256
+248:257
+248:258
+248:259
+248:287
+248:345
+248:346
+248:347
+248:348
+248:349
+248:350
+248:351
+249:263
+250:264
+251:265
+252:266
+253:267
+254:268
+255:269
+256:270
+257:271
+258:272
+259:273
+260:274
+261:275
+262:276
+263:277
+264:278
+265:279
+266:280
+267:281
+268:282
+269:283
+270:284
+276:289
+276:290
+276:291
+276:292
+276:293
+276:294
+276:295
+276:296
+276:297
+276:298
+276:299
+276:300
+276:339
+276:340
+276:341
+276:342
+276:343
+277:303
+277:304
+277:305
+277:306
+277:307
+277:308
+277:309
+277:310
+277:311
+277:312
+278:315
+278:316
+278:317
+278:318
+278:319
+278:320
+278:321
+278:322
+278:323
+278:324
+280:327
+280:328
+280:329
+280:330
+280:331
+280:332
+280:333
+280:334
+280:335
+280:336
+284:496
+284:500
+284:513
+284:514
+284:515
+284:516
+284:517
+284:518
+284:519
+285:501
+285:502
+285:503
+285:504
+285:505
+285:506
+285:507
+285:508
+285:509
+285:510
+285:511
+285:512
+288:353
+288:359
+288:488
+288:489
+288:490
+288:491
+288:492
+288:493
+288:494
+289:356
+289:360
+289:361
+289:362
+290:357
+290:364
+290:365
+290:366
+290:367
+290:368
+290:369
+290:370
+290:385
+290:386
+290:387
+290:388
+290:389
+292:372
+293:373
+297:392
+298:393
+299:394
+300:395
+301:396
+302:397
+303:398
+305:401
+305:402
+305:403
+305:404
+305:405
+305:406
+305:407
+305:408
+305:409
+305:410
+305:411
+305:412
+305:413
+305:423
+305:424
+305:425
+305:426
+305:427
+306:430
+306:431
+306:432
+306:433
+306:434
+306:435
+306:436
+306:437
+306:438
+306:439
+306:440
+306:441
+306:442
+306:451
+306:452
+306:453
+306:454
+306:455
+307:457
+307:458
+307:459
+307:460
+307:461
+307:462
+307:463
+307:464
+307:465
+307:466
+307:467
+307:469
+307:470
+308:471
+308:472
+308:473
+308:474
+308:475
+308:476
+308:477
+308:478
+308:479
+308:480
+308:481
+308:482
+308:484
+308:485
+309:486
+309:487
+312:521
+312:522
+312:523
+312:530
+312:572
+312:573
+312:574
+312:575
+312:576
+312:577
+312:578
+313:527
+316:532
+316:533
+316:534
+316:535
+316:536
+316:553
+316:558
+316:559
+316:560
+316:561
+316:564
+316:565
+316:566
+316:567
+316:569
+316:570
+316:571
+317:539
+318:540
+323:542
+323:543
+323:544
+323:545
+325:547
+326:548
+327:549
+328:550
+329:551
+331:554
+331:555
+331:556
+331:557
+336:580
+336:581
+336:606
+336:852
+336:853
+336:854
+336:855
+336:856
+336:857
+336:858
+336:859
+337:587
+338:588
+339:589
+340:590
+341:591
+342:592
+343:593
+344:594
+345:595
+346:596
+348:598
+349:599
+350:600
+351:601
+352:602
+353:603
+356:584
+356:607
+356:608
+356:609
+356:610
+356:611
+356:612
+356:613
+356:614
+356:822
+356:823
+357:585
+357:616
+357:617
+357:618
+357:619
+357:620
+357:621
+357:622
+357:623
+357:624
+357:625
+357:632
+357:633
+357:634
+357:635
+357:636
+358:638
+358:639
+358:640
+358:641
+358:643
+358:644
+358:645
+358:646
+358:647
+358:648
+358:649
+358:656
+358:657
+358:658
+358:659
+358:660
+358:662
+358:663
+359:664
+359:665
+359:666
+359:667
+359:669
+359:670
+359:671
+359:672
+359:673
+359:674
+359:675
+359:682
+359:683
+359:684
+359:685
+359:686
+359:688
+359:689
+360:691
+360:692
+360:693
+360:694
+360:695
+360:696
+360:697
+360:698
+360:699
+360:700
+360:701
+360:702
+360:703
+360:712
+360:713
+360:714
+360:715
+360:716
+361:719
+361:720
+361:721
+361:722
+361:723
+361:724
+361:732
+361:733
+361:734
+361:735
+361:736
+362:738
+362:739
+362:740
+362:741
+362:742
+362:743
+362:744
+362:745
+362:746
+362:747
+362:748
+362:749
+362:751
+362:752
+363:753
+363:754
+365:756
+366:757
+367:758
+368:759
+369:760
+370:761
+371:762
+372:763
+373:764
+374:765
+375:766
+376:767
+377:768
+378:769
+379:770
+380:771
+381:772
+382:773
+383:774
+384:775
+385:776
+386:777
+387:778
+388:779
+389:780
+390:781
+391:782
+392:783
+393:784
+394:785
+395:786
+396:787
+397:788
+398:789
+399:790
+400:791
+401:792
+402:793
+403:794
+404:795
+405:796
+406:797
+409:800
+409:801
+409:802
+409:803
+409:804
+409:805
+409:806
+409:813
+409:814
+409:815
+409:816
+409:817
+410:819
+410:820
+410:821
+413:825
+414:826
+415:827
+416:828
+417:829
+418:830
+419:831
+420:832
+421:833
+422:834
+423:835
+424:836
+425:837
+426:838
+427:839
+428:840
+429:841
+430:842
+431:843
+432:844
+433:845
+434:846
+435:847
+436:848
+437:849
+438:850
+442:861
+442:865
+442:866
+442:867
+442:896
+442:897
+442:898
+442:899
+442:900
+442:901
+442:902
+442:903
+442:904
+442:905
+442:906
+442:907
+443:868
+443:869
+443:870
+443:871
+443:872
+444:875
+444:876
+444:877
+444:878
+444:879
+445:882
+445:883
+445:884
+445:885
+445:886
+446:889
+446:890
+446:891
+446:892
+446:893
+449:909
+449:913
+449:956
+449:957
+449:958
+449:959
+449:960
+449:961
+449:962
+450:914
+450:915
+450:916
+450:917
+450:919
+450:920
+450:921
+450:922
+450:923
+450:924
+450:925
+450:933
+450:934
+450:935
+450:936
+450:937
+450:939
+450:940
+450:941
+450:942
+450:943
+450:944
+450:945
+450:946
+450:947
+450:948
+450:949
+450:950
+450:952
+450:953
+450:954
+450:955
+453:964
+453:965
+453:966
+453:967
+453:996
+453:1005
+453:1072
+453:1073
+453:1074
+453:1075
+453:1077
+453:1078
+453:1079
+453:1080
+453:1081
+453:1082
+453:1083
+453:1084
+454:971
+455:972
+456:973
+457:974
+458:975
+459:976
+460:977
+461:978
+462:979
+463:980
+464:981
+465:982
+466:983
+467:984
+468:985
+469:986
+470:987
+471:988
+472:989
+473:990
+474:991
+475:992
+476:993
+480:997
+480:998
+480:999
+480:1000
+482:1002
+483:1003
+486:1006
+486:1007
+486:1008
+486:1009
+486:1010
+486:1069
+486:1070
+487:1012
+487:1013
+487:1014
+487:1015
+487:1016
+487:1017
+487:1018
+487:1025
+487:1026
+487:1027
+487:1028
+487:1029
+488:1032
+488:1033
+488:1034
+488:1035
+488:1036
+488:1037
+488:1038
+488:1039
+488:1040
+488:1041
+488:1058
+488:1059
+488:1060
+488:1061
+488:1063
+488:1064
+488:1065
+490:1043
+491:1044
+492:1045
+493:1046
+494:1047
+495:1048
+496:1049
+497:1050
+498:1051
+499:1052
+500:1053
+501:1054
+502:1055
+503:1056
+506:1066
+506:1067
+506:1068
+508:1071
+511:1153
+511:1154
+511:1168
+511:1196
+511:1197
+511:1198
+511:1199
+511:1200
+511:1201
+511:1202
+511:1203
+512:1159
+513:1160
+514:1161
+515:1162
+516:1163
+517:1164
+518:1165
+521:1157
+521:1169
+521:1170
+521:1171
+523:1173
+524:1174
+525:1175
+526:1176
+527:1177
+528:1178
+529:1179
+530:1180
+531:1181
+532:1182
+533:1183
+534:1184
+535:1185
+536:1186
+537:1187
+538:1188
+539:1189
+540:1190
+541:1191
+542:1192
+543:1193
+544:1194
+548:1086
+548:1087
+548:1088
+548:1092
+548:1093
+548:1094
+548:1140
+548:1141
+548:1142
+548:1143
+548:1144
+548:1145
+548:1146
+548:1147
+548:1148
+548:1149
+548:1150
+548:1151
+549:1095
+549:1096
+549:1098
+549:1099
+549:1100
+549:1101
+549:1102
+549:1103
+549:1104
+549:1105
+549:1106
+549:1107
+549:1108
+549:1110
+549:1111
+549:1112
+549:1114
+549:1115
+549:1116
+549:1117
+549:1118
+549:1119
+549:1126
+549:1127
+549:1128
+549:1129
+549:1130
+550:1134
+550:1135
+550:1136
+550:1137
+553:1392
+553:1393
+553:1394
+553:1398
+553:1415
+553:1416
+553:1417
+553:1418
+553:1419
+553:1420
+553:1421
+554:1399
+554:1400
+554:1401
+554:1402
+554:1403
+554:1404
+554:1405
+554:1406
+554:1407
+554:1408
+554:1409
+554:1410
+556:1412
+557:1413
+561:1423
+561:1424
+561:1425
+561:1429
+561:1443
+561:1444
+561:1445
+561:1446
+561:1447
+561:1448
+561:1449
+562:1430
+562:1431
+562:1432
+562:1433
+562:1434
+562:1435
+562:1436
+562:1437
+562:1438
+564:1440
+565:1441
+569:1205
+569:1206
+569:1240
+569:1276
+569:1277
+569:1278
+569:1279
+569:1280
+569:1281
+569:1282
+569:1283
+570:1211
+571:1212
+572:1213
+573:1214
+574:1215
+575:1216
+576:1217
+577:1218
+578:1219
+579:1220
+580:1221
+581:1222
+582:1223
+583:1224
+584:1225
+585:1226
+586:1227
+587:1228
+588:1229
+589:1230
+590:1231
+591:1232
+592:1233
+593:1234
+594:1235
+595:1236
+596:1237
+599:1209
+599:1241
+599:1242
+599:1243
+599:1244
+599:1274
+599:1275
+600:1246
+600:1247
+600:1248
+600:1249
+600:1250
+600:1251
+600:1263
+600:1264
+600:1265
+600:1266
+600:1268
+600:1269
+600:1270
+601:1252
+601:1253
+603:1255
+604:1256
+605:1257
+606:1258
+607:1259
+608:1260
+609:1261
+612:1271
+612:1272
+612:1273
+616:1451
+616:1452
+616:1453
+616:1454
+616:1469
+616:1470
+616:1471
+616:1634
+616:1639
+616:1643
+616:1644
+616:1645
+616:1646
+616:1647
+616:1648
+616:1649
+616:1650
+616:1651
+616:1652
+616:1653
+616:1654
+616:1655
+617:1465
+618:1466
+621:1472
+621:1473
+621:1474
+621:1475
+621:1476
+621:1477
+621:1478
+621:1479
+621:1480
+621:1481
+623:1484
+623:1485
+623:1486
+623:1487
+623:1488
+623:1489
+623:1490
+623:1491
+623:1492
+623:1493
+625:1457
+625:1496
+625:1497
+625:1498
+625:1499
+625:1500
+625:1501
+625:1502
+625:1503
+625:1504
+625:1505
+627:1458
+627:1508
+627:1509
+627:1510
+627:1511
+627:1512
+627:1513
+627:1514
+627:1515
+627:1516
+627:1517
+627:1518
+627:1519
+627:1520
+629:1459
+629:1523
+629:1524
+629:1525
+629:1526
+629:1527
+629:1528
+629:1529
+629:1530
+629:1531
+629:1532
+629:1533
+629:1534
+629:1535
+631:1460
+631:1461
+631:1538
+631:1539
+631:1540
+631:1541
+631:1542
+631:1543
+631:1544
+631:1545
+631:1546
+631:1547
+631:1548
+631:1549
+631:1550
+631:1551
+632:1552
+633:1553
+634:1554
+635:1555
+636:1556
+637:1557
+638:1558
+639:1559
+640:1560
+641:1561
+644:1635
+644:1636
+644:1637
+644:1638
+646:1640
+646:1641
+646:1642
+648:1565
+648:1566
+648:1567
+648:1568
+650:1571
+650:1572
+650:1573
+650:1574
+652:1462
+652:1463
+652:1577
+652:1578
+652:1579
+652:1581
+652:1582
+652:1583
+652:1584
+652:1585
+652:1586
+652:1587
+652:1590
+652:1591
+652:1592
+652:1593
+652:1594
+652:1595
+652:1598
+652:1599
+652:1600
+652:1601
+652:1602
+654:1605
+655:1606
+656:1607
+657:1608
+658:1609
+661:1613
+661:1614
+661:1615
+661:1616
+661:1617
+663:1620
+663:1621
+663:1622
+663:1623
+663:1624
+665:1627
+665:1628
+665:1629
+665:1630
+665:1631
+668:2146
+668:2147
+668:2152
+668:2160
+668:2161
+668:2162
+668:2163
+668:2164
+668:2165
+668:2166
+668:2167
+669:2150
+669:2153
+669:2154
+669:2155
+671:2157
+672:2158
+676:1657
+676:1658
+676:1659
+676:1660
+676:1675
+676:1754
+676:1755
+676:1756
+676:1757
+676:1758
+676:1759
+676:1760
+676:1761
+677:1668
+678:1669
+679:1670
+680:1671
+681:1672
+685:1663
+685:1677
+685:1678
+685:1679
+685:1680
+685:1681
+685:1682
+685:1683
+685:1735
+685:1736
+685:1737
+685:1738
+685:1739
+687:1685
+688:1686
+689:1687
+690:1688
+691:1689
+692:1690
+693:1691
+694:1692
+696:1664
+696:1696
+696:1697
+696:1698
+696:1699
+696:1700
+698:1702
+699:1703
+700:1704
+701:1705
+702:1706
+703:1707
+704:1708
+705:1709
+707:1665
+707:1713
+707:1714
+707:1715
+707:1716
+707:1717
+709:1719
+710:1720
+712:1666
+712:1724
+712:1725
+712:1726
+712:1727
+712:1728
+714:1730
+715:1731
+719:1742
+720:1743
+721:1744
+722:1745
+723:1746
+724:1747
+725:1748
+726:1749
+727:1750
+728:1751
+729:1752
+733:1763
+733:1764
+733:1773
+733:1834
+733:1835
+733:1836
+733:1837
+733:1838
+733:1839
+733:1840
+733:1841
+734:1768
+735:1769
+736:1770
+739:1775
+739:1776
+739:1777
+739:1778
+739:1779
+739:1823
+739:1824
+739:1825
+739:1826
+739:1827
+740:1780
+740:1781
+741:1784
+741:1785
+741:1786
+742:1787
+742:1788
+742:1789
+742:1790
+742:1791
+742:1792
+742:1793
+742:1794
+743:1797
+743:1798
+743:1799
+744:1800
+744:1801
+744:1802
+744:1803
+744:1804
+744:1805
+744:1806
+744:1807
+745:1810
+745:1811
+745:1812
+746:1813
+746:1814
+746:1815
+746:1816
+746:1817
+746:1818
+746:1819
+746:1820
+749:1830
+750:1831
+751:1832
+755:2059
+755:2060
+755:2076
+755:2137
+755:2138
+755:2139
+755:2140
+755:2141
+755:2142
+755:2143
+755:2144
+756:2064
+757:2065
+758:2066
+759:2067
+760:2068
+761:2069
+762:2070
+763:2071
+764:2072
+765:2073
+768:2077
+768:2078
+768:2079
+768:2080
+768:2081
+768:2135
+768:2136
+769:2082
+769:2083
+771:2085
+772:2086
+773:2087
+774:2088
+775:2089
+780:2091
+780:2092
+780:2093
+780:2106
+780:2107
+780:2108
+780:2109
+780:2111
+780:2112
+781:2094
+781:2095
+781:2096
+782:2097
+782:2098
+784:2100
+785:2101
+786:2102
+787:2103
+788:2104
+791:2113
+791:2114
+791:2115
+791:2116
+791:2117
+791:2118
+791:2128
+791:2129
+791:2130
+791:2131
+791:2133
+791:2134
+792:2119
+792:2120
+794:2122
+795:2123
+796:2124
+797:2125
+798:2126
+804:1843
+804:1844
+804:1845
+804:1846
+804:1863
+804:1864
+804:1865
+804:2045
+804:2046
+804:2047
+804:2048
+804:2049
+804:2050
+804:2051
+804:2052
+804:2053
+804:2054
+804:2055
+804:2056
+804:2057
+805:1857
+806:1858
+807:1859
+808:1860
+812:1849
+812:1850
+812:1866
+812:1867
+812:1868
+812:1869
+812:1870
+812:1871
+812:1873
+812:1874
+812:1875
+812:1876
+812:1877
+812:1878
+812:1879
+812:1886
+812:1887
+812:1888
+812:1889
+812:1890
+812:1892
+812:1893
+814:1895
+815:1896
+816:1897
+817:1898
+818:1899
+819:1900
+820:1901
+821:1902
+823:1904
+824:1905
+825:1906
+826:1907
+827:1908
+828:1909
+829:1910
+830:1911
+833:1851
+833:1852
+833:1915
+833:1916
+833:1917
+833:1918
+833:1919
+833:1920
+833:1922
+833:1923
+833:1924
+833:1925
+833:1926
+833:1927
+833:1928
+833:1935
+833:1936
+833:1937
+833:1938
+833:1939
+833:1941
+833:1942
+835:1944
+836:1945
+837:1946
+838:1947
+839:1948
+840:1949
+841:1950
+842:1951
+843:1952
+844:1953
+845:1954
+846:1955
+847:1956
+848:1957
+849:1958
+850:1959
+851:1960
+852:1961
+853:1962
+854:1963
+855:1964
+856:1965
+857:1966
+858:1967
+859:1968
+860:1969
+861:1970
+862:1971
+863:1972
+864:1973
+867:1853
+867:1977
+867:1978
+867:1979
+867:1980
+867:1981
+869:1983
+870:1984
+871:1985
+872:1986
+873:1987
+874:1988
+875:1989
+876:1990
+877:1991
+878:1992
+879:1993
+880:1994
+881:1995
+882:1996
+883:1997
+884:1998
+887:1854
+887:2002
+887:2003
+887:2004
+887:2005
+887:2006
+889:2008
+890:2009
+891:2010
+892:2011
+893:2012
+894:2013
+895:2014
+896:2015
+897:2016
+898:2017
+899:2018
+900:2019
+901:2020
+902:2021
+903:2022
+904:2023
+905:2024
+908:1855
+908:2028
+908:2029
+908:2030
+908:2031
+908:2032
+910:2034
+911:2035
+914:2039
+914:2040
+914:2041
+914:2042
+917:2205
+917:2209
+917:2210
+917:2211
+917:2226
+917:2227
+917:2228
+917:2229
+917:2230
+917:2231
+917:2232
+917:2233
+917:2234
+917:2235
+917:2236
+917:2237
+918:2212
+918:2213
+918:2214
+918:2215
+918:2216
+919:2219
+919:2220
+919:2221
+919:2222
+919:2223
+923:2169
+923:2170
+923:2171
+923:2172
+923:2183
+923:2196
+923:2197
+923:2198
+923:2199
+923:2200
+923:2201
+923:2202
+923:2203
+924:2177
+925:2178
+926:2179
+927:2180
+930:2175
+930:2184
+930:2185
+930:2186
+932:2188
+933:2189
+934:2190
+935:2191
+936:2192
+937:2193
+938:2194
+942:2239
+942:2248
+942:2249
+942:2250
+942:2288
+942:2289
+942:2290
+942:2291
+942:2292
+942:2293
+942:2294
+942:2295
+942:2296
+942:2297
+942:2298
+942:2299
+943:2242
+943:2251
+943:2252
+943:2253
+943:2254
+943:2255
+944:2243
+944:2258
+944:2259
+944:2260
+944:2261
+944:2262
+945:2244
+945:2265
+945:2266
+945:2267
+945:2268
+945:2269
+946:2245
+946:2246
+946:2272
+946:2273
+946:2274
+946:2275
+946:2276
+946:2277
+946:2278
+946:2279
+946:2280
+946:2281
+946:2282
+946:2283
+946:2284
+946:2285
+951:1285
+951:1286
+951:1323
+951:1383
+951:1384
+951:1385
+951:1386
+951:1387
+951:1388
+951:1389
+951:1390
+952:1292
+953:1293
+954:1294
+955:1295
+956:1296
+957:1297
+958:1298
+959:1299
+960:1300
+961:1301
+962:1302
+963:1303
+964:1304
+965:1305
+966:1306
+967:1307
+968:1308
+969:1309
+970:1310
+971:1311
+972:1312
+973:1313
+974:1314
+975:1315
+976:1316
+977:1317
+978:1318
+979:1319
+980:1320
+983:1324
+983:1325
+983:1326
+983:1376
+983:1377
+983:1378
+983:1379
+983:1381
+983:1382
+984:1327
+984:1328
+984:1329
+985:1289
+985:1290
+985:1330
+985:1331
+985:1332
+985:1333
+985:1335
+985:1336
+985:1337
+985:1338
+985:1339
+985:1340
+985:1341
+985:1350
+985:1351
+985:1352
+985:1353
+985:1354
+985:1356
+985:1357
+985:1358
+985:1359
+987:1361
+988:1362
+989:1363
+990:1364
+991:1365
+992:1366
+993:1367
+994:1368
+995:1369
+996:1370
+997:1371
+998:1372
+999:1373
+1000:1374
+1005:2399
+1005:2400
+1005:2401
+1005:2402
+1005:2414
+1005:2437
+1005:2438
+1005:2439
+1005:2440
+1005:2441
+1005:2442
+1005:2443
+1005:2444
+1006:2406
+1007:2407
+1008:2408
+1009:2409
+1010:2410
+1011:2411
+1014:2415
+1014:2416
+1014:2417
+1014:2418
+1014:2430
+1014:2431
+1016:2420
+1017:2421
+1018:2422
+1019:2423
+1021:2425
+1021:2426
+1022:2427
+1022:2428
+1022:2429
+1025:2433
+1026:2434
+1027:2435
+1031:2301
+1031:2302
+1031:2310
+1031:2381
+1031:2385
+1031:2386
+1031:2387
+1031:2388
+1031:2390
+1031:2391
+1031:2392
+1031:2393
+1031:2394
+1031:2395
+1031:2396
+1031:2397
+1032:2307
+1037:2305
+1037:2311
+1037:2312
+1037:2313
+1037:2314
+1037:2315
+1037:2316
+1037:2317
+1037:2379
+1037:2380
+1038:2319
+1038:2320
+1038:2321
+1038:2322
+1038:2323
+1038:2324
+1038:2325
+1038:2326
+1038:2327
+1038:2328
+1038:2329
+1038:2330
+1038:2331
+1038:2333
+1038:2334
+1038:2335
+1038:2336
+1038:2337
+1038:2338
+1038:2347
+1038:2348
+1038:2349
+1038:2350
+1038:2352
+1038:2353
+1038:2354
+1038:2370
+1038:2371
+1038:2372
+1038:2373
+1038:2374
+1039:2339
+1039:2340
+1040:2341
+1041:2342
+1042:2343
+1043:2344
+1044:2345
+1047:2357
+1047:2358
+1047:2359
+1047:2360
+1047:2361
+1048:2362
+1049:2363
+1050:2364
+1051:2365
+1052:2366
+1055:2376
+1055:2377
+1055:2378
+1057:2382
+1057:2383
+1057:2384
+1060:2446
+1060:2447
+1060:2455
+1060:2456
+1060:2457
+1060:2483
+1060:2484
+1060:2485
+1060:2486
+1060:2487
+1060:2488
+1060:2489
+1060:2490
+1060:2491
+1060:2492
+1060:2493
+1060:2494
+1060:2495
+1061:2451
+1062:2452
+1065:2458
+1065:2459
+1065:2460
+1065:2461
+1065:2462
+1065:2463
+1065:2464
+1065:2465
+1065:2466
+1067:2469
+1067:2470
+1067:2471
+1067:2472
+1067:2473
+1067:2474
+1069:2477
+1069:2478
+1069:2479
+1069:2480
+1072:2892
+1072:2893
+1072:2897
+1072:2898
+1072:2899
+1072:2951
+1072:2952
+1072:2953
+1072:2954
+1072:2955
+1072:2956
+1072:2957
+1072:2958
+1072:2959
+1072:2960
+1072:2961
+1072:2962
+1072:2963
+1073:2900
+1073:2901
+1073:2902
+1073:2903
+1073:2904
+1073:2905
+1073:2906
+1073:2907
+1073:2908
+1073:2909
+1075:2911
+1076:2912
+1077:2913
+1079:2917
+1079:2918
+1079:2919
+1079:2920
+1079:2921
+1079:2922
+1079:2923
+1079:2924
+1079:2925
+1079:2926
+1081:2928
+1082:2929
+1083:2930
+1085:2934
+1085:2935
+1085:2936
+1085:2937
+1085:2938
+1085:2939
+1085:2940
+1085:2941
+1085:2942
+1085:2943
+1087:2945
+1088:2946
+1089:2947
+1093:2965
+1093:2966
+1093:2977
+1093:3021
+1093:3022
+1093:3023
+1093:3024
+1093:3025
+1093:3026
+1093:3027
+1093:3028
+1094:2970
+1095:2971
+1096:2972
+1097:2973
+1098:2974
+1101:2978
+1101:2979
+1101:2980
+1101:2981
+1101:2982
+1101:3013
+1101:3014
+1102:2983
+1102:2984
+1103:2985
+1104:2986
+1105:2987
+1106:2988
+1107:2989
+1109:2991
+1109:2992
+1109:2993
+1109:2994
+1109:2995
+1109:2996
+1109:3006
+1109:3007
+1109:3008
+1109:3009
+1109:3011
+1109:3012
+1110:2997
+1110:2998
+1112:3000
+1113:3001
+1114:3002
+1115:3003
+1116:3004
+1121:3016
+1122:3017
+1123:3018
+1124:3019
+1128:2679
+1128:2680
+1128:2681
+1128:2682
+1128:2688
+1128:2689
+1128:2690
+1128:2878
+1128:2879
+1128:2880
+1128:2881
+1128:2882
+1128:2883
+1128:2884
+1128:2885
+1128:2886
+1128:2887
+1128:2888
+1128:2889
+1128:2890
+1129:2685
+1129:2691
+1129:2692
+1129:2693
+1129:2694
+1129:2695
+1131:2697
+1132:2698
+1133:2699
+1134:2700
+1135:2701
+1136:2702
+1137:2703
+1138:2704
+1139:2705
+1140:2706
+1141:2707
+1142:2708
+1143:2709
+1144:2710
+1145:2711
+1146:2712
+1147:2713
+1148:2714
+1149:2715
+1150:2716
+1151:2717
+1152:2718
+1153:2719
+1154:2720
+1155:2721
+1156:2722
+1157:2723
+1158:2724
+1159:2725
+1162:2686
+1162:2729
+1162:2730
+1162:2731
+1162:2732
+1162:2734
+1162:2735
+1162:2736
+1162:2737
+1162:2738
+1162:2739
+1162:2740
+1162:2741
+1162:2743
+1162:2744
+1162:2745
+1162:2746
+1162:2747
+1162:2748
+1162:2749
+1162:2756
+1162:2757
+1162:2758
+1162:2759
+1162:2760
+1162:2762
+1162:2763
+1162:2766
+1162:2767
+1162:2768
+1162:2769
+1162:2770
+1162:2773
+1162:2774
+1162:2775
+1162:2776
+1162:2777
+1162:2780
+1162:2781
+1162:2782
+1162:2783
+1162:2784
+1164:2787
+1165:2788
+1166:2789
+1167:2790
+1168:2791
+1169:2792
+1170:2793
+1171:2794
+1172:2795
+1173:2796
+1174:2797
+1175:2798
+1176:2799
+1177:2800
+1178:2801
+1179:2802
+1180:2803
+1181:2804
+1182:2805
+1183:2806
+1184:2807
+1185:2808
+1186:2809
+1187:2810
+1188:2811
+1189:2812
+1190:2813
+1193:2817
+1193:2818
+1193:2819
+1193:2820
+1193:2821
+1195:2823
+1196:2824
+1197:2825
+1198:2826
+1199:2827
+1200:2828
+1201:2829
+1202:2830
+1203:2831
+1204:2832
+1205:2833
+1206:2834
+1207:2835
+1208:2836
+1209:2837
+1210:2838
+1211:2839
+1212:2840
+1213:2841
+1214:2842
+1215:2843
+1216:2844
+1217:2845
+1218:2846
+1219:2847
+1220:2848
+1221:2849
+1222:2850
+1223:2851
+1224:2852
+1225:2853
+1226:2854
+1227:2855
+1228:2856
+1229:2857
+1230:2858
+1231:2859
+1232:2860
+1235:2864
+1235:2865
+1235:2866
+1235:2867
+1235:2868
+1237:2870
+1238:2871
+1239:2872
+1240:2873
+1241:2874
+1245:2497
+1245:2498
+1245:2507
+1245:2508
+1245:2509
+1245:2665
+1245:2666
+1245:2667
+1245:2668
+1245:2669
+1245:2670
+1245:2671
+1245:2672
+1245:2673
+1245:2674
+1245:2675
+1245:2676
+1245:2677
+1246:2510
+1246:2511
+1246:2512
+1246:2513
+1246:2514
+1246:2515
+1246:2516
+1246:2517
+1246:2518
+1246:2519
+1246:2520
+1246:2521
+1246:2522
+1246:2523
+1246:2524
+1247:2501
+1247:2502
+1247:2527
+1247:2528
+1247:2529
+1247:2530
+1247:2531
+1247:2532
+1247:2534
+1247:2535
+1247:2536
+1247:2537
+1247:2538
+1247:2539
+1247:2540
+1247:2543
+1247:2544
+1247:2545
+1247:2546
+1247:2547
+1247:2550
+1247:2551
+1247:2552
+1247:2553
+1247:2554
+1247:2647
+1247:2648
+1249:2557
+1250:2558
+1251:2559
+1252:2560
+1253:2561
+1254:2562
+1255:2563
+1256:2564
+1257:2565
+1258:2566
+1259:2567
+1260:2568
+1262:2570
+1262:2571
+1262:2572
+1262:2573
+1262:2606
+1262:2607
+1263:2503
+1263:2504
+1263:2574
+1263:2575
+1263:2576
+1263:2577
+1263:2578
+1263:2579
+1263:2580
+1263:2581
+1263:2582
+1263:2583
+1263:2584
+1263:2585
+1263:2586
+1263:2587
+1263:2588
+1263:2597
+1263:2598
+1263:2599
+1263:2600
+1263:2601
+1263:2602
+1263:2604
+1263:2605
+1265:2590
+1266:2591
+1267:2592
+1268:2593
+1269:2594
+1270:2595
+1275:2609
+1275:2610
+1275:2611
+1275:2612
+1275:2613
+1275:2614
+1275:2615
+1275:2641
+1275:2642
+1275:2643
+1275:2644
+1275:2645
+1277:2617
+1278:2618
+1279:2619
+1280:2620
+1282:2624
+1282:2625
+1282:2626
+1282:2627
+1282:2628
+1284:2630
+1285:2631
+1286:2632
+1287:2633
+1292:2505
+1292:2651
+1292:2652
+1292:2653
+1292:2654
+1292:2655
+1294:2657
+1295:2658
+1296:2659
+1297:2660
+1298:2661
+*E
diff --git a/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.java b/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.java
new file mode 100644
index 0000000..5ff363b
--- /dev/null
+++ b/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.java
@@ -0,0 +1,135 @@
+// $ANTLR 2.7.7 (2006-01-29): "codegen.g" -> "CodeGenTreeWalker.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.codegen;
+    import org.antlr.tool.*;
+    import org.antlr.analysis.*;
+    import org.antlr.misc.*;
+	import java.util.*;
+	import org.antlr.stringtemplate.*;
+    import antlr.TokenWithIndex;
+    import antlr.CommonToken;
+
+public interface CodeGenTreeWalkerTokenTypes {
+	int EOF = 1;
+	int NULL_TREE_LOOKAHEAD = 3;
+	int OPTIONS = 4;
+	int TOKENS = 5;
+	int PARSER = 6;
+	int LEXER = 7;
+	int RULE = 8;
+	int BLOCK = 9;
+	int OPTIONAL = 10;
+	int CLOSURE = 11;
+	int POSITIVE_CLOSURE = 12;
+	int SYNPRED = 13;
+	int RANGE = 14;
+	int CHAR_RANGE = 15;
+	int EPSILON = 16;
+	int ALT = 17;
+	int EOR = 18;
+	int EOB = 19;
+	int EOA = 20;
+	int ID = 21;
+	int ARG = 22;
+	int ARGLIST = 23;
+	int RET = 24;
+	int LEXER_GRAMMAR = 25;
+	int PARSER_GRAMMAR = 26;
+	int TREE_GRAMMAR = 27;
+	int COMBINED_GRAMMAR = 28;
+	int INITACTION = 29;
+	int LABEL = 30;
+	int TEMPLATE = 31;
+	int SCOPE = 32;
+	int GATED_SEMPRED = 33;
+	int SYN_SEMPRED = 34;
+	int BACKTRACK_SEMPRED = 35;
+	int FRAGMENT = 36;
+	int ACTION = 37;
+	int DOC_COMMENT = 38;
+	int SEMI = 39;
+	int LITERAL_lexer = 40;
+	int LITERAL_tree = 41;
+	int LITERAL_grammar = 42;
+	int AMPERSAND = 43;
+	int COLON = 44;
+	int RCURLY = 45;
+	int ASSIGN = 46;
+	int STRING_LITERAL = 47;
+	int CHAR_LITERAL = 48;
+	int INT = 49;
+	int STAR = 50;
+	int TOKEN_REF = 51;
+	int LITERAL_protected = 52;
+	int LITERAL_public = 53;
+	int LITERAL_private = 54;
+	int BANG = 55;
+	int ARG_ACTION = 56;
+	int LITERAL_returns = 57;
+	int LITERAL_throws = 58;
+	int COMMA = 59;
+	int LPAREN = 60;
+	int OR = 61;
+	int RPAREN = 62;
+	int LITERAL_catch = 63;
+	int LITERAL_finally = 64;
+	int PLUS_ASSIGN = 65;
+	int SEMPRED = 66;
+	int IMPLIES = 67;
+	int ROOT = 68;
+	int RULE_REF = 69;
+	int NOT = 70;
+	int TREE_BEGIN = 71;
+	int QUESTION = 72;
+	int PLUS = 73;
+	int WILDCARD = 74;
+	int REWRITE = 75;
+	int DOLLAR = 76;
+	int DOUBLE_QUOTE_STRING_LITERAL = 77;
+	int DOUBLE_ANGLE_STRING_LITERAL = 78;
+	int WS = 79;
+	int COMMENT = 80;
+	int SL_COMMENT = 81;
+	int ML_COMMENT = 82;
+	int OPEN_ELEMENT_OPTION = 83;
+	int CLOSE_ELEMENT_OPTION = 84;
+	int ESC = 85;
+	int DIGIT = 86;
+	int XDIGIT = 87;
+	int NESTED_ARG_ACTION = 88;
+	int NESTED_ACTION = 89;
+	int ACTION_CHAR_LITERAL = 90;
+	int ACTION_STRING_LITERAL = 91;
+	int ACTION_ESC = 92;
+	int WS_LOOP = 93;
+	int INTERNAL_RULE_REF = 94;
+	int WS_OPT = 95;
+	int SRC = 96;
+}
diff --git a/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.txt b/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.txt
new file mode 100644
index 0000000..b600f94
--- /dev/null
+++ b/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.txt
@@ -0,0 +1,95 @@
+// $ANTLR 2.7.7 (2006-01-29): codegen.g -> CodeGenTreeWalkerTokenTypes.txt$
+CodeGenTreeWalker    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
diff --git a/src/org/antlr/codegen/CodeGenerator.java b/src/org/antlr/codegen/CodeGenerator.java
new file mode 100644
index 0000000..3e3ed51
--- /dev/null
+++ b/src/org/antlr/codegen/CodeGenerator.java
@@ -0,0 +1,1186 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import antlr.RecognitionException;
+import antlr.TokenStreamRewriteEngine;
+import antlr.collections.AST;
+import org.antlr.Tool;
+import org.antlr.analysis.*;
+import org.antlr.misc.BitSet;
+import org.antlr.misc.*;
+import org.antlr.stringtemplate.*;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+import org.antlr.tool.*;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.Writer;
+import java.util.*;
+
+/** ANTLR's code generator.
+ *
+ *  Generate recognizers derived from grammars.  Language independence
+ *  achieved through the use of StringTemplateGroup objects.  All output
+ *  strings are completely encapsulated in the group files such as Java.stg.
+ *  Some computations are done that are unused by a particular language.
+ *  This generator just computes and sets the values into the templates;
+ *  the templates are free to use or not use the information.
+ *
+ *  To make a new code generation target, define X.stg for language X
+ *  by copying from existing Y.stg most closely releated to your language;
+ *  e.g., to do CSharp.stg copy Java.stg.  The template group file has a
+ *  bunch of templates that are needed by the code generator.  You can add
+ *  a new target w/o even recompiling ANTLR itself.  The language=X option
+ *  in a grammar file dictates which templates get loaded/used.
+ *
+ *  Some language like C need both parser files and header files.  Java needs
+ *  to have a separate file for the cyclic DFA as ANTLR generates bytecodes
+ *  directly (which cannot be in the generated parser Java file).  To facilitate
+ *  this,
+ *
+ * cyclic can be in same file, but header, output must be searpate.  recognizer
+ *  is in outptufile.
+ */
+public class CodeGenerator {
+	/** When generating SWITCH statements, some targets might need to limit
+	 *  the size (based upon the number of case labels).  Generally, this
+	 *  limit will be hit only for lexers where wildcard in a UNICODE
+	 *  vocabulary environment would generate a SWITCH with 65000 labels.
+	 */
+	public int MAX_SWITCH_CASE_LABELS = 300;
+	public int MIN_SWITCH_ALTS = 3;
+	public boolean GENERATE_SWITCHES_WHEN_POSSIBLE = true;
+	public static boolean GEN_ACYCLIC_DFA_INLINE = true;
+	public static boolean EMIT_TEMPLATE_DELIMITERS = false;
+
+	public String classpathTemplateRootDirectoryName =
+		"org/antlr/codegen/templates";
+
+	/** Which grammar are we generating code for?  Each generator
+	 *  is attached to a specific grammar.
+	 */
+	public Grammar grammar;
+
+	/** What language are we generating? */
+	protected String language;
+
+	/** The target specifies how to write out files and do other language
+	 *  specific actions.
+	 */
+	public Target target = null;
+
+	/** Where are the templates this generator should use to generate code? */
+	protected StringTemplateGroup templates;
+
+	/** The basic output templates without AST or templates stuff; this will be
+	 *  the templates loaded for the language such as Java.stg *and* the Dbg
+	 *  stuff if turned on.  This is used for generating syntactic predicates.
+	 */
+	protected StringTemplateGroup baseTemplates;
+
+	protected StringTemplate recognizerST;
+	protected StringTemplate outputFileST;
+	protected StringTemplate headerFileST;
+
+	/** Used to create unique labels */
+	protected int uniqueLabelNumber = 1;
+
+	/** A reference to the ANTLR tool so we can learn about output directories
+	 *  and such.
+	 */
+	protected Tool tool;
+
+	/** Generate debugging event method calls */
+	protected boolean debug;
+
+	/** Create a Tracer object and make the recognizer invoke this. */
+	protected boolean trace;
+
+	/** Track runtime parsing information about decisions etc...
+	 *  This requires the debugging event mechanism to work.
+	 */
+	protected boolean profile;
+
+	protected int lineWidth = 72;
+
+	/** I have factored out the generation of acyclic DFAs to separate class */
+	public ACyclicDFACodeGenerator acyclicDFAGenerator =
+		new ACyclicDFACodeGenerator(this);
+
+	/** I have factored out the generation of cyclic DFAs to separate class */
+	/*
+	public CyclicDFACodeGenerator cyclicDFAGenerator =
+		new CyclicDFACodeGenerator(this);
+		*/
+
+	public static final String VOCAB_FILE_EXTENSION = ".tokens";
+	protected final static String vocabFilePattern =
+		"<tokens:{<attr.name>=<attr.type>\n}>" +
+		"<literals:{<attr.name>=<attr.type>\n}>";
+
+	public CodeGenerator(Tool tool, Grammar grammar, String language) {
+		this.tool = tool;
+		this.grammar = grammar;
+		this.language = language;
+		loadLanguageTarget(language);
+	}
+
+	protected void loadLanguageTarget(String language) {
+		String targetName = "org.antlr.codegen."+language+"Target";
+		try {
+			Class c = Class.forName(targetName);
+			target = (Target)c.newInstance();
+		}
+		catch (ClassNotFoundException cnfe) {
+			target = new Target(); // use default
+		}
+		catch (InstantiationException ie) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_CREATE_TARGET_GENERATOR,
+							   targetName,
+							   ie);
+		}
+		catch (IllegalAccessException cnfe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_CREATE_TARGET_GENERATOR,
+							   targetName,
+							   cnfe);
+		}
+	}
+
+	/** load the main language.stg template group file */
+	public void loadTemplates(String language) {
+		// get a group loader containing main templates dir and target subdir
+		String templateDirs =
+			classpathTemplateRootDirectoryName+":"+
+			classpathTemplateRootDirectoryName+"/"+language;
+		//System.out.println("targets="+templateDirs.toString());
+		StringTemplateGroupLoader loader =
+			new CommonGroupLoader(templateDirs,
+								  ErrorManager.getStringTemplateErrorListener());
+		StringTemplateGroup.registerGroupLoader(loader);
+		StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
+
+		// first load main language template
+		StringTemplateGroup coreTemplates =
+			StringTemplateGroup.loadGroup(language);
+		baseTemplates = coreTemplates;
+		if ( coreTemplates ==null ) {
+			ErrorManager.error(ErrorManager.MSG_MISSING_CODE_GEN_TEMPLATES,
+							   language);
+			return;
+		}
+
+		// dynamically add subgroups that act like filters to apply to
+		// their supergroup.  E.g., Java:Dbg:AST:ASTDbg.
+		String outputOption = (String)grammar.getOption("output");
+		if ( outputOption!=null && outputOption.equals("AST") ) {
+			if ( debug && grammar.type!=Grammar.LEXER ) {
+				StringTemplateGroup dbgTemplates =
+					StringTemplateGroup.loadGroup("Dbg", coreTemplates);
+				baseTemplates = dbgTemplates;
+				StringTemplateGroup astTemplates =
+					StringTemplateGroup.loadGroup("AST",dbgTemplates);
+				StringTemplateGroup astDbgTemplates =
+					StringTemplateGroup.loadGroup("ASTDbg", astTemplates);
+				templates = astDbgTemplates;
+			}
+			else {
+				templates = StringTemplateGroup.loadGroup("AST", coreTemplates);
+			}
+		}
+		else if ( outputOption!=null && outputOption.equals("template") ) {
+			if ( debug && grammar.type!=Grammar.LEXER ) {
+				StringTemplateGroup dbgTemplates =
+					StringTemplateGroup.loadGroup("Dbg", coreTemplates);
+				baseTemplates = dbgTemplates;
+				StringTemplateGroup stTemplates =
+					StringTemplateGroup.loadGroup("ST",dbgTemplates);
+				/*
+				StringTemplateGroup astDbgTemplates =
+					StringTemplateGroup.loadGroup("STDbg", astTemplates);
+				*/
+				templates = stTemplates;
+			}
+			else {
+				templates = StringTemplateGroup.loadGroup("ST", coreTemplates);
+			}
+		}
+		else if ( debug && grammar.type!=Grammar.LEXER ) {
+			templates = StringTemplateGroup.loadGroup("Dbg", coreTemplates);
+			baseTemplates = templates;
+		}
+		else {
+			templates = coreTemplates;
+		}
+
+		if ( EMIT_TEMPLATE_DELIMITERS ) {
+			templates.emitDebugStartStopStrings(true);
+			templates.doNotEmitDebugStringsForTemplate("codeFileExtension");
+			templates.doNotEmitDebugStringsForTemplate("headerFileExtension");
+		}
+	}
+
+	/** Given the grammar to which we are attached, walk the AST associated
+	 *  with that grammar to create NFAs.  Then create the DFAs for all
+	 *  decision points in the grammar by converting the NFAs to DFAs.
+	 *  Finally, walk the AST again to generate code.
+	 *
+	 *  Either 1 or 2 files are written:
+	 *
+	 * 		recognizer: the main parser/lexer/treewalker item
+	 * 		header file: language like C/C++ need extern definitions
+	 *
+	 *  The target, such as JavaTarget, dictates which files get written.
+	 */
+	public StringTemplate genRecognizer() {
+		// LOAD OUTPUT TEMPLATES
+		loadTemplates(language);
+		if ( templates==null ) {
+			return null;
+		}
+
+		// CHECK FOR LEFT RECURSION; Make sure we can actually do analysis
+		grammar.checkAllRulesForLeftRecursion();
+
+		// was there a severe problem while reading in grammar?
+		if ( ErrorManager.doNotAttemptAnalysis() ) {
+			return null;
+		}
+
+		// CREATE NFA FROM GRAMMAR, CREATE DFA FROM NFA
+		target.performGrammarAnalysis(this, grammar);
+
+		// some grammar analysis errors will not yield reliable DFA
+		if ( ErrorManager.doNotAttemptCodeGen() ) {
+			return null;
+		}
+
+		// OPTIMIZE DFA
+		DFAOptimizer optimizer = new DFAOptimizer(grammar);
+		optimizer.optimize();
+
+		// OUTPUT FILE (contains recognizerST)
+		outputFileST = templates.getInstanceOf("outputFile");
+
+		// HEADER FILE
+		if ( templates.isDefined("headerFile") ) {
+			headerFileST = templates.getInstanceOf("headerFile");
+		}
+		else {
+			// create a dummy to avoid null-checks all over code generator
+			headerFileST = new StringTemplate(templates,"");
+			headerFileST.setName("dummy-header-file");
+		}
+
+		boolean filterMode = grammar.getOption("filter")!=null &&
+							  grammar.getOption("filter").equals("true");
+		boolean canBacktrack = grammar.getSyntacticPredicates()!=null ||
+							   filterMode;
+
+		// TODO: move this down further because generating the recognizer
+		// alters the model with info on who uses predefined properties etc...
+		// The actions here might refer to something.
+
+		// The only two possible output files are available at this point.
+		// Verify action scopes are ok for target and dump actions into output
+		// Templates can say <actions.parser.header> for example.
+		Map actions = grammar.getActions();
+		verifyActionScopesOkForTarget(actions);
+		// translate $x::y references
+		translateActionAttributeReferences(actions);
+		Map actionsForGrammarScope =
+			(Map)actions.get(grammar.getDefaultActionScope(grammar.type));
+		if ( filterMode &&
+			 (actionsForGrammarScope==null ||
+			 !actionsForGrammarScope.containsKey(Grammar.SYNPREDGATE_ACTION_NAME)) )
+		{
+			// if filtering, we need to set actions to execute at backtracking
+			// level 1 not 0.  Don't set this action if a user has though
+			StringTemplate gateST = templates.getInstanceOf("filteringActionGate");
+			if ( actionsForGrammarScope==null ) {
+				actionsForGrammarScope=new HashMap();
+				actions.put(grammar.getDefaultActionScope(grammar.type),
+							actionsForGrammarScope);
+			}
+			actionsForGrammarScope.put(Grammar.SYNPREDGATE_ACTION_NAME,
+									   gateST);
+		}
+		headerFileST.setAttribute("actions", actions);
+		outputFileST.setAttribute("actions", actions);
+
+		headerFileST.setAttribute("buildTemplate", new Boolean(grammar.buildTemplate()));
+		outputFileST.setAttribute("buildTemplate", new Boolean(grammar.buildTemplate()));
+		headerFileST.setAttribute("buildAST", new Boolean(grammar.buildAST()));
+		outputFileST.setAttribute("buildAST", new Boolean(grammar.buildAST()));
+
+		String rewrite = (String)grammar.getOption("rewrite");
+		outputFileST.setAttribute("rewrite",
+								  Boolean.valueOf(rewrite!=null&&rewrite.equals("true")));
+		headerFileST.setAttribute("rewrite",
+								  Boolean.valueOf(rewrite!=null&&rewrite.equals("true")));
+
+		outputFileST.setAttribute("backtracking", Boolean.valueOf(canBacktrack));
+		headerFileST.setAttribute("backtracking", Boolean.valueOf(canBacktrack));
+		String memoize = (String)grammar.getOption("memoize");
+		outputFileST.setAttribute("memoize",
+								  Boolean.valueOf(memoize!=null&&memoize.equals("true")&&
+									          canBacktrack));
+		headerFileST.setAttribute("memoize",
+								  Boolean.valueOf(memoize!=null&&memoize.equals("true")&&
+									          canBacktrack));
+
+
+		outputFileST.setAttribute("trace", Boolean.valueOf(trace));
+		headerFileST.setAttribute("trace", Boolean.valueOf(trace));
+
+		outputFileST.setAttribute("profile", Boolean.valueOf(profile));
+		headerFileST.setAttribute("profile", Boolean.valueOf(profile));
+
+		// RECOGNIZER
+		if ( grammar.type==Grammar.LEXER ) {
+			recognizerST = templates.getInstanceOf("lexer");
+			outputFileST.setAttribute("LEXER", Boolean.valueOf(true));
+			headerFileST.setAttribute("LEXER", Boolean.valueOf(true));
+			recognizerST.setAttribute("filterMode",
+									  Boolean.valueOf(filterMode));
+		}
+		else if ( grammar.type==Grammar.PARSER ||
+			grammar.type==Grammar.COMBINED )
+		{
+			recognizerST = templates.getInstanceOf("parser");
+			outputFileST.setAttribute("PARSER", Boolean.valueOf(true));
+			headerFileST.setAttribute("PARSER", Boolean.valueOf(true));
+		}
+		else {
+			recognizerST = templates.getInstanceOf("treeParser");
+			outputFileST.setAttribute("TREE_PARSER", Boolean.valueOf(true));
+			headerFileST.setAttribute("TREE_PARSER", Boolean.valueOf(true));
+		}
+		outputFileST.setAttribute("recognizer", recognizerST);
+		headerFileST.setAttribute("recognizer", recognizerST);
+		outputFileST.setAttribute("actionScope",
+								  grammar.getDefaultActionScope(grammar.type));
+		headerFileST.setAttribute("actionScope",
+								  grammar.getDefaultActionScope(grammar.type));
+
+		String targetAppropriateFileNameString =
+			target.getTargetStringLiteralFromString(grammar.getFileName());
+		outputFileST.setAttribute("fileName", targetAppropriateFileNameString);
+		headerFileST.setAttribute("fileName", targetAppropriateFileNameString);
+		outputFileST.setAttribute("ANTLRVersion", Tool.VERSION);
+		headerFileST.setAttribute("ANTLRVersion", Tool.VERSION);
+		outputFileST.setAttribute("generatedTimestamp", Tool.getCurrentTimeStamp());
+		headerFileST.setAttribute("generatedTimestamp", Tool.getCurrentTimeStamp());
+
+		// GENERATE RECOGNIZER
+		// Walk the AST holding the input grammar, this time generating code
+		// Decisions are generated by using the precomputed DFAs
+		// Fill in the various templates with data
+		CodeGenTreeWalker gen = new CodeGenTreeWalker();
+		try {
+			gen.grammar((AST)grammar.getGrammarTree(),
+						grammar,
+						recognizerST,
+						outputFileST,
+						headerFileST);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   re);
+		}
+		genTokenTypeConstants(recognizerST);
+		genTokenTypeConstants(outputFileST);
+		genTokenTypeConstants(headerFileST);
+
+		if ( grammar.type!=Grammar.LEXER ) {
+			genTokenTypeNames(recognizerST);
+			genTokenTypeNames(outputFileST);
+			genTokenTypeNames(headerFileST);
+		}
+
+		// Now that we know what synpreds are used, we can set into template
+		Set synpredNames = null;
+		if ( grammar.synPredNamesUsedInDFA.size()>0 ) {
+			synpredNames = grammar.synPredNamesUsedInDFA;
+		}
+		outputFileST.setAttribute("synpreds", synpredNames);
+		headerFileST.setAttribute("synpreds", synpredNames);
+		
+		// all recognizers can see Grammar object
+		recognizerST.setAttribute("grammar", grammar);
+
+		// WRITE FILES
+		try {
+			target.genRecognizerFile(tool,this,grammar,outputFileST);
+			if ( templates.isDefined("headerFile") ) {
+				StringTemplate extST = templates.getInstanceOf("headerFileExtension");
+				target.genRecognizerHeaderFile(tool,this,grammar,headerFileST,extST.toString());
+			}
+			// write out the vocab interchange file; used by antlr,
+			// does not change per target
+			StringTemplate tokenVocabSerialization = genTokenVocabOutput();
+			String vocabFileName = getVocabFileName();
+			if ( vocabFileName!=null ) {
+				write(tokenVocabSerialization, vocabFileName);
+			}
+			//System.out.println(outputFileST.getDOTForDependencyGraph(false));
+		}
+		catch (IOException ioe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE,
+							   getVocabFileName(),
+							   ioe);
+		}
+		/*
+		System.out.println("num obj.prop refs: "+ ASTExpr.totalObjPropRefs);
+		System.out.println("num reflection lookups: "+ ASTExpr.totalReflectionLookups);
+		*/
+
+		return outputFileST;
+	}
+
+	/** Some targets will have some extra scopes like C++ may have
+	 *  '@headerfile:name {action}' or something.  Make sure the
+	 *  target likes the scopes in action table.
+	 */
+	protected void verifyActionScopesOkForTarget(Map actions) {
+		Set actionScopeKeySet = actions.keySet();
+		for (Iterator it = actionScopeKeySet.iterator(); it.hasNext();) {
+			String scope = (String)it.next();
+			if ( !target.isValidActionScope(grammar.type, scope) ) {
+				// get any action from the scope to get error location
+				Map scopeActions = (Map)actions.get(scope);
+				GrammarAST actionAST =
+					(GrammarAST)scopeActions.values().iterator().next();
+				ErrorManager.grammarError(
+					ErrorManager.MSG_INVALID_ACTION_SCOPE,grammar,
+					actionAST.getToken(),scope,
+					Grammar.grammarTypeToString[grammar.type]);
+			}
+		}
+	}
+
+	/** Actions may reference $x::y attributes, call translateAction on
+	 *  each action and replace that action in the Map.
+	 */
+	protected void translateActionAttributeReferences(Map actions) {
+		Set actionScopeKeySet = actions.keySet();
+		for (Iterator it = actionScopeKeySet.iterator(); it.hasNext();) {
+			String scope = (String)it.next();
+			Map scopeActions = (Map)actions.get(scope);
+			translateActionAttributeReferencesForSingleScope(null,scopeActions);
+		}
+	}
+
+	/** Use for translating rule @init{...} actions that have no scope */
+	protected void translateActionAttributeReferencesForSingleScope(
+		Rule r,
+		Map scopeActions)
+	{
+		String ruleName=null;
+		if ( r!=null ) {
+			ruleName = r.name;
+		}
+		Set actionNameSet = scopeActions.keySet();
+		for (Iterator nameIT = actionNameSet.iterator(); nameIT.hasNext();) {
+			String name = (String) nameIT.next();
+			GrammarAST actionAST = (GrammarAST)scopeActions.get(name);
+			List chunks = translateAction(ruleName,actionAST);
+			scopeActions.put(name, chunks); // replace with translation
+		}
+	}
+
	/** Error recovery in ANTLR recognizers.
	 *
	 *  Based upon original ideas:
	 *
	 *  Algorithms + Data Structures = Programs by Niklaus Wirth
	 *
	 *  and
	 *
	 *  A note on error recovery in recursive descent parsers:
	 *  http://portal.acm.org/citation.cfm?id=947902.947905
	 *
	 *  Later, Josef Grosch had some good ideas:
	 *  Efficient and Comfortable Error Recovery in Recursive Descent Parsers:
	 *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
	 *
	 *  Like Grosch I implemented local FOLLOW sets that are combined at run-time
	 *  upon error to avoid parsing overhead.
	 */
	public void generateLocalFOLLOW(GrammarAST referencedElementNode,
									String referencedElementName,
									String enclosingRuleName,
									int elementIndex)
	{
		// FOLLOW is computed from the NFA state just past the referenced element
		NFAState followingNFAState = referencedElementNode.followingNFAState;
/*
		System.out.print("compute FOLLOW "+referencedElementNode.toString()+
						 " for "+referencedElementName+"#"+elementIndex +" in "+
						 enclosingRuleName+
						 " line="+referencedElementNode.getLine());
*/
		LookaheadSet follow = null;
		if ( followingNFAState!=null ) {
			follow = grammar.LOOK(followingNFAState);
		}

		if ( follow==null ) {
			// should not happen; fall back to an empty set so codegen proceeds
			ErrorManager.internalError("no follow state or cannot compute follow");
			follow = new LookaheadSet();
		}
		//System.out.println(" "+follow);

        List tokenTypeList = null;
        long[] words = null;
		if ( follow.tokenTypeSet==null ) {
			// empty FOLLOW: emit a single all-zero word and no token types
			words = new long[1];
            tokenTypeList = new ArrayList();
        }
		else {
			// pack the token type set into 64-bit words for the bitset table
			BitSet bits = BitSet.of(follow.tokenTypeSet);
			words = bits.toPackedArray();
            tokenTypeList = follow.tokenTypeSet.toList();
        }
		// use the target to convert to hex strings (typically)
		String[] wordStrings = new String[words.length];
		for (int j = 0; j < words.length; j++) {
			long w = words[j];
			wordStrings[j] = target.getTarget64BitStringFromValue(w);
		}
		// record the bitset in the recognizer, output file, and header file
		// templates; each aggregates them under the "bitsets" attribute
        recognizerST.setAttribute("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
                referencedElementName,
                enclosingRuleName,
                wordStrings,
                tokenTypeList,
                Utils.integer(elementIndex));
        outputFileST.setAttribute("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
                referencedElementName,
                enclosingRuleName,
                wordStrings,
                tokenTypeList,
                Utils.integer(elementIndex));
        headerFileST.setAttribute("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
                referencedElementName,
                enclosingRuleName,
                wordStrings,
                tokenTypeList,
                Utils.integer(elementIndex));
	}
+
+	// L O O K A H E A D  D E C I S I O N  G E N E R A T I O N
+
+	/** Generate code that computes the predicted alt given a DFA.  The
+	 *  recognizerST can be either the main generated recognizerTemplate
+	 *  for storage in the main parser file or a separate file.  It's up to
+	 *  the code that ultimately invokes the codegen.g grammar rule.
+	 *
+	 *  Regardless, the output file and header file get a copy of the DFAs.
+	 */
+	public StringTemplate genLookaheadDecision(StringTemplate recognizerST,
+											   DFA dfa)
+	{
+		StringTemplate decisionST;
+		// If we are doing inline DFA and this one is acyclic and LL(*)
+		// I have to check for is-non-LL(*) because if non-LL(*) the cyclic
+		// check is not done by DFA.verify(); that is, verify() avoids
+		// doesStateReachAcceptState() if non-LL(*)
+		if ( dfa.canInlineDecision() ) {
+			decisionST =
+				acyclicDFAGenerator.genFixedLookaheadDecision(getTemplates(), dfa);
+		}
+		else {
+			// generate any kind of DFA here (cyclic or acyclic)
+			dfa.createStateTables(this);
+			outputFileST.setAttribute("cyclicDFAs", dfa);
+			headerFileST.setAttribute("cyclicDFAs", dfa);
+			decisionST = templates.getInstanceOf("dfaDecision");
+			String description = dfa.getNFADecisionStartState().getDescription();
+			description = target.getTargetStringLiteralFromString(description);
+			if ( description!=null ) {
+				decisionST.setAttribute("description", description);
+			}
+			decisionST.setAttribute("decisionNumber",
+									Utils.integer(dfa.getDecisionNumber()));
+		}
+		return decisionST;
+	}
+
	/** A special state is huge (too big for state tables) or has a predicated
	 *  edge.  Generate a simple if-then-else.  Cannot be an accept state as
	 *  they have no emanating edges.  Don't worry about switch vs if-then-else
	 *  because if you get here, the state is super complicated and needs an
	 *  if-then-else.  This is used by the new DFA scheme created June 2006.
	 */
	public StringTemplate generateSpecialState(DFAState s) {
		StringTemplate stateST;
		stateST = templates.getInstanceOf("cyclicDFAState");
		// assume an error clause is needed; removed below if an EOT default exists
		stateST.setAttribute("needErrorClause", Boolean.valueOf(true));
		stateST.setAttribute("semPredState",
							 Boolean.valueOf(s.isResolvedWithPredicates()));
		stateST.setAttribute("stateNumber", s.stateNumber);
		stateST.setAttribute("decisionNumber", s.dfa.decisionNumber);

		boolean foundGatedPred = false;
		StringTemplate eotST = null;
		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
			Transition edge = (Transition) s.transition(i);
			StringTemplate edgeST;
			if ( edge.label.getAtom()==Label.EOT ) {
				// this is the default clause; has to held until last
				// (appended after the loop so it becomes the final edge)
				edgeST = templates.getInstanceOf("eotDFAEdge");
				stateST.removeAttribute("needErrorClause");
				eotST = edgeST;
			}
			else {
				edgeST = templates.getInstanceOf("cyclicDFAEdge");
				StringTemplate exprST =
					genLabelExpr(templates,edge,1);
				edgeST.setAttribute("labelExpr", exprST);
			}
			edgeST.setAttribute("edgeNumber", Utils.integer(i+1));
			edgeST.setAttribute("targetStateNumber",
								 Utils.integer(edge.target.stateNumber));
			// stick in any gated predicates for any edge if not already a pred
			if ( !edge.label.isSemanticPredicate() ) {
				DFAState t = (DFAState)edge.target;
				SemanticContext preds =	t.getGatedPredicatesInNFAConfigurations();
				if ( preds!=null ) {
					foundGatedPred = true;
					StringTemplate predST = preds.genExpr(this,
														  getTemplates(),
														  t.dfa);
					edgeST.setAttribute("predicates", predST.toString());
				}
			}
			// non-EOT edges are appended in order; the EOT edge waits until last
			if ( edge.label.getAtom()!=Label.EOT ) {
				stateST.setAttribute("edges", edgeST);
			}
		}
		if ( foundGatedPred ) {
			// state has >= 1 edge with a gated pred (syn or sem)
			// must rewind input first, set flag.
			stateST.setAttribute("semPredState", new Boolean(foundGatedPred));
		}
		if ( eotST!=null ) {
			// append the EOT default edge last so it acts as the else clause
			stateST.setAttribute("edges", eotST);
		}
		return stateST;
	}
+
+	/** Generate an expression for traversing an edge. */
+	protected StringTemplate genLabelExpr(StringTemplateGroup templates,
+										  Transition edge,
+										  int k)
+	{
+		Label label = edge.label;
+		if ( label.isSemanticPredicate() ) {
+			return genSemanticPredicateExpr(templates, edge);
+		}
+		if ( label.isSet() ) {
+			return genSetExpr(templates, label.getSet(), k, true);
+		}
+		// must be simple label
+		StringTemplate eST = templates.getInstanceOf("lookaheadTest");
+		eST.setAttribute("atom", getTokenTypeAsTargetLabel(label.getAtom()));
+		eST.setAttribute("atomAsInt", Utils.integer(label.getAtom()));
+		eST.setAttribute("k", Utils.integer(k));
+		return eST;
+	}
+
+	protected StringTemplate genSemanticPredicateExpr(StringTemplateGroup templates,
+													  Transition edge)
+	{
+		DFA dfa = ((DFAState)edge.target).dfa; // which DFA are we in
+		Label label = edge.label;
+		SemanticContext semCtx = label.getSemanticContext();
+		return semCtx.genExpr(this,templates,dfa);
+	}
+
+	/** For intervals such as [3..3, 30..35], generate an expression that
+	 *  tests the lookahead similar to LA(1)==3 || (LA(1)>=30&&LA(1)<=35)
+	 */
+	public StringTemplate genSetExpr(StringTemplateGroup templates,
+									 IntSet set,
+									 int k,
+									 boolean partOfDFA)
+	{
+		if ( !(set instanceof IntervalSet) ) {
+			throw new IllegalArgumentException("unable to generate expressions for non IntervalSet objects");
+		}
+		IntervalSet iset = (IntervalSet)set;
+		if ( iset.getIntervals()==null || iset.getIntervals().size()==0 ) {
+			StringTemplate emptyST = new StringTemplate(templates, "");
+			emptyST.setName("empty-set-expr");
+			return emptyST;
+		}
+		String testSTName = "lookaheadTest";
+		String testRangeSTName = "lookaheadRangeTest";
+		if ( !partOfDFA ) {
+			testSTName = "isolatedLookaheadTest";
+			testRangeSTName = "isolatedLookaheadRangeTest";
+		}
+		StringTemplate setST = templates.getInstanceOf("setTest");
+		Iterator iter = iset.getIntervals().iterator();
+		int rangeNumber = 1;
+		while (iter.hasNext()) {
+			Interval I = (Interval) iter.next();
+			int a = I.a;
+			int b = I.b;
+			StringTemplate eST;
+			if ( a==b ) {
+				eST = templates.getInstanceOf(testSTName);
+				eST.setAttribute("atom", getTokenTypeAsTargetLabel(a));
+				eST.setAttribute("atomAsInt", Utils.integer(a));
+				//eST.setAttribute("k",Utils.integer(k));
+			}
+			else {
+				eST = templates.getInstanceOf(testRangeSTName);
+				eST.setAttribute("lower",getTokenTypeAsTargetLabel(a));
+				eST.setAttribute("lowerAsInt", Utils.integer(a));
+				eST.setAttribute("upper",getTokenTypeAsTargetLabel(b));
+				eST.setAttribute("upperAsInt", Utils.integer(b));
+				eST.setAttribute("rangeNumber",Utils.integer(rangeNumber));
+			}
+			eST.setAttribute("k",Utils.integer(k));
+			setST.setAttribute("ranges", eST);
+			rangeNumber++;
+		}
+		return setST;
+	}
+
+	// T O K E N  D E F I N I T I O N  G E N E R A T I O N
+
+	/** Set attributes tokens and literals attributes in the incoming
+	 *  code template.  This is not the token vocab interchange file, but
+	 *  rather a list of token type ID needed by the recognizer.
+	 */
+	protected void genTokenTypeConstants(StringTemplate code) {
+		// make constants for the token types
+		Iterator tokenIDs = grammar.getTokenIDs().iterator();
+		while (tokenIDs.hasNext()) {
+			String tokenID = (String) tokenIDs.next();
+			int tokenType = grammar.getTokenType(tokenID);
+			if ( tokenType==Label.EOF ||
+				 tokenType>=Label.MIN_TOKEN_TYPE )
+			{
+				// don't do FAUX labels 'cept EOF
+				code.setAttribute("tokens.{name,type}", tokenID, Utils.integer(tokenType));
+			}
+		}
+	}
+
+	/** Generate a token names table that maps token type to a printable
+	 *  name: either the label like INT or the literal like "begin".
+	 */
+	protected void genTokenTypeNames(StringTemplate code) {
+		for (int t=Label.MIN_TOKEN_TYPE; t<=grammar.getMaxTokenType(); t++) {
+			String tokenName = grammar.getTokenDisplayName(t);
+			if ( tokenName!=null ) {
+				tokenName=target.getTargetStringLiteralFromString(tokenName, true);
+				code.setAttribute("tokenNames", tokenName);
+			}
+		}
+	}
+
+	/** Get a meaningful name for a token type useful during code generation.
+	 *  Literals without associated names are converted to the string equivalent
+	 *  of their integer values. Used to generate x==ID and x==34 type comparisons
+	 *  etc...  Essentially we are looking for the most obvious way to refer
+	 *  to a token type in the generated code.  If in the lexer, return the
+	 *  char literal translated to the target language.  For example, ttype=10
+	 *  will yield '\n' from the getTokenDisplayName method.  That must
+	 *  be converted to the target languages literals.  For most C-derived
+	 *  languages no translation is needed.
+	 */
+	public String getTokenTypeAsTargetLabel(int ttype) {
+		if ( grammar.type==Grammar.LEXER ) {
+			String name = grammar.getTokenDisplayName(ttype);
+			return target.getTargetCharLiteralFromANTLRCharLiteral(this,name);
+		}
+		return target.getTokenTypeAsTargetLabel(this,ttype);
+	}
+
+	/** Generate a token vocab file with all the token names/types.  For example:
+	 *  ID=7
+	 *  FOR=8
+	 *  'for'=8
+	 *
+	 *  This is independent of the target language; used by antlr internally
+	 */
+	protected StringTemplate genTokenVocabOutput() {
+		StringTemplate vocabFileST =
+			new StringTemplate(vocabFilePattern,
+							   AngleBracketTemplateLexer.class);
+		vocabFileST.setName("vocab-file");
+		// make constants for the token names
+		Iterator tokenIDs = grammar.getTokenIDs().iterator();
+		while (tokenIDs.hasNext()) {
+			String tokenID = (String) tokenIDs.next();
+			int tokenType = grammar.getTokenType(tokenID);
+			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
+				vocabFileST.setAttribute("tokens.{name,type}", tokenID, Utils.integer(tokenType));
+			}
+		}
+
+		// now dump the strings
+		Iterator literals = grammar.getStringLiterals().iterator();
+		while (literals.hasNext()) {
+			String literal = (String) literals.next();
+			int tokenType = grammar.getTokenType(literal);
+			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
+				vocabFileST.setAttribute("tokens.{name,type}", literal, Utils.integer(tokenType));
+			}
+		}
+
+		return vocabFileST;
+	}
+
+	public List translateAction(String ruleName,
+								GrammarAST actionTree)
+	{
+		if ( actionTree.getType()==ANTLRParser.ARG_ACTION ) {
+			return translateArgAction(ruleName, actionTree);
+		}
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(this,ruleName,actionTree);
+		List chunks = translator.translateToChunks();
+		chunks = target.postProcessAction(chunks, actionTree.token);
+		return chunks;
+	}
+
+	/** Translate an action like [3,"foo",a[3]] and return a List of the
+	 *  translated actions.  Because actions are translated to a list of
+	 *  chunks, this returns List<List<String|StringTemplate>>.
+	 *
+	 *  Simple ',' separator is assumed.
+	 */
+	public List translateArgAction(String ruleName,
+								   GrammarAST actionTree)
+	{
+		String actionText = actionTree.token.getText();
+		StringTokenizer argTokens = new StringTokenizer(actionText, ",");
+		List args = new ArrayList();
+		while ( argTokens.hasMoreTokens() ) {
+			String arg = (String)argTokens.nextToken();
+			antlr.Token actionToken = new antlr.CommonToken(ANTLRParser.ACTION,arg);
+			ActionTranslatorLexer translator =
+				new ActionTranslatorLexer(this,ruleName,
+										  actionToken,
+										  actionTree.outerAltNum);
+			List chunks = translator.translateToChunks();
+			chunks = target.postProcessAction(chunks, actionToken);
+			args.add(chunks);
+		}
+		if ( args.size()==0 ) {
+			return null;
+		}
+		return args;
+	}
+
	/** Given a template constructor action like %foo(a={...}) in
	 *  an action, translate it to the appropriate template constructor
	 *  from the templateLib. This translates a *piece* of the action.
	 *
	 *  Pipeline: lex/parse the text with the ANTLR grammar's
	 *  rewrite_template rule, then feed the resulting AST through
	 *  codegen.g's rewrite_template to build the StringTemplate.
	 *  Returns null if the second stage fails.
	 */
	public StringTemplate translateTemplateConstructor(String ruleName,
													   int outerAltNum,
													   antlr.Token actionToken,
													   String templateActionText)
	{
		// first, parse with antlr.g
		//System.out.println("translate template: "+templateActionText);
		ANTLRLexer lexer = new ANTLRLexer(new StringReader(templateActionText));
		lexer.setFilename(grammar.getFileName());
		lexer.setTokenObjectClass("antlr.TokenWithIndex");
		TokenStreamRewriteEngine tokenBuffer = new TokenStreamRewriteEngine(lexer);
		// whitespace/comments are irrelevant to the template structure
		tokenBuffer.discard(ANTLRParser.WS);
		tokenBuffer.discard(ANTLRParser.ML_COMMENT);
		tokenBuffer.discard(ANTLRParser.COMMENT);
		tokenBuffer.discard(ANTLRParser.SL_COMMENT);
		ANTLRParser parser = new ANTLRParser(tokenBuffer);
		parser.setFilename(grammar.getFileName());
		parser.setASTNodeClass("org.antlr.tool.GrammarAST");
		try {
			parser.rewrite_template();
		}
		catch (RecognitionException re) {
			// malformed %foo(...) syntax in the user's action
			ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
										  grammar,
										  actionToken,
										  templateActionText);
		}
		catch (Exception tse) {
			ErrorManager.internalError("can't parse template action",tse);
		}
		GrammarAST rewriteTree = (GrammarAST)parser.getAST();

		// then translate via codegen.g
		CodeGenTreeWalker gen = new CodeGenTreeWalker();
		gen.init(grammar);
		gen.currentRuleName = ruleName;
		gen.outerAltNum = outerAltNum;
		StringTemplate st = null;
		try {
			st = gen.rewrite_template((AST)rewriteTree);
		}
		catch (RecognitionException re) {
			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
							   re);
		}
		return st;
	}
+
+
+	public void issueInvalidScopeError(String x,
+									   String y,
+									   Rule enclosingRule,
+									   antlr.Token actionToken,
+									   int outerAltNum)
+	{
+		//System.out.println("error $"+x+"::"+y);
+		Rule r = grammar.getRule(x);
+		AttributeScope scope = grammar.getGlobalScope(x);
+		if ( scope==null ) {
+			if ( r!=null ) {
+				scope = r.ruleScope; // if not global, might be rule scope
+			}
+		}
+		if ( scope==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
+										  grammar,
+										  actionToken,
+										  x);
+		}
+		else if ( scope.getAttribute(y)==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE,
+										  grammar,
+										  actionToken,
+										  x,
+										  y);
+		}
+	}
+
+	public void issueInvalidAttributeError(String x,
+										   String y,
+										   Rule enclosingRule,
+										   antlr.Token actionToken,
+										   int outerAltNum)
+	{
+		//System.out.println("error $"+x+"."+y);
+		if ( enclosingRule==null ) {
+			// action not in a rule
+			ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
+										  grammar,
+										  actionToken,
+										  x,
+										  y);
+			return;
+		}
+
+		// action is in a rule
+		Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
+
+		if ( label!=null || enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ) {
+			// $rulelabel.attr or $ruleref.attr; must be unknown attr
+			String refdRuleName = x;
+			if ( label!=null ) {
+				refdRuleName = enclosingRule.getRuleLabel(x).referencedRuleName;
+			}
+			Rule refdRule = grammar.getRule(refdRuleName);
+			AttributeScope scope = refdRule.getAttributeScope(y);
+			if ( scope==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE,
+										  grammar,
+										  actionToken,
+										  refdRuleName,
+										  y);
+			}
+			else if ( scope.isParameterScope ) {
+				ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_PARAMETER_REF,
+										  grammar,
+										  actionToken,
+										  refdRuleName,
+										  y);
+			}
+			else if ( scope.isDynamicRuleScope ) {
+				ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF,
+										  grammar,
+										  actionToken,
+										  refdRuleName,
+										  y);
+			}
+		}
+
+	}
+
+	public void issueInvalidAttributeError(String x,
+										   Rule enclosingRule,
+										   antlr.Token actionToken,
+										   int outerAltNum)
+	{
+		//System.out.println("error $"+x);
+		if ( enclosingRule==null ) {
+			// action not in a rule
+			ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
+										  grammar,
+										  actionToken,
+										  x);
+			return;
+		}
+
+		// action is in a rule
+		Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
+		AttributeScope scope = enclosingRule.getAttributeScope(x);
+
+		if ( label!=null ||
+			 enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ||
+			 enclosingRule.name.equals(x) )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_SCOPE,
+										  grammar,
+										  actionToken,
+										  x);
+		}
+		else if ( scope!=null && scope.isDynamicRuleScope ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE,
+										  grammar,
+										  actionToken,
+										  x);
+		}
+		else {
+			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE,
+									  grammar,
+									  actionToken,
+									  x);
+		}
+	}
+
+	// M I S C
+
	/** The template group for the selected target language. */
	public StringTemplateGroup getTemplates() {
		return templates;
	}
+
	/** The base (target-independent) template group. */
	public StringTemplateGroup getBaseTemplates() {
		return baseTemplates;
	}
+
	/** Turn generation of debug-event hooks on or off. */
	public void setDebug(boolean debug) {
		this.debug = debug;
	}
+
	/** Turn generation of rule entry/exit tracing on or off. */
	public void setTrace(boolean trace) {
		this.trace = trace;
	}
+
	/** Turn profiling on or off.  Enabling profiling forces debug mode on
	 *  too, because profiling is implemented on top of debug events.
	 */
	public void setProfile(boolean profile) {
		this.profile = profile;
		if ( profile ) {
			setDebug(true); // requires debug events
		}
	}
+
	/** Returns the output-file template (which contains the recognizer
	 *  template); note it is outputFileST, not recognizerST, despite the
	 *  method name.
	 */
	public StringTemplate getRecognizerST() {
		return outputFileST;
	}
+
+	public String getRecognizerFileName(String name, int type) {
+		StringTemplate extST = templates.getInstanceOf("codeFileExtension");
+		String suffix = Grammar.grammarTypeToFileNameSuffix[type];
+		return name+suffix+extST.toString();
+	}
+
+	/** What is the name of the vocab file generated for this grammar?
+	 *  Returns null if no .tokens file should be generated.
+	 */
+	public String getVocabFileName() {
+		if ( grammar.isBuiltFromString() ) {
+			return null;
+		}
+		return grammar.name+VOCAB_FILE_EXTENSION;
+	}
+
+	public void write(StringTemplate code, String fileName) throws IOException {
+		long start = System.currentTimeMillis();
+		Writer w = tool.getOutputFile(grammar, fileName);
+		// Write the output to a StringWriter
+		StringTemplateWriter wr = templates.getStringTemplateWriter(w);
+		wr.setLineWidth(lineWidth);
+		code.write(wr);
+		w.close();
+		long stop = System.currentTimeMillis();
+		//System.out.println("render time for "+fileName+": "+(int)(stop-start)+"ms");
+	}
+
+	/** You can generate a switch rather than if-then-else for a DFA state
+	 *  if there are no semantic predicates and the number of edge label
+	 *  values is small enough; e.g., don't generate a switch for a state
+	 *  containing an edge label such as 20..52330 (the resulting byte codes
+	 *  would overflow the method 65k limit probably).
+	 */
+	protected boolean canGenerateSwitch(DFAState s) {
+		if ( !GENERATE_SWITCHES_WHEN_POSSIBLE ) {
+			return false;
+		}
+		int size = 0;
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) s.transition(i);
+			if ( edge.label.isSemanticPredicate() ) {
+				return false;
+			}
+			// can't do a switch if the edges are going to require predicates
+			if ( edge.label.getAtom()==Label.EOT ) {
+				int EOTPredicts = ((DFAState)edge.target).getUniquelyPredictedAlt();
+				if ( EOTPredicts==NFA.INVALID_ALT_NUMBER ) {
+					// EOT target has to be a predicate then; no unique alt
+					return false;
+				}
+			}
+			// if target is a state with gated preds, we need to use preds on
+			// this edge then to reach it.
+			if ( ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null ) {
+				return false;
+			}
+			size += edge.label.getSet().size();
+		}
+		if ( s.getNumberOfTransitions()<MIN_SWITCH_ALTS ||
+			 size>MAX_SWITCH_CASE_LABELS ) {
+			return false;
+		}
+		return true;
+	}
+
+	/** Create a label to track a token / rule reference's result.
+	 *  Technically, this is a place where I break model-view separation
+	 *  as I am creating a variable name that could be invalid in a
+	 *  target language, however, label ::= <ID><INT> is probably ok in
+	 *  all languages we care about.
+	 */
+	public String createUniqueLabel(String name) {
+		return new StringBuffer()
+			.append(name).append(uniqueLabelNumber++).toString();
+	}
+}
diff --git a/src/org/antlr/codegen/JavaTarget.java b/src/org/antlr/codegen/JavaTarget.java
new file mode 100644
index 0000000..b7eee8c
--- /dev/null
+++ b/src/org/antlr/codegen/JavaTarget.java
@@ -0,0 +1,44 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
/** Code-generation target for Java.  Only overrides where cyclic DFA
 *  templates are placed: they go into the recognizer template itself
 *  rather than into a separate template/file.
 */
public class JavaTarget extends Target {
	/** Route cyclic DFA code into the recognizer's own template. */
	protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
													 CodeGenerator generator,
													 Grammar grammar,
													 StringTemplate recognizerST,
													 StringTemplate cyclicDFAST)
	{
		return recognizerST;
	}
}
+
diff --git a/src/org/antlr/codegen/ObjCTarget.java b/src/org/antlr/codegen/ObjCTarget.java
new file mode 100644
index 0000000..9a87b30
--- /dev/null
+++ b/src/org/antlr/codegen/ObjCTarget.java
@@ -0,0 +1,109 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005 Terence Parr
+ Copyright (c) 2006 Kay Roepke (Objective-C runtime)
+ All rights reserved.
+ 
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+ 
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+import org.antlr.Tool;
+import org.antlr.misc.Utils;
+
+import java.io.IOException;
+
+/** Code-generation target for Objective-C.  Unlike Java, Objective-C
+ *  requires a generated header file, and string/char literals and token
+ *  labels need target-specific translation.
+ */
+public class ObjCTarget extends Target {
+	/** Objective-C needs a header file; write headerFileST to
+	 *  <grammarName><typeSuffix><extName> (e.g. "TParser.h").
+	 */
+	protected void genRecognizerHeaderFile(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   StringTemplate headerFileST,
+										   String extName)
+	throws IOException
+	{
+		generator.write(headerFileST, grammar.name + Grammar.grammarTypeToFileNameSuffix[grammar.type] + extName);
+	}
+
+	/** Translate an ANTLR char literal for Objective-C: '\uXXXX' becomes
+	 *  the hex integer 0xXXXX, and non-printable chars (outside 32..127)
+	 *  become their hex char code; printable ASCII literals pass through.
+	 */
+	public String getTargetCharLiteralFromANTLRCharLiteral(CodeGenerator generator,
+														   String literal)
+	{
+		if  (literal.startsWith("'\\u") ) {
+			literal = "0x" +literal.substring(3, 7);
+		} else	{
+			// NOTE(review): charAt(1) assumes a simple one-char literal like 'x';
+			// escaped forms such as '\n' would yield the backslash — confirm
+			// upstream normalization before relying on this for escapes.
+			int c = literal.charAt(1); // TJP
+			if  (c < 32 || c > 127) {
+				literal  =  "0x" + Integer.toHexString(c);
+			}
+		}
+
+		return literal;
+	}
+
+	/** Convert from an ANTLR string literal found in a grammar file to
+	*  an equivalent string literal in Objective-C.  Flip the single quotes
+	*  to double quotes, escape embedded double quotes with \", and prepend
+	*  '@' so the result is an Objective-C NSString literal: 'a"' -> @"a\"".
+	*/
+	public String getTargetStringLiteralFromANTLRStringLiteral(CodeGenerator generator,
+															   String literal)
+	{
+		literal = Utils.replace(literal,"\"","\\\"");
+		StringBuffer buf = new StringBuffer(literal);
+		buf.setCharAt(0,'"');
+		buf.setCharAt(literal.length()-1,'"');
+		buf.insert(0,'@');
+		return buf.toString();
+	}
+
+	/** If we have a label, prefix it with the recognizer's name */
+	public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype) {
+		String name = generator.grammar.getTokenDisplayName(ttype);
+		// If name is a literal, return the token type instead
+		if ( name.charAt(0)=='\'' ) {
+			return String.valueOf(ttype);
+		}
+		return generator.grammar.name + Grammar.grammarTypeToFileNameSuffix[generator.grammar.type] + "_" + name;
+		//return super.getTokenTypeAsTargetLabel(generator, ttype);
+		//return this.getTokenTextAndTypeAsTargetLabel(generator, null, ttype);
+	}
+
+	/** Target must be able to override the labels used for token types. Sometimes also depends on the token text.
+	 *  Literal names yield the numeric type; names starting with a digit are
+	 *  returned as-is; otherwise the label is prefixed with the recognizer name.
+	 */
+	public String getTokenTextAndTypeAsTargetLabel(CodeGenerator generator, String text, int tokenType) {
+		String name = generator.grammar.getTokenDisplayName(tokenType);
+		// If name is a literal, return the token type instead
+		if ( name.charAt(0)=='\'' ) {
+			return String.valueOf(tokenType);
+		}
+		String textEquivalent = text == null ? name : text;
+		if (textEquivalent.charAt(0) >= '0' && textEquivalent.charAt(0) <= '9') {
+			return textEquivalent;
+		} else {
+			return generator.grammar.name + Grammar.grammarTypeToFileNameSuffix[generator.grammar.type] + "_" + textEquivalent;
+		}
+	}
+
+}
+
diff --git a/src/org/antlr/codegen/PythonTarget.java b/src/org/antlr/codegen/PythonTarget.java
new file mode 100644
index 0000000..2d095f6
--- /dev/null
+++ b/src/org/antlr/codegen/PythonTarget.java
@@ -0,0 +1,217 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005 Martin Traverso
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+
+Please excuse my obvious lack of Java experience. The code here is probably
+full of WTFs - though IMHO Java is the Real WTF(TM) here...
+
+ */
+
+package org.antlr.codegen;
+import java.util.*;
+
+/** Code-generation target for Python.  Beyond the usual literal and
+ *  token-label translation, Python is whitespace-sensitive, so embedded
+ *  grammar actions must be re-indented to column 0; see postProcessAction().
+ */
+public class PythonTarget extends Target {
+    /** Target must be able to override the labels used for token types */
+    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
+					    int ttype) {
+	// use ints for predefined types;
+	// <invalid> <EOR> <DOWN> <UP>
+	if ( ttype >= 0 && ttype <= 3 ) {
+	    return String.valueOf(ttype);
+	}
+
+	String name = generator.grammar.getTokenDisplayName(ttype);
+
+	// If name is a literal, return the token type instead
+	if ( name.charAt(0)=='\'' ) {
+	    return String.valueOf(ttype);
+	}
+
+	return name;
+    }
+
+    /** Prefix the ANTLR char literal with 'u' so it becomes a Python
+     *  unicode string literal, e.g. 'x' -> u'x'.
+     */
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+	return "u" + literal;
+    }
+
+    /** Split text into lines.  Every element except possibly the last
+     *  ends with "\n" — the terminating LF stays attached to its line.
+     */
+    private List splitLines(String text) {
+		ArrayList l = new ArrayList();
+		int idx = 0;
+
+		while ( true ) {
+			int eol = text.indexOf("\n", idx);
+			if ( eol == -1 ) {
+				// final (possibly empty) segment with no trailing LF
+				l.add(text.substring(idx));
+				break;
+			}
+			else {
+				l.add(text.substring(idx, eol+1));
+				idx = eol+1;
+			}
+		}
+
+		return l;
+    }
+
+    /** Re-indent an embedded action for Python: drop leading/trailing
+     *  blank lines, then strip the first non-blank line's indentation
+     *  from every line so the action starts at column 0.  Lines indented
+     *  less than that are reported as badly indented.
+     */
+    public List postProcessAction(List chunks, antlr.Token actionToken) {
+		/* TODO
+		   - check for and report TAB usage
+		 */
+
+		//System.out.println("\n*** Action at " + actionToken.getLine() + ":" + actionToken.getColumn());
+
+		/* First I create a new list of chunks. String chunks are split into
+		   lines and some whitespace may be added at the beginning.
+
+		   As a result I get a list of chunks
+		   - where the first line starts at column 0
+		   - where every LF is at the end of a string chunk
+		*/
+
+		List nChunks = new ArrayList();
+		for (int i = 0; i < chunks.size(); i++) {
+			Object chunk = chunks.get(i);
+
+			if ( chunk instanceof String ) {
+				String text = (String)chunks.get(i);
+				if ( nChunks.size() == 0 && actionToken.getColumn() > 0 ) {
+					// first chunk and some 'virtual' WS at beginning
+					// prepend to this chunk
+
+					String ws = "";
+					for ( int j = 0 ; j < actionToken.getColumn() ; j++ ) {
+						ws += " ";
+					}
+					text = ws + text;
+				}
+
+				List parts = splitLines(text);
+				for ( int j = 0 ; j < parts.size() ; j++ ) {
+					chunk = parts.get(j);
+					nChunks.add(chunk);
+				}
+			}
+			else {
+				if ( nChunks.size() == 0 && actionToken.getColumn() > 0 ) {
+					// first chunk and some 'virtual' WS at beginning
+					// add as a chunk of its own
+
+					String ws = "";
+					for ( int j = 0 ; j < actionToken.getColumn() ; j++ ) {
+						ws += " ";
+					}
+					nChunks.add(ws);
+				}
+
+				nChunks.add(chunk);
+			}
+		}
+
+		int lineNo = actionToken.getLine();
+		int col = 0;
+
+		// strip trailing empty lines
+		int lastChunk = nChunks.size() - 1;
+		while ( lastChunk > 0
+				&& nChunks.get(lastChunk) instanceof String
+				&& ((String)nChunks.get(lastChunk)).trim().length() == 0 )
+			lastChunk--;
+
+		// strip leading empty lines (advance lineNo past each one)
+		int firstChunk = 0;
+		while ( firstChunk <= lastChunk
+				&& nChunks.get(firstChunk) instanceof String
+				&& ((String)nChunks.get(firstChunk)).trim().length() == 0
+				&& ((String)nChunks.get(firstChunk)).endsWith("\n") ) {
+			lineNo++;
+			firstChunk++;
+		}
+
+		// indent = -1 means "not yet measured from the first non-blank line"
+		int indent = -1;
+		for ( int i = firstChunk ; i <= lastChunk ; i++ ) {
+			Object chunk = nChunks.get(i);
+
+			//System.out.println(lineNo + ":" + col + " " + quote(chunk.toString()));
+
+			if ( chunk instanceof String ) {
+				String text = (String)chunk;
+
+				if ( col == 0 ) {
+					if ( indent == -1 ) {
+						// first non-blank line
+						// count number of leading whitespaces
+
+						indent = 0;
+						for ( int j = 0; j < text.length(); j++ ) {
+							if ( !Character.isWhitespace(text.charAt(j)) )
+								break;
+			
+							indent++;
+						}
+					}
+
+					if ( text.length() >= indent ) {
+						int j;
+						for ( j = 0; j < indent ; j++ ) {
+							if ( !Character.isWhitespace(text.charAt(j)) ) {
+								// should do real error reporting here...
+								System.err.println("Warning: badly indented line " + lineNo + " in action:");
+								System.err.println(text);
+								break;
+							}
+						}
+
+						// strip the measured indentation (or as much as was whitespace)
+						nChunks.set(i, text.substring(j));
+					}
+					else if ( text.trim().length() > 0 ) {
+						// should do real error reporting here...
+						System.err.println("Warning: badly indented line " + lineNo + " in action:");
+						System.err.println(text);
+					}
+				}
+
+				if ( text.endsWith("\n") ) {
+					lineNo++;
+					col = 0;
+				}
+				else {
+					col += text.length();
+				}
+			}
+			else {
+				// not really correct, but all I need is col to increment...
+				col += 1;
+			}
+		}
+
+		return nChunks;
+    }
+}
diff --git a/src/org/antlr/codegen/RubyTarget.java b/src/org/antlr/codegen/RubyTarget.java
new file mode 100644
index 0000000..d40a74b
--- /dev/null
+++ b/src/org/antlr/codegen/RubyTarget.java
@@ -0,0 +1,73 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005 Martin Traverso
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.codegen;
+
+/** Code-generation target for Ruby (8-bit chars only, see getMaxCharValue). */
+public class RubyTarget
+        extends Target
+{
+    /** Translate an ANTLR char literal to Ruby's ?c character syntax:
+     *  strip the surrounding quotes and prefix with '?'.  Backslash and
+     *  space need escaped forms (?\\ and ?\s); a unicode escape becomes
+     *  a plain hex integer 0xNNNN instead.
+     */
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal)
+    {
+        literal = literal.substring(1, literal.length() - 1);
+
+        String result = "?";
+
+        if (literal.equals("\\")) {
+            result += "\\\\";
+        }
+        else if (literal.equals(" ")) {
+            result += "\\s";
+        }
+        else if (literal.startsWith("\\u")) {
+            result = "0x" + literal.substring(2);
+        }
+        else {
+            result += literal;
+        }
+
+        return result;
+    }
+
+    public int getMaxCharValue(CodeGenerator generator)
+    {
+        // we don't support unicode, yet.
+        return 0xFF;
+    }
+
+    /** Token labels: literals get a token name computed by the grammar
+     *  rather than the raw quoted text.
+     */
+    public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype)
+    {
+        String name = generator.grammar.getTokenDisplayName(ttype);
+        // If name is a literal, return the token type instead
+        if ( name.charAt(0)=='\'' ) {
+            return generator.grammar.computeTokenNameFromLiteral(ttype, name);
+        }
+        return name;
+    }
+}
diff --git a/src/org/antlr/codegen/Target.java b/src/org/antlr/codegen/Target.java
new file mode 100644
index 0000000..2901f4e
--- /dev/null
+++ b/src/org/antlr/codegen/Target.java
@@ -0,0 +1,294 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.misc.Utils;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
+import java.io.IOException;
+import java.util.List;
+
+/** The code generator for ANTLR can usually be retargeted just by providing
+ *  a new X.stg file for language X, however, sometimes the files that must
+ *  be generated vary enough that some X-specific functionality is required.
+ *  For example, in C, you must generate header files whereas in Java you do not.
+ *  Other languages may want to keep DFA separate from the main
+ *  generated recognizer file.
+ *
+ *  The notion of a Code Generator target abstracts out the creation
+ *  of the various files.  As new language targets get added to the ANTLR
+ *  system, this target class may have to be altered to handle more
+ *  functionality.  Eventually, just about all language generation issues
+ *  will be expressible in terms of these methods.
+ *
+ *  If org.antlr.codegen.XTarget class exists, it is used else
+ *  Target base class is used.  I am using a superclass rather than an
+ *  interface for this target concept because I can add functionality
+ *  later without breaking previously written targets (extra interface
+ *  methods would force adding dummy functions to all code generator
+ *  target classes).
+ *
+ */
+public class Target {
+
+	/** For pure strings of Java 16-bit unicode char, how can we display
+	 *  it in the target language as a literal.  Useful for dumping
+	 *  predicates and such that may refer to chars that need to be escaped
+	 *  when represented as strings.  Also, templates need to be escaped so
+	 *  that the target language can hold them as a string.
+	 *
+	 *  I have defined (via the constructor) the set of typical escapes,
+	 *  but your Target subclass is free to alter the translated chars or
+	 *  add more definitions.  This is nonstatic so each target can have
+	 *  a different set in memory at same time.
+	 */
+	protected String[] targetCharValueEscape = new String[255];
+
+	public Target() {
+		targetCharValueEscape['\n'] = "\\n";
+		targetCharValueEscape['\r'] = "\\r";
+		targetCharValueEscape['\t'] = "\\t";
+		targetCharValueEscape['\b'] = "\\b";
+		targetCharValueEscape['\f'] = "\\f";
+		targetCharValueEscape['\\'] = "\\\\";
+		targetCharValueEscape['\''] = "\\'";
+		targetCharValueEscape['"'] = "\\\"";
+	}
+
+	/** Write the main recognizer output file; the file name is computed
+	 *  by the generator from the grammar name and type.
+	 */
+	protected void genRecognizerFile(Tool tool,
+									 CodeGenerator generator,
+									 Grammar grammar,
+									 StringTemplate outputFileST)
+		throws IOException
+	{
+		String fileName =
+			generator.getRecognizerFileName(grammar.name, grammar.type);
+		generator.write(outputFileST, fileName);
+	}
+
+	/** Write a header file if the target requires one (e.g., C or
+	 *  Objective-C); the base target generates none.
+	 */
+	protected void genRecognizerHeaderFile(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   StringTemplate headerFileST,
+										   String extName) // e.g., ".h"
+		throws IOException
+	{
+		// no header file by default
+	}
+
+	/** Run grammar analysis before code generation: build the NFAs,
+	 *  then the lookahead DFA predictors for every decision.
+	 */
+	protected void performGrammarAnalysis(CodeGenerator generator,
+										  Grammar grammar)
+	{
+		// Build NFAs from the grammar AST
+		grammar.createNFAs();
+
+		// Create the DFA predictors for each decision
+		grammar.createLookaheadDFAs();
+	}
+
+	/** Is scope in @scope::name {action} valid for this kind of grammar?
+	 *  Targets like C++ may want to allow new scopes like headerfile or
+	 *  some such.  The action names themselves are not policed at the
+	 *  moment so targets can add template actions w/o having to recompile
+	 *  ANTLR.
+	 */
+	public boolean isValidActionScope(int grammarType, String scope) {
+		switch (grammarType) {
+			case Grammar.LEXER :
+				if ( scope.equals("lexer") ) {return true;}
+				break;
+			case Grammar.PARSER :
+				if ( scope.equals("parser") ) {return true;}
+				break;
+			case Grammar.COMBINED :
+				if ( scope.equals("parser") ) {return true;}
+				if ( scope.equals("lexer") ) {return true;}
+				break;
+			case Grammar.TREE_PARSER :
+				if ( scope.equals("treeparser") ) {return true;}
+				break;
+		}
+		return false;
+	}
+
+	/** Target must be able to override the labels used for token types */
+	public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype) {
+		String name = generator.grammar.getTokenDisplayName(ttype);
+		// If name is a literal, return the token type instead
+		if ( name.charAt(0)=='\'' ) {
+			return String.valueOf(ttype);
+		}
+		return name;
+	}
+
+	/** Convert from an ANTLR char literal found in a grammar file to
+	 *  an equivalent char literal in the target language.  For most
+	 *  languages, this means leaving 'x' as 'x'.  Actually, we need
+	 *  to escape '\u000A' so that it doesn't get converted to \n by
+	 *  the compiler.  Convert the literal to the char value and then
+	 *  to an appropriate target char literal.
+	 *
+	 *  Expect single quotes around the incoming literal.
+	 */
+	public String getTargetCharLiteralFromANTLRCharLiteral(
+		CodeGenerator generator,
+		String literal)
+	{
+		StringBuffer buf = new StringBuffer();
+		buf.append('\'');
+		int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+		if ( c<Label.MIN_CHAR_VALUE ) {
+			// out-of-range values collapse to the NUL char literal
+			return "'\u0000'";
+		}
+		if ( c<targetCharValueEscape.length &&
+			 targetCharValueEscape[c]!=null )
+		{
+			buf.append(targetCharValueEscape[c]);
+		}
+		else if ( Character.UnicodeBlock.of((char)c)==
+				  Character.UnicodeBlock.BASIC_LATIN &&
+				  !Character.isISOControl((char)c) )
+		{
+			// normal char
+			buf.append((char)c);
+		}
+		else {
+			// must be something unprintable...use \\uXXXX
+			// turn on the bit above max "\\uFFFF" value so that we pad with zeros
+			// then only take last 4 digits
+			String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
+			buf.append("\\u");
+			buf.append(hex);
+		}
+
+		buf.append('\'');
+		return buf.toString();
+	}
+
+	/** Convert from an ANTLR string literal found in a grammar file to
+	 *  an equivalent string literal in the target language.  For Java, this
+	 *  is the translation 'a\n"' -> "a\n\"".  Expect single quotes
+	 *  around the incoming literal.  Just flip the quotes and replace
+	 *  double quotes with \"
+	 */
+	public String getTargetStringLiteralFromANTLRStringLiteral(
+		CodeGenerator generator,
+		String literal)
+	{
+		literal = Utils.replace(literal,"\"","\\\"");
+		StringBuffer buf = new StringBuffer(literal);
+		buf.setCharAt(0,'"');
+		buf.setCharAt(literal.length()-1,'"');
+		return buf.toString();
+	}
+
+	/** Given a random string of Java unicode chars, return a new string with
+	 *  optionally appropriate quote characters for target language and possibly
+	 *  with some escaped characters.  For example, if the incoming string has
+	 *  actual newline characters, the output of this method would convert them
+	 *  to the two char sequence \n for Java, C, C++, ...  The new string has
+	 *  double-quotes around it as well.  Example String in memory:
+	 *
+	 *     a"[newlinechar]b'c[carriagereturnchar]d[tab]e\f
+	 *
+	 *  would be converted to the valid Java s:
+	 *
+	 *     "a\"\nb'c\rd\te\\f"
+	 *
+	 *  or
+	 *
+	 *     a\"\nb'c\rd\te\\f
+	 *
+	 *  depending on the quoted arg.
+	 */
+	public String getTargetStringLiteralFromString(String s, boolean quoted) {
+		if ( s==null ) {
+			return null;
+		}
+		StringBuffer buf = new StringBuffer();
+		if ( quoted ) {
+			buf.append('"');
+		}
+		for (int i=0; i<s.length(); i++) {
+			int c = s.charAt(i);
+			if ( c!='\'' && // don't escape single quotes in strings for java
+				 c<targetCharValueEscape.length &&
+				 targetCharValueEscape[c]!=null )
+			{
+				buf.append(targetCharValueEscape[c]);
+			}
+			else {
+				buf.append((char)c);
+			}
+		}
+		if ( quoted ) {
+			buf.append('"');
+		}
+		return buf.toString();
+	}
+
+	/** Convenience overload: escape without surrounding double quotes. */
+	public String getTargetStringLiteralFromString(String s) {
+		return getTargetStringLiteralFromString(s, false);
+	}
+
+	/** Convert long to 0xNNNNNNNNNNNNNNNN by default for spitting out
+	 *  with bitsets.  I.e., convert bytes to hex string.
+	 */
+	public String getTarget64BitStringFromValue(long word) {
+		int numHexDigits = 8*2;
+		StringBuffer buf = new StringBuffer(numHexDigits+2);
+		buf.append("0x");
+		String digits = Long.toHexString(word);
+		digits = digits.toUpperCase();
+		int padding = numHexDigits - digits.length();
+		// pad left with zeros
+		for (int i=1; i<=padding; i++) {
+			buf.append('0');
+		}
+		buf.append(digits);
+		return buf.toString();
+	}
+
+	/** Some targets only support ASCII or 8-bit chars/strings.  For example,
+	 *  C++ will probably want to return 0xFF here.
+	 */
+	public int getMaxCharValue(CodeGenerator generator) {
+		return Label.MAX_CHAR_VALUE;
+	}
+
+	/** Give target a chance to do some postprocessing on actions.
+	 *  Python for example will have to fix the indentation.
+	 */
+	public List postProcessAction(List chunks, antlr.Token actionToken) {
+		return chunks;
+	}
+
+}
diff --git a/src/org/antlr/codegen/codegen.g b/src/org/antlr/codegen/codegen.g
new file mode 100644
index 0000000..f69ba45
--- /dev/null
+++ b/src/org/antlr/codegen/codegen.g
@@ -0,0 +1,1300 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.codegen;
+    import org.antlr.tool.*;
+    import org.antlr.analysis.*;
+    import org.antlr.misc.*;
+	import java.util.*;
+	import org.antlr.stringtemplate.*;
+    import antlr.TokenWithIndex;
+    import antlr.CommonToken;
+}
+
+/** Walk a grammar and generate code by gradually building up
+ *  a bigger and bigger StringTemplate.
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  June 15, 2004
+ */
+class CodeGenTreeWalker extends TreeParser;
+
+options {
+    // warning! ANTLR cannot see another directory to get vocabs, so I had
+    // to copy the ANTLRTokenTypes.txt file into this dir from ../tools!
+    // Yuck!  If you modify ../tools/antlr.g, make sure to copy the vocab here.
+	importVocab = ANTLR;
+    codeGenBitsetTestThreshold=999;
+    ASTLabelType=GrammarAST;
+}
+
+{
+	protected static final int RULE_BLOCK_NESTING_LEVEL = 0;
+	protected static final int OUTER_REWRITE_NESTING_LEVEL = 0;
+
+    protected String currentRuleName = null;
+    protected int blockNestingLevel = 0;
+    protected int rewriteBlockNestingLevel = 0;
+	protected int outerAltNum = 0;
+    protected StringTemplate currentBlockST = null;
+    protected boolean currentAltHasASTRewrite = false;
+    protected int rewriteTreeNestingLevel = 0;
+    protected Set rewriteRuleRefs = null;
+
+    /** Route a tree-walk syntax error through ErrorManager, attaching the
+     *  offending token when the exception type carries one.
+     */
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		// only these two exception types expose the offending token
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "codegen: "+ex.toString(),
+            ex);
+    }
+
+    /** Report a plain error message; just dumped to stdout. */
+    public void reportError(String s) {
+        System.out.println("codegen: error: " + s);
+    }
+
+    protected CodeGenerator generator;
+    protected Grammar grammar;
+    protected StringTemplateGroup templates;
+
+    /** The overall lexer/parser template; simulate dynamically scoped
+     *  attributes by making this an instance var of the walker.
+     */
+    protected StringTemplate recognizerST;
+
+    protected StringTemplate outputFileST;
+    protected StringTemplate headerFileST;
+
+    protected String outputOption = "";
+
+	/** Pick the wildcard template: lexers use "wildcardChar", all other
+	 *  grammar types use the generic "wildcard" template.
+	 */
+	protected StringTemplate getWildcardST(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
+		String name = "wildcard";
+		if ( grammar.type==Grammar.LEXER ) {
+			name = "wildcardChar";
+		}
+		return getTokenElementST(name, name, elementAST, ast_suffix, label);
+	}
+
+	/** Fetch the template for a rule-reference element, appending the
+	 *  AST/rewrite/list-label suffix from getSTSuffix().  When tree building
+	 *  or tracking requires a label and none was supplied, a unique label is
+	 *  generated and registered as a rule-ref label on the current rule.
+	 */
+	protected StringTemplate getRuleElementST(String name,
+										      String elementName,
+											  GrammarAST elementAST,
+    										  GrammarAST ast_suffix,
+    										  String label)
+	{
+		String suffix = getSTSuffix(ast_suffix,label);
+		name += suffix;
+		// if we're building trees and there is no label, gen a label
+		// unless we're in a synpred rule.
+		Rule r = grammar.getRule(currentRuleName);
+		if ( (grammar.buildAST()||suffix.length()>0) && label==null &&
+		     (r==null || !r.isSynPred) )
+		{
+			// we will need a label to do the AST or tracking, make one
+			label = generator.createUniqueLabel(elementName);
+			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
+			grammar.defineRuleRefLabel(currentRuleName, labelTok, elementAST);
+		}
+		StringTemplate elementST = templates.getInstanceOf(name);
+		if ( label!=null ) {
+			elementST.setAttribute("label", label);
+		}
+		return elementST;
+	}
+
+	/** Same as getRuleElementST() but for token references: any generated
+	 *  label is registered as a token-ref label on the current rule.
+	 */
+	protected StringTemplate getTokenElementST(String name,
+											   String elementName,
+											   GrammarAST elementAST,
+											   GrammarAST ast_suffix,
+											   String label)
+	{
+		String suffix = getSTSuffix(ast_suffix,label);
+		name += suffix;
+		// if we're building trees and there is no label, gen a label
+		// unless we're in a synpred rule.
+		Rule r = grammar.getRule(currentRuleName);
+		if ( (grammar.buildAST()||suffix.length()>0) && label==null &&
+		     (r==null || !r.isSynPred) )
+		{
+			label = generator.createUniqueLabel(elementName);
+			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
+			grammar.defineTokenRefLabel(currentRuleName, labelTok, elementAST);
+		}
+		StringTemplate elementST = templates.getInstanceOf(name);
+		if ( label!=null ) {
+			elementST.setAttribute("label", label);
+		}
+		return elementST;
+	}
+
+    /** True if label is declared on the current rule as a token or rule
+     *  list label (i.e., label+=...); such elements need the
+     *  "AndListLabel" template variants.  A null label yields false.
+     */
+    public boolean isListLabel(String label) {
+		boolean hasListLabel=false;
+		if ( label!=null ) {
+			Rule r = grammar.getRule(currentRuleName);
+			if ( r!=null ) {
+				Grammar.LabelElementPair pair = r.getLabel(label);
+				if ( pair!=null &&
+					 (pair.type==Grammar.TOKEN_LIST_LABEL||
+					  pair.type==Grammar.RULE_LIST_LABEL) )
+				{
+					hasListLabel=true;
+				}
+			}
+		}
+        return hasListLabel;
+    }
+
+	/** Return a non-empty template name suffix if the token is to be
+	 *  tracked, added to a tree, or both.  Lexer grammars never get a
+	 *  suffix.  The suffix is built from operator ("RuleRoot"/"Bang"),
+	 *  rewrite-tracking ("Track"), and list-label ("AndListLabel") parts.
+	 */
+	protected String getSTSuffix(GrammarAST ast_suffix, String label) {
+		if ( grammar.type==Grammar.LEXER ) {
+			return "";
+		}
+		// handle list label stuff; make element use "Track"
+
+		String astPart = "";
+		String operatorPart = "";
+		String rewritePart = "";
+		String listLabelPart = "";
+		if ( grammar.buildAST() ) {
+			astPart = "AST";
+		}
+		// ^ (ROOT) and ! (BANG) suffixes select specialized templates
+		if ( ast_suffix!=null ) {
+			if ( ast_suffix.getType()==ANTLRParser.ROOT ) {
+    			operatorPart = "RuleRoot";
+    		}
+    		else if ( ast_suffix.getType()==ANTLRParser.BANG ) {
+    			operatorPart = "Bang";
+    		}
+   		}
+		if ( currentAltHasASTRewrite ) {
+			rewritePart = "Track";
+		}
+		if ( isListLabel(label) ) {
+			listLabelPart = "AndListLabel";
+		}
+		String STsuffix = operatorPart+rewritePart+listLabelPart;
+		//System.out.println("suffix = "+STsuffix);
+
+    	return STsuffix;
+	}
+
+    /** Convert rewrite AST lists to target labels list.  Rule refs and
+     *  explicit labels pass through as their text; token/char/string
+     *  literals are converted to the target's label for their token type.
+     *  Returns null for a null or empty ref set.
+     */
+    protected List<String> getTokenTypesAsTargetLabels(Set<GrammarAST> refs) {
+        if ( refs==null || refs.size()==0 ) {
+            return null;
+        }
+        List<String> labels = new ArrayList<String>(refs.size());
+        for (GrammarAST t : refs) {
+            String label;
+            if ( t.getType()==ANTLRParser.RULE_REF ) {
+                label = t.getText();
+            }
+            else if ( t.getType()==ANTLRParser.LABEL ) {
+                label = t.getText();
+            }
+            else {
+                // must be char or string literal
+                label = generator.getTokenTypeAsTargetLabel(
+                            grammar.getTokenType(t.getText()));
+            }
+            labels.add(label);
+        }
+        return labels;
+    }
+
+    /** Cache the grammar, its code generator, and template group for walk. */
+    protected void init(Grammar g) {
+        this.grammar = g;
+        this.generator = grammar.getCodeGenerator();
+        this.templates = generator.getTemplates();
+    }
+}
+
+// Root rule: walk the whole grammar AST.  The init action seeds the
+// recognizer/outputFile/headerFile templates with grammar-wide attributes
+// (superClass, ASTLabelType, labelType, numRules) before visiting rules.
+grammar[Grammar g,
+        StringTemplate recognizerST,
+        StringTemplate outputFileST,
+        StringTemplate headerFileST]
+{
+    init(g);
+    this.recognizerST = recognizerST;
+    this.outputFileST = outputFileST;
+    this.headerFileST = headerFileST;
+    String superClass = (String)g.getOption("superClass");
+    outputOption = (String)g.getOption("output");
+    recognizerST.setAttribute("superClass", superClass);
+    if ( g.type!=Grammar.LEXER ) {
+		recognizerST.setAttribute("ASTLabelType", g.getOption("ASTLabelType"));
+	}
+    // tree parsers need an explicit AST node type; warn when missing
+    if ( g.type==Grammar.TREE_PARSER && g.getOption("ASTLabelType")==null ) {
+		ErrorManager.grammarWarning(ErrorManager.MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR,
+								   g,
+								   null,
+								   g.name);
+	}
+    if ( g.type!=Grammar.TREE_PARSER ) {
+		recognizerST.setAttribute("labelType", g.getOption("TokenLabelType"));
+	}
+	recognizerST.setAttribute("numRules", grammar.getRules().size());
+	outputFileST.setAttribute("numRules", grammar.getRules().size());
+	headerFileST.setAttribute("numRules", grammar.getRules().size());
+}
+    :   ( #( LEXER_GRAMMAR grammarSpec )
+	    | #( PARSER_GRAMMAR grammarSpec )
+	    | #( TREE_GRAMMAR grammarSpec
+	       )
+	    | #( COMBINED_GRAMMAR grammarSpec )
+	    )
+    ;
+
+// Global attribute scope definition; matched but no code generated here.
+attrScope
+	:	#( "scope" ID ACTION )
+	;
+
+// Grammar body: record the doc comment, set recognizer/output/header file
+// names (grammar name + type-specific suffix) and global scopes, skip
+// OPTIONS/TOKENS subtrees, then walk all the rules.
+grammarSpec
+	:   name:ID
+		(cmt:DOC_COMMENT
+		 {
+		 outputFileST.setAttribute("docComment", #cmt.getText());
+		 headerFileST.setAttribute("docComment", #cmt.getText());
+		 }
+		)?
+		{
+		String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
+        String n = #name.getText()+suffix;
+		recognizerST.setAttribute("name", n);
+		outputFileST.setAttribute("name", n);
+		headerFileST.setAttribute("name", n);
+		recognizerST.setAttribute("scopes", grammar.getGlobalScopes());
+		headerFileST.setAttribute("scopes", grammar.getGlobalScopes());
+		}
+		( #(OPTIONS .) )?
+		( #(TOKENS .) )?
+        (attrScope)*
+        (AMPERSAND)*
+		rules[recognizerST]
+	;
+
+// Walk every RULE subtree.  Syn-pred rules are only generated when some
+// DFA actually uses them; each generated rule template is added to the
+// recognizer, output file, and header file templates.
+rules[StringTemplate recognizerST]
+{
+StringTemplate rST;
+}
+    :   (	(	{
+    			String ruleName = _t.getFirstChild().getText();
+    			Rule r = grammar.getRule(ruleName);
+    			}
+     		:
+     			// if synpred, only gen if used in a DFA
+    			{!r.isSynPred || grammar.synPredNamesUsedInDFA.contains(ruleName)}?
+    			rST=rule
+				{
+				if ( rST!=null ) {
+					recognizerST.setAttribute("rules", rST);
+					outputFileST.setAttribute("rules", rST);
+					headerFileST.setAttribute("rules", rST);
+				}
+				}
+    		|	RULE
+    		)
+   		)+
+    ;
+
+// Generate the template for one rule.  Picks the template name by rule
+// kind (synpredRule / tokensRule / lexerRule / rule), fills in description,
+// memoization, exceptions, and block code.  Syn preds temporarily swap in
+// the base (non-debug) template group, restored at the end.
+rule returns [StringTemplate code=null]
+{
+    String r;
+    String initAction = null;
+    StringTemplate b;
+	// get the dfa for the BLOCK
+    GrammarAST block=#rule.getFirstChildWithType(BLOCK);
+    DFA dfa=block.getLookaheadDFA();
+	// init blockNestingLevel so it's block level RULE_BLOCK_NESTING_LEVEL
+	// for alts of rule
+	blockNestingLevel = RULE_BLOCK_NESTING_LEVEL-1;
+	Rule ruleDescr = grammar.getRule(#rule.getFirstChild().getText());
+
+	// For syn preds, we don't want any AST code etc... in there.
+	// Save old templates ptr and restore later.  Base templates include Dbg.
+	StringTemplateGroup saveGroup = templates;
+	if ( ruleDescr.isSynPred ) {
+		templates = generator.getBaseTemplates();
+	}
+}
+    :   #( RULE id:ID {r=#id.getText(); currentRuleName = r;}
+		    (mod:modifier)?
+            #(ARG (ARG_ACTION)?)
+            #(RET (ARG_ACTION)?)
+			( #(OPTIONS .) )?
+			(ruleScopeSpec)?
+		    (AMPERSAND)*
+            b=block["ruleBlock", dfa]
+			{
+			String description =
+				grammar.grammarTreeToString(#rule.getFirstChildWithType(BLOCK),
+                                            false);
+			description =
+                generator.target.getTargetStringLiteralFromString(description);
+			b.setAttribute("description", description);
+			// do not generate lexer rules in combined grammar
+			String stName = null;
+			if ( ruleDescr.isSynPred ) {
+				stName = "synpredRule";
+			}
+			else if ( grammar.type==Grammar.LEXER ) {
+				if ( r.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
+					stName = "tokensRule";
+				}
+				else {
+					stName = "lexerRule";
+				}
+			}
+			else {
+				if ( !(grammar.type==Grammar.COMBINED &&
+					 Character.isUpperCase(r.charAt(0))) )
+				{
+					stName = "rule";
+				}
+			}
+			code = templates.getInstanceOf(stName);
+			if ( code.getName().equals("rule") ) {
+				code.setAttribute("emptyRule",
+					Boolean.valueOf(grammar.isEmptyRule(block)));
+			}
+			code.setAttribute("ruleDescriptor", ruleDescr);
+			// rule-level memoize option overrides the grammar-level one
+			String memo = (String)#rule.getOption("memoize");
+			if ( memo==null ) {
+				memo = (String)grammar.getOption("memoize");
+			}
+			if ( memo!=null && memo.equals("true") &&
+			     (stName.equals("rule")||stName.equals("lexerRule")) )
+			{
+            	code.setAttribute("memoize",
+            		Boolean.valueOf(memo!=null && memo.equals("true")));
+            }
+			}
+
+	     	(exceptionGroup[code])?
+	     	EOR
+         )
+        {
+        if ( code!=null ) {
+			if ( grammar.type==Grammar.LEXER ) {
+		    	boolean naked =
+		    		r.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
+		    	    (mod!=null&&mod.getText().equals(Grammar.FRAGMENT_RULE_MODIFIER));
+		    	code.setAttribute("nakedBlock", Boolean.valueOf(naked));
+			}
+			else {
+				description =
+					grammar.grammarTreeToString(#rule,false);
+				description =
+				    generator.target.getTargetStringLiteralFromString(description);
+				code.setAttribute("description", description);
+			}
+			Rule theRule = grammar.getRule(r);
+			generator.translateActionAttributeReferencesForSingleScope(
+				theRule,
+				theRule.getActions()
+			);
+			code.setAttribute("ruleName", r);
+			code.setAttribute("block", b);
+			if ( initAction!=null ) {
+				code.setAttribute("initAction", initAction);
+			}
+        }
+		templates = saveGroup;
+        }
+    ;
+
+// Rule visibility/fragment modifier keywords; matched only.
+modifier
+	:	"protected"
+	|	"public"
+	|	"private"
+	|	"fragment"
+	;
+
+// Rule-level scope spec; matched but no code generated here.
+ruleScopeSpec
+ 	:	#( "scope" (ACTION)? ( ID )* )
+ 	;
+
+// Generate a (sub)block template.  With a DFA, a lookahead decision is
+// generated and attached; without one the "...SingleAlt" template is used.
+// Tracks blockNestingLevel and outerAltNum, and attaches each alt (plus
+// any rewrite code) to the block template.
+block[String blockTemplateName, DFA dfa]
+     returns [StringTemplate code=null]
+{
+    StringTemplate decision = null;
+    if ( dfa!=null ) {
+        code = templates.getInstanceOf(blockTemplateName);
+        decision = generator.genLookaheadDecision(recognizerST,dfa);
+        code.setAttribute("decision", decision);
+        code.setAttribute("decisionNumber", dfa.getDecisionNumber());
+		code.setAttribute("maxK",dfa.getMaxLookaheadDepth());
+		code.setAttribute("maxAlt",dfa.getNumberOfAlts());
+    }
+    else {
+        code = templates.getInstanceOf(blockTemplateName+"SingleAlt");
+    }
+    blockNestingLevel++;
+    code.setAttribute("blockLevel", blockNestingLevel);
+    code.setAttribute("enclosingBlockLevel", blockNestingLevel-1);
+    StringTemplate alt = null;
+    StringTemplate rew = null;
+    StringTemplate sb = null;
+    GrammarAST r = null;
+    int altNum = 1;
+	if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+        this.outerAltNum=1;
+    }
+}
+    :   {#block.getSetValue()!=null}? sb=setBlock
+        {
+            code.setAttribute("alts",sb);
+            blockNestingLevel--;
+        }
+
+    |   #(  BLOCK
+    	    ( OPTIONS )? // ignore
+            ( alt=alternative {r=(GrammarAST)_t;} rew=rewrite
+              {
+              if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+              	this.outerAltNum++;
+              }
+              // add the rewrite code as just another element in the alt :)
+    		  if ( rew!=null ) {
+    		  	alt.setAttribute("elements.{el,line,pos}",
+    		  		rew, Utils.integer(r.getLine()), Utils.integer(r.getColumn()));
+    		  }
+    		  // add this alt to the list of alts for this block
+              code.setAttribute("alts",alt);
+              alt.setAttribute("altNum", Utils.integer(altNum));
+              alt.setAttribute("outerAlt",
+                  Boolean.valueOf(blockNestingLevel==RULE_BLOCK_NESTING_LEVEL));
+              altNum++;
+              }
+            )+
+            EOB
+         )
+    	{blockNestingLevel--;}
+    ;
+
+// A block collapsed to a token set: generate a single "matchSet" element
+// wrapped in an "alt" template instead of a full decision.
+setBlock returns [StringTemplate code=null]
+{
+if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
+    Rule r = grammar.getRule(currentRuleName);
+    currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
+    if ( currentAltHasASTRewrite ) {
+        r.trackTokenReferenceInAlt(#setBlock, outerAltNum);
+    }
+}
+}
+    :   s:BLOCK
+        {
+        StringTemplate setcode =
+            getTokenElementST("matchSet", "set", #s, null, null);
+        int i = ((TokenWithIndex)#s.getToken()).getIndex();
+		setcode.setAttribute("elementIndex", i);
+		if ( grammar.type!=Grammar.LEXER ) {
+			generator.generateLocalFOLLOW(#s,"set",currentRuleName,i);
+        }
+        setcode.setAttribute("s",
+            generator.genSetExpr(templates,#s.getSetValue(),1,false));
+        StringTemplate altcode=templates.getInstanceOf("alt");
+		altcode.setAttribute("elements.{el,line,pos}",
+						     setcode,
+                             Utils.integer(#s.getLine()),
+                             Utils.integer(#s.getColumn())
+                            );
+        altcode.setAttribute("altNum", Utils.integer(1));
+        altcode.setAttribute("outerAlt",
+           Boolean.valueOf(blockNestingLevel==RULE_BLOCK_NESTING_LEVEL));
+        if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
+            altcode.setAttribute("autoAST", Boolean.valueOf(true));
+        }
+        code = altcode;
+        }
+    ;
+
+// catch handlers and/or a finally clause attached to a rule template.
+exceptionGroup[StringTemplate ruleST]
+	:	( exceptionHandler[ruleST] )+ (finallyClause[ruleST])?
+	|   finallyClause[ruleST]
+    ;
+
+// One "catch" clause: translate its action and record decl+action chunks.
+exceptionHandler[StringTemplate ruleST]
+    :    #("catch" ARG_ACTION ACTION)
+    	{
+    	List chunks = generator.translateAction(currentRuleName,#ACTION);
+    	ruleST.setAttribute("exceptions.{decl,action}",#ARG_ACTION.getText(),chunks);
+    	}
+    ;
+
+// "finally" clause: translate its action into the rule's finally attribute.
+finallyClause[StringTemplate ruleST]
+    :    #("finally" ACTION)
+    	{
+    	List chunks = generator.translateAction(currentRuleName,#ACTION);
+    	ruleST.setAttribute("finally",chunks);
+    	}
+    ;
+
+// One alternative: collect each element's template with its source
+// line/column into the "alt" template; notes whether this outermost alt
+// has an AST rewrite (drives auto-AST vs. tracked element templates).
+alternative returns [StringTemplate code=templates.getInstanceOf("alt")]
+{
+/*
+// TODO: can we use Rule.altsWithRewrites???
+if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+	GrammarAST aRewriteNode = #alternative.findFirstType(REWRITE);
+	if ( grammar.buildAST() &&
+		 (aRewriteNode!=null||
+		 (#alternative.getNextSibling()!=null &&
+		  #alternative.getNextSibling().getType()==REWRITE)) )
+	{
+		currentAltHasASTRewrite = true;
+	}
+	else {
+		currentAltHasASTRewrite = false;
+	}
+}
+*/
+if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
+    Rule r = grammar.getRule(currentRuleName);
+    currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
+}
+String description = grammar.grammarTreeToString(#alternative, false);
+description = generator.target.getTargetStringLiteralFromString(description);
+code.setAttribute("description", description);
+if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
+	code.setAttribute("autoAST", Boolean.valueOf(true));
+}
+StringTemplate e;
+}
+    :   #(	a:ALT
+    		(	{GrammarAST elAST=(GrammarAST)_t;}
+    			e=element[null,null]
+    			{
+    			if ( e!=null ) {
+					code.setAttribute("elements.{el,line,pos}",
+									  e,
+									  Utils.integer(elAST.getLine()),
+									  Utils.integer(elAST.getColumn())
+									 );
+    			}
+    			}
+    		)+
+    		EOA
+    	 )
+    ;
+
+// Dispatch on element kind: AST operators (^/!) recurse with a suffix,
+// labels recurse with the label, and leaf kinds (char range, set, atom,
+// tree, action, predicates) produce their own templates.
+element[GrammarAST label, GrammarAST astSuffix] returns [StringTemplate code=null]
+{
+    IntSet elements=null;
+    GrammarAST ast = null;
+}
+    :   #(ROOT code=element[label,#ROOT])
+
+    |   #(BANG code=element[label,#BANG])
+
+    |   #( n:NOT code=notElement[#n, label, astSuffix] )
+
+    |	#( ASSIGN alabel:ID code=element[#alabel,astSuffix] )
+
+    |	#( PLUS_ASSIGN label2:ID code=element[#label2,astSuffix] )
+
+    |   #(CHAR_RANGE a:CHAR_LITERAL b:CHAR_LITERAL)
+        {code = templates.getInstanceOf("charRangeRef");
+		 String low =
+		 	generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,a.getText());
+		 String high =
+		 	generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,b.getText());
+         code.setAttribute("a", low);
+         code.setAttribute("b", high);
+         if ( label!=null ) {
+             code.setAttribute("label", label.getText());
+         }
+        }
+
+    |   {#element.getSetValue()==null}? code=ebnf
+
+    |   code=atom[label, astSuffix]
+
+    |   code=tree
+
+    |   code=element_action
+
+    |   (sp:SEMPRED|gsp:GATED_SEMPRED {#sp=#gsp;})
+        {
+        code = templates.getInstanceOf("validateSemanticPredicate");
+        code.setAttribute("pred", generator.translateAction(currentRuleName,#sp));
+		String description =
+			generator.target.getTargetStringLiteralFromString(#sp.getText());
+		code.setAttribute("description", description);
+        }
+
+    |	SYN_SEMPRED // used only in lookahead; don't generate validating pred
+
+    |	BACKTRACK_SEMPRED
+
+    |   EPSILON
+    ;
+
+// Inline grammar action: translate it into an "execAction" template.
+element_action returns [StringTemplate code=null]
+    :   act:ACTION
+        {
+        code = templates.getInstanceOf("execAction");
+        code.setAttribute("action", generator.translateAction(currentRuleName,#act));
+        }
+    ;
+
+// ~x element: compute the complement set of the negated char/string/
+// token/set and emit a "matchSet" element over that complement.
+notElement[GrammarAST n, GrammarAST label, GrammarAST astSuffix]
+returns [StringTemplate code=null]
+{
+    IntSet elements=null;
+    String labelText = null;
+    if ( label!=null ) {
+        labelText = label.getText();
+    }
+}
+    :   (assign_c:CHAR_LITERAL
+        {
+        int ttype=0;
+        if ( grammar.type==Grammar.LEXER ) {
+            ttype = Grammar.getCharValueFromGrammarCharLiteral(assign_c.getText());
+        }
+        else {
+            ttype = grammar.getTokenType(assign_c.getText());
+        }
+        elements = grammar.complement(ttype);
+        }
+    |   assign_s:STRING_LITERAL
+        {
+        int ttype=0;
+        if ( grammar.type==Grammar.LEXER ) {
+            // TODO: error!
+        }
+        else {
+            ttype = grammar.getTokenType(assign_s.getText());
+        }
+        elements = grammar.complement(ttype);
+        }
+    |   assign_t:TOKEN_REF
+        {
+        int ttype = grammar.getTokenType(assign_t.getText());
+        elements = grammar.complement(ttype);
+        }
+    |   assign_st:BLOCK
+        {
+        elements = assign_st.getSetValue();
+        elements = grammar.complement(elements);
+        }
+        )
+        {
+        code = getTokenElementST("matchSet",
+                                 "set",
+                                 (GrammarAST)n.getFirstChild(),
+                                 astSuffix,
+                                 labelText);
+        code.setAttribute("s",generator.genSetExpr(templates,elements,1,false));
+        int i = ((TokenWithIndex)n.getToken()).getIndex();
+        code.setAttribute("elementIndex", i);
+        if ( grammar.type!=Grammar.LEXER ) {
+            generator.generateLocalFOLLOW(n,"set",currentRuleName,i);
+        }
+        }
+    ;
+
+// EBNF subrule: plain block, (..)?, (..)*, or (..)+.  Loops take their
+// lookahead DFA from the block's EOB node; the others from the ebnf node.
+ebnf returns [StringTemplate code=null]
+{
+    DFA dfa=null;
+    GrammarAST b = (GrammarAST)#ebnf.getFirstChild();
+    GrammarAST eob = (GrammarAST)#b.getLastChild(); // loops will use EOB DFA
+}
+    :   (	{dfa = #ebnf.getLookaheadDFA();}
+			code=block["block", dfa]
+		|   {dfa = #ebnf.getLookaheadDFA();}
+			#( OPTIONAL code=block["optionalBlock", dfa] )
+		|   {dfa = #eob.getLookaheadDFA();}
+			#( CLOSURE code=block["closureBlock", dfa] )
+		|   {dfa = #eob.getLookaheadDFA();}
+			#( POSITIVE_CLOSURE code=block["positiveClosureBlock", dfa] )
+		)
+		{
+		String description = grammar.grammarTreeToString(#ebnf, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+    	code.setAttribute("description", description);
+    	}
+    ;
+
+// ^( root ... ) pattern in a tree grammar: fills root, actions placed
+// right after the root, and the child elements into the "tree" template.
+tree returns [StringTemplate code=templates.getInstanceOf("tree")]
+{
+StringTemplate el=null, act=null;
+GrammarAST elAST=null, actAST=null;
+NFAState afterDOWN = (NFAState)tree_AST_in.NFATreeDownState.transition(0).target;
+LookaheadSet s = grammar.LOOK(afterDOWN);
+if ( s.member(Label.UP) ) {
+	// nullable child list if we can see the UP as the next token
+	// we need an "if ( input.LA(1)==Token.DOWN )" gate around
+	// the child list.
+	code.setAttribute("nullableChildList", "true");
+}
+}
+    :   #( TREE_BEGIN {elAST=(GrammarAST)_t;}
+    	   el=element[null,null]
+           {
+           code.setAttribute("root.{el,line,pos}",
+							  el,
+							  Utils.integer(elAST.getLine()),
+							  Utils.integer(elAST.getColumn())
+							  );
+           }
+           // push all the immediately-following actions out before children
+           // so actions aren't guarded by the "if (input.LA(1)==Token.DOWN)"
+           // guard in generated code.
+           (    options {greedy=true;}:
+                {actAST=(GrammarAST)_t;}
+                act=element_action
+                {
+                code.setAttribute("actionsAfterRoot.{el,line,pos}",
+                                  act,
+                                  Utils.integer(actAST.getLine()),
+                                  Utils.integer(actAST.getColumn())
+                );
+                }
+           )*
+           ( {elAST=(GrammarAST)_t;}
+    		 el=element[null,null]
+           	 {
+			 code.setAttribute("children.{el,line,pos}",
+							  el,
+							  Utils.integer(elAST.getLine()),
+							  Utils.integer(elAST.getColumn())
+							  );
+			 }
+           )*
+         )
+    ;
+
+// Leaf atom: rule ref, token ref, char/string literal, wildcard, or set.
+// Each branch picks its template, fills args/labels, records the element
+// index, and generates a local FOLLOW set where error recovery needs it.
+atom[GrammarAST label, GrammarAST astSuffix] 
+    returns [StringTemplate code=null]
+{
+String labelText=null;
+if ( label!=null ) {
+    labelText = label.getText();
+}
+}
+    :   #( r:RULE_REF (rarg:ARG_ACTION)? )
+        {
+        grammar.checkRuleReference(#r, #rarg, currentRuleName);
+        Rule rdef = grammar.getRule(#r.getText());
+        // don't insert label=r() if $label.attr not used, no ret value, ...
+        if ( !rdef.getHasReturnValue() ) {
+            labelText = null;
+        }
+        code = getRuleElementST("ruleRef", #r.getText(), #r, astSuffix, labelText);
+		code.setAttribute("rule", r.getText());
+
+		if ( #rarg!=null ) {
+			List args = generator.translateAction(currentRuleName,#rarg);
+			code.setAttribute("args", args);
+		}
+        int i = ((TokenWithIndex)r.getToken()).getIndex();
+		code.setAttribute("elementIndex", i);
+		generator.generateLocalFOLLOW(#r,#r.getText(),currentRuleName,i);
+		#r.code = code;
+        }
+
+    |   #( t:TOKEN_REF (targ:ARG_ACTION)? )
+        {
+           grammar.checkRuleReference(#t, #targ, currentRuleName);
+		   if ( grammar.type==Grammar.LEXER ) {
+				if ( grammar.getTokenType(t.getText())==Label.EOF ) {
+					code = templates.getInstanceOf("lexerMatchEOF");
+				}
+			    else {
+					code = templates.getInstanceOf("lexerRuleRef");
+                    if ( isListLabel(labelText) ) {
+                        code = templates.getInstanceOf("lexerRuleRefAndListLabel");
+                    }
+					code.setAttribute("rule", t.getText());
+					if ( #targ!=null ) {
+						List args = generator.translateAction(currentRuleName,#targ);
+						code.setAttribute("args", args);
+					}
+				}
+                int i = ((TokenWithIndex)#t.getToken()).getIndex();
+			    code.setAttribute("elementIndex", i);
+			    if ( label!=null ) code.setAttribute("label", labelText);
+		   }
+		   else {
+				code = getTokenElementST("tokenRef", #t.getText(), #t, astSuffix, labelText);
+				String tokenLabel =
+				   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
+				code.setAttribute("token",tokenLabel);
+                int i = ((TokenWithIndex)#t.getToken()).getIndex();
+			    code.setAttribute("elementIndex", i);
+			    generator.generateLocalFOLLOW(#t,tokenLabel,currentRuleName,i);
+		   }
+		   #t.code = code;
+		}
+
+    |   c:CHAR_LITERAL
+        {
+		// in a lexer a char literal is a real char match; elsewhere it is
+		// a token type reference
+		if ( grammar.type==Grammar.LEXER ) {
+			code = templates.getInstanceOf("charRef");
+			code.setAttribute("char",
+			   generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,c.getText()));
+			if ( label!=null ) {
+				code.setAttribute("label", labelText);
+			}
+		}
+		else { // else it's a token type reference
+			code = getTokenElementST("tokenRef", "char_literal", #c, astSuffix, labelText);
+			String tokenLabel = generator.getTokenTypeAsTargetLabel(grammar.getTokenType(c.getText()));
+			code.setAttribute("token",tokenLabel);
+            int i = ((TokenWithIndex)#c.getToken()).getIndex();
+			code.setAttribute("elementIndex", i);
+			generator.generateLocalFOLLOW(#c,tokenLabel,currentRuleName,i);
+		}
+        }
+
+    |   s:STRING_LITERAL
+        {
+		if ( grammar.type==Grammar.LEXER ) {
+			code = templates.getInstanceOf("lexerStringRef");
+			code.setAttribute("string",
+			   generator.target.getTargetStringLiteralFromANTLRStringLiteral(generator,s.getText()));
+			if ( label!=null ) {
+				code.setAttribute("label", labelText);
+			}
+		}
+		else { // else it's a token type reference
+			code = getTokenElementST("tokenRef", "string_literal", #s, astSuffix, labelText);
+			String tokenLabel =
+			   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(#s.getText()));
+			code.setAttribute("token",tokenLabel);
+            int i = ((TokenWithIndex)#s.getToken()).getIndex();
+			code.setAttribute("elementIndex", i);
+			generator.generateLocalFOLLOW(#s,tokenLabel,currentRuleName,i);
+		}
+		}
+
+    |   w:WILDCARD
+        {
+		code = getWildcardST(#w,astSuffix,labelText);
+		code.setAttribute("elementIndex", ((TokenWithIndex)#w.getToken()).getIndex());
+		}
+
+    |	code=set[label,astSuffix]
+    ;
+
+// AST construction operator (^ root or ! exclude); matched only.
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
+
+
+// Token-set atom: a BLOCK node carrying a precomputed set value; emits a
+// "matchSet" element with the generated set expression.
+set[GrammarAST label, GrammarAST astSuffix] returns [StringTemplate code=null]
+{
+String labelText=null;
+if ( label!=null ) {
+    labelText = label.getText();
+}
+}
+	:   s:BLOCK // only care that it's a BLOCK with setValue!=null
+        {
+        code = getTokenElementST("matchSet", "set", #s, astSuffix, labelText);
+        int i = ((TokenWithIndex)#s.getToken()).getIndex();
+		code.setAttribute("elementIndex", i);
+		if ( grammar.type!=Grammar.LEXER ) {
+			generator.generateLocalFOLLOW(#s,"set",currentRuleName,i);
+        }
+        code.setAttribute("s", generator.genSetExpr(templates,#s.getSetValue(),1,false));
+        }
+    ;
+
+// Individual member of a set; matched but no code generated here.
+setElement
+    :   c:CHAR_LITERAL
+    |   t:TOKEN_REF
+    |   s:STRING_LITERAL
+    |	#(CHAR_RANGE c1:CHAR_LITERAL c2:CHAR_LITERAL)
+    ;
+
+// REWRITE stuff
+
+// -> rewrite spec for an alternative: template rewrites use
+// "rewriteTemplate"; tree rewrites use "rewriteCode" seeded with all the
+// labels/elements referenced anywhere in the rewrite.  Each (optionally
+// predicated) rewrite alternative is added to the alts attribute.
+rewrite returns [StringTemplate code=null]
+{
+StringTemplate alt;
+if ( #rewrite.getType()==REWRITE ) {
+	if ( generator.grammar.buildTemplate() ) {
+		code = templates.getInstanceOf("rewriteTemplate");
+	}
+	else {
+		code = templates.getInstanceOf("rewriteCode");
+		code.setAttribute("treeLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
+		code.setAttribute("rewriteBlockLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
+        code.setAttribute("referencedElementsDeep",
+                          getTokenTypesAsTargetLabels(#rewrite.rewriteRefsDeep));
+        Set<String> tokenLabels =
+            grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.TOKEN_LABEL);
+        Set<String> tokenListLabels =
+            grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.TOKEN_LIST_LABEL);
+        Set<String> ruleLabels =
+            grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.RULE_LABEL);
+        Set<String> ruleListLabels =
+            grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.RULE_LIST_LABEL);
+        // just in case they ref $r for "previous value", make a stream
+        // from retval.tree
+        StringTemplate retvalST = templates.getInstanceOf("prevRuleRootRef");
+        ruleLabels.add(retvalST.toString());
+        code.setAttribute("referencedTokenLabels", tokenLabels);
+        code.setAttribute("referencedTokenListLabels", tokenListLabels);
+        code.setAttribute("referencedRuleLabels", ruleLabels);
+        code.setAttribute("referencedRuleListLabels", ruleListLabels);
+	}
+}
+}
+	:	(
+			{rewriteRuleRefs = new HashSet();}
+			#( r:REWRITE (pred:SEMPRED)? alt=rewrite_alternative )
+			{
+            rewriteBlockNestingLevel = OUTER_REWRITE_NESTING_LEVEL;
+			List predChunks = null;
+			if ( #pred!=null ) {
+				//predText = #pred.getText();
+        		predChunks = generator.translateAction(currentRuleName,#pred);
+			}
+			String description =
+			    grammar.grammarTreeToString(#r,false);
+			description = generator.target.getTargetStringLiteralFromString(description);
+			code.setAttribute("alts.{pred,alt,description}",
+							  predChunks,
+							  alt,
+							  description);
+			pred=null;
+			}
+		)*
+	;
+
+rewrite_block[String blockTemplateName] returns [StringTemplate code=null]
+{
+rewriteBlockNestingLevel++;
+code = templates.getInstanceOf(blockTemplateName);
+StringTemplate save_currentBlockST = currentBlockST;
+currentBlockST = code;
+code.setAttribute("rewriteBlockLevel", rewriteBlockNestingLevel);
+StringTemplate alt=null;
+}
+    :   #(  BLOCK
+            {
+            currentBlockST.setAttribute("referencedElementsDeep",
+                getTokenTypesAsTargetLabels(#BLOCK.rewriteRefsDeep));
+            currentBlockST.setAttribute("referencedElements",
+                getTokenTypesAsTargetLabels(#BLOCK.rewriteRefsShallow));
+            }
+            alt=rewrite_alternative
+            EOB
+         )
+    	{
+    	code.setAttribute("alt", alt);
+    	rewriteBlockNestingLevel--;
+    	currentBlockST = save_currentBlockST;
+    	}
+    ;
+
+rewrite_alternative
+	returns [StringTemplate code=null]
+{
+StringTemplate el,st;
+}
+    :   {generator.grammar.buildAST()}?
+    	#(	a:ALT {code=templates.getInstanceOf("rewriteElementList");}
+			(	(	{GrammarAST elAST=(GrammarAST)_t;}
+    				el=rewrite_element
+					{code.setAttribute("elements.{el,line,pos}",
+					 					el,
+    							  		Utils.integer(elAST.getLine()),
+    							  		Utils.integer(elAST.getColumn())
+					 					);
+					}
+				)+
+    		|	EPSILON
+    			{code.setAttribute("elements.{el,line,pos}",
+    							   templates.getInstanceOf("rewriteEmptyAlt"),
+    							   Utils.integer(#a.getLine()),
+    							   Utils.integer(#a.getColumn())
+					 			   );
+				}
+    		)
+    		EOA
+    	 )
+    |	{generator.grammar.buildTemplate()}? code=rewrite_template
+    ;
+
+rewrite_element returns [StringTemplate code=null]
+{
+    IntSet elements=null;
+    GrammarAST ast = null;
+}
+    :   code=rewrite_atom[false]
+
+    |   code=rewrite_ebnf
+
+    |   code=rewrite_tree
+    ;
+
+rewrite_ebnf returns [StringTemplate code=null]
+    :   #( OPTIONAL code=rewrite_block["rewriteOptionalBlock"] )
+		{
+		String description = grammar.grammarTreeToString(#rewrite_ebnf, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+		code.setAttribute("description", description);
+		}
+    |   #( CLOSURE code=rewrite_block["rewriteClosureBlock"] )
+		{
+		String description = grammar.grammarTreeToString(#rewrite_ebnf, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+		code.setAttribute("description", description);
+		}
+    |   #( POSITIVE_CLOSURE code=rewrite_block["rewritePositiveClosureBlock"] )
+		{
+		String description = grammar.grammarTreeToString(#rewrite_ebnf, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+		code.setAttribute("description", description);
+		}
+    ;
+
+rewrite_tree returns [StringTemplate code=templates.getInstanceOf("rewriteTree")]
+{
+rewriteTreeNestingLevel++;
+code.setAttribute("treeLevel", rewriteTreeNestingLevel);
+code.setAttribute("enclosingTreeLevel", rewriteTreeNestingLevel-1);
+StringTemplate r, el;
+GrammarAST elAST=null;
+}
+	:   #(	TREE_BEGIN {elAST=(GrammarAST)_t;}
+			r=rewrite_atom[true]
+			{code.setAttribute("root.{el,line,pos}",
+							   r,
+							   Utils.integer(elAST.getLine()),
+							   Utils.integer(elAST.getColumn())
+							  );
+			}
+			( {elAST=(GrammarAST)_t;}
+			  el=rewrite_element
+			  {
+			  code.setAttribute("children.{el,line,pos}",
+							    el,
+							    Utils.integer(elAST.getLine()),
+							    Utils.integer(elAST.getColumn())
+							    );
+			  }
+			)*
+		)
+		{
+		String description = grammar.grammarTreeToString(#rewrite_tree, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+		code.setAttribute("description", description);
+    	rewriteTreeNestingLevel--;
+		}
+    ;
+
+rewrite_atom[boolean isRoot] returns [StringTemplate code=null]
+    :   r:RULE_REF
+    	{
+    	String ruleRefName = #r.getText();
+    	String stName = "rewriteRuleRef";
+    	if ( isRoot ) {
+    		stName += "Root";
+    	}
+    	code = templates.getInstanceOf(stName);
+    	code.setAttribute("rule", ruleRefName);
+    	if ( grammar.getRule(ruleRefName)==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
+									  grammar,
+									  ((GrammarAST)(#r)).getToken(),
+									  ruleRefName);
+    		code = new StringTemplate(); // blank; no code gen
+    	}
+    	else if ( grammar.getRule(currentRuleName)
+    			     .getRuleRefsInAlt(ruleRefName,outerAltNum)==null )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS,
+									  grammar,
+									  ((GrammarAST)(#r)).getToken(),
+									  ruleRefName);
+    		code = new StringTemplate(); // blank; no code gen
+    	}
+    	else {
+    		// track all rule refs as we must copy 2nd ref to rule and beyond
+    		if ( !rewriteRuleRefs.contains(ruleRefName) ) {
+	    		rewriteRuleRefs.add(ruleRefName);
+    		}
+		}
+    	}
+
+    |   ( #(TOKEN_REF (arg:ARG_ACTION)?) | CHAR_LITERAL | STRING_LITERAL )
+    	{
+    	String tokenName = #rewrite_atom.getText();
+    	String stName = "rewriteTokenRef";
+    	Rule rule = grammar.getRule(currentRuleName);
+    	Set tokenRefsInAlt = rule.getTokenRefsInAlt(outerAltNum);
+    	boolean imaginary = !tokenRefsInAlt.contains(tokenName);
+    	if ( imaginary ) {
+    		stName = "rewriteImaginaryTokenRef";
+    	}
+    	if ( isRoot ) {
+    		stName += "Root";
+    	}
+    	code = templates.getInstanceOf(stName);
+    	if ( #arg!=null ) {
+			List args = generator.translateAction(currentRuleName,#arg);
+			code.setAttribute("args", args);
+    	}
+		code.setAttribute("elementIndex", ((TokenWithIndex)#rewrite_atom.getToken()).getIndex());
+		int ttype = grammar.getTokenType(tokenName);
+		String tok = generator.getTokenTypeAsTargetLabel(ttype);
+    	code.setAttribute("token", tok);
+    	if ( grammar.getTokenType(tokenName)==Label.INVALID ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE,
+									  grammar,
+									  ((GrammarAST)(#rewrite_atom)).getToken(),
+									  tokenName);
+    		code = new StringTemplate(); // blank; no code gen
+    	}
+    	}
+
+    |	LABEL
+    	{
+    	String labelName = #LABEL.getText();
+    	Rule rule = grammar.getRule(currentRuleName);
+    	Grammar.LabelElementPair pair = rule.getLabel(labelName);
+    	if ( labelName.equals(currentRuleName) ) {
+    		// special case; ref to old value via $rule
+    		StringTemplate labelST = templates.getInstanceOf("prevRuleRootRef");
+    		code = templates.getInstanceOf("rewriteRuleLabelRef"+(isRoot?"Root":""));
+    		code.setAttribute("label", labelST);
+    	}
+    	else if ( pair==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE,
+									  grammar,
+									  ((GrammarAST)(#LABEL)).getToken(),
+									  labelName);
+			code = new StringTemplate();
+    	}
+    	else {
+			String stName = null;
+			switch ( pair.type ) {
+				case Grammar.TOKEN_LABEL :
+					stName = "rewriteTokenLabelRef";
+					break;
+				case Grammar.RULE_LABEL :
+					stName = "rewriteRuleLabelRef";
+					break;
+				case Grammar.TOKEN_LIST_LABEL :
+					stName = "rewriteTokenListLabelRef";
+					break;
+				case Grammar.RULE_LIST_LABEL :
+					stName = "rewriteRuleListLabelRef";
+					break;
+			}
+			if ( isRoot ) {
+				stName += "Root";
+			}
+			code = templates.getInstanceOf(stName);
+			code.setAttribute("label", labelName);
+		}
+    	}
+
+    |   ACTION
+        {
+        // actions in rewrite rules yield a tree object
+        String actText = #ACTION.getText();
+        List chunks = generator.translateAction(currentRuleName,#ACTION);
+		code = templates.getInstanceOf("rewriteNodeAction"+(isRoot?"Root":""));
+		code.setAttribute("action", chunks);
+        }
+    ;
+
+rewrite_template returns [StringTemplate code=null]
+    :	#( ALT EPSILON EOA ) {code=templates.getInstanceOf("rewriteEmptyTemplate");}
+   	|	#( TEMPLATE (id:ID|ind:ACTION)
+		   {
+		   if ( #id!=null && #id.getText().equals("template") ) {
+		   		code = templates.getInstanceOf("rewriteInlineTemplate");
+		   }
+		   else if ( #id!=null ) {
+		   		code = templates.getInstanceOf("rewriteExternalTemplate");
+		   		code.setAttribute("name", #id.getText());
+		   }
+		   else if ( #ind!=null ) { // must be %({expr})(args)
+		   		code = templates.getInstanceOf("rewriteIndirectTemplate");
+				List chunks=generator.translateAction(currentRuleName,#ind);
+		   		code.setAttribute("expr", chunks);
+		   }
+		   }
+	       #( ARGLIST
+	       	  ( #( ARG arg:ID a:ACTION
+		   		   {
+                   // must set alt num here rather than in define.g
+                   // because actions like %foo(name={$ID.text}) aren't
+                   // broken up yet into trees.
+				   #a.outerAltNum = this.outerAltNum;
+		   		   List chunks = generator.translateAction(currentRuleName,#a);
+		   		   code.setAttribute("args.{name,value}", #arg.getText(), chunks);
+		   		   }
+	             )
+	          )*
+	        )
+		   ( DOUBLE_QUOTE_STRING_LITERAL
+             {
+             String sl = #DOUBLE_QUOTE_STRING_LITERAL.getText();
+			 String t = sl.substring(1,sl.length()-1); // strip quotes
+			 t = generator.target.getTargetStringLiteralFromString(t);
+             code.setAttribute("template",t);
+             }
+		   | DOUBLE_ANGLE_STRING_LITERAL
+             {
+             String sl = #DOUBLE_ANGLE_STRING_LITERAL.getText();
+			 String t = sl.substring(2,sl.length()-2); // strip double angle quotes
+			 t = generator.target.getTargetStringLiteralFromString(t);
+             code.setAttribute("template",t);
+             }
+		   )?
+	     )
+
+	|	act:ACTION
+   		{
+        // set alt num for same reason as ARGLIST above
+        #act.outerAltNum = this.outerAltNum;
+   		code=templates.getInstanceOf("rewriteAction");
+   		code.setAttribute("action",
+   						  generator.translateAction(currentRuleName,#act));
+   		}
+	;
diff --git a/src/org/antlr/codegen/templates/ANTLRCore.sti b/src/org/antlr/codegen/templates/ANTLRCore.sti
new file mode 100644
index 0000000..89117a0
--- /dev/null
+++ b/src/org/antlr/codegen/templates/ANTLRCore.sti
@@ -0,0 +1,374 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+interface ANTLRCore;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals);
+
+/** The header file; make sure to define headerFileExtension() below */
+optional
+headerFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals);
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType,
+      filterMode);
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+       bitsets, ASTLabelType, superClass,
+       labelType, members);
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, labelType, ASTLabelType,
+           superClass, members);
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock);
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize);
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize);
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor);
+
+filteringNextToken();
+
+filteringActionGate();
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description);
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description);
+
+/** A (..)+ block with 0 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+positiveClosureBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+closureBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+/** Optional blocks (x)? are translated to (x|) before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+optionalBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt);
+
+// E L E M E N T S
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex);
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex);
+
+listLabel(label,elem);
+
+/** match a character */
+charRef(char,label);
+
+/** match a character range */
+charRangeRef(a,b,label);
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode);
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode);
+
+/** Match a string literal */
+lexerStringRef(string,label);
+
+wildcard(label,elementIndex);
+
+wildcardAndListLabel(label,elementIndex);
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex);
+
+wildcardCharListLabel(label, elementIndex);
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args);
+
+/** ids+=ID */
+ruleRefAndListLabel(rule,label,elementIndex,args);
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex);
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex);
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex);
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList);
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description);
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState);
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ *
+ *  If a semPredState, don't force lookahead lookup; preds might not
+ *  need.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState);
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a
+ *  rule anything other than 'a' predicts exiting.
+ *
+ *  If a semPredState, don't force lookahead lookup; preds might not
+ *  need.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState);
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt);
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates);
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState);
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState);
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState);
+
+dfaEdgeSwitch(labels, targetState);
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description);
+
+/** Generate the tables and support code needed for the DFAState object
+ *  argument.  Unless there is a semantic predicate (or syn pred, which
+ *  become sem preds), all states should be encoded in the state tables.
+ *  Consequently, cyclicDFAState/cyclicDFAEdge,eotDFAEdge templates are
+ *  not used except for special DFA states that cannot be encoded as
+ *  a transition table.
+ */
+cyclicDFA(dfa);
+
+/** A special state in a cyclic DFA; special means has a semantic predicate
+ *  or it's a huge set of symbols to check.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState);
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.  Again, this is for special
+ *  states.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates);
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates);
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right);
+
+orPredicates(operands);
+
+notPredicate(pred);
+
+evalPredicate(pred,description);
+
+evalSynPredicate(pred,description);
+
+lookaheadTest(atom,k,atomAsInt);
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt);
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt);
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt);
+
+setTest(ranges);
+
+// A T T R I B U T E S
+
+parameterAttributeRef(attr);
+parameterSetAttributeRef(attr,expr);
+
+scopeAttributeRef(scope,attr,index,negIndex);
+scopeSetAttributeRef(scope,attr,expr,index,negIndex);
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope);
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr);
+
+returnAttributeRef(ruleDescriptor,attr);
+returnSetAttributeRef(ruleDescriptor,attr,expr);
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label);
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label);
+
+// not sure the next are the right approach; and they are evaluated early;
+// they cannot see TREE_PARSER or PARSER attributes for example. :(
+
+tokenLabelPropertyRef_text(scope,attr);
+tokenLabelPropertyRef_type(scope,attr);
+tokenLabelPropertyRef_line(scope,attr);
+tokenLabelPropertyRef_pos(scope,attr);
+tokenLabelPropertyRef_channel(scope,attr);
+tokenLabelPropertyRef_index(scope,attr);
+tokenLabelPropertyRef_tree(scope,attr);
+
+ruleLabelPropertyRef_start(scope,attr);
+ruleLabelPropertyRef_stop(scope,attr);
+ruleLabelPropertyRef_tree(scope,attr);
+ruleLabelPropertyRef_text(scope,attr);
+ruleLabelPropertyRef_st(scope,attr);
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label);
+
+lexerRuleLabelPropertyRef_type(scope,attr);
+lexerRuleLabelPropertyRef_line(scope,attr);
+lexerRuleLabelPropertyRef_pos(scope,attr);
+lexerRuleLabelPropertyRef_channel(scope,attr);
+lexerRuleLabelPropertyRef_index(scope,attr);
+lexerRuleLabelPropertyRef_text(scope,attr);
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr);
+rulePropertyRef_stop(scope,attr);
+rulePropertyRef_tree(scope,attr);
+rulePropertyRef_text(scope,attr);
+rulePropertyRef_st(scope,attr);
+
+lexerRulePropertyRef_text(scope,attr);
+lexerRulePropertyRef_type(scope,attr);
+lexerRulePropertyRef_line(scope,attr);
+lexerRulePropertyRef_pos(scope,attr);
+/** Undefined, but present for consistency with Token attributes; set to -1 */
+lexerRulePropertyRef_index(scope,attr);
+lexerRulePropertyRef_channel(scope,attr);
+lexerRulePropertyRef_start(scope,attr);
+lexerRulePropertyRef_stop(scope,attr);
+
+ruleSetPropertyRef_tree(scope,attr,expr);
+ruleSetPropertyRef_st(scope,attr,expr);
+
+/** How to execute an action */
+execAction(action);
+
+// M I S C (properties, etc...)
+
+codeFileExtension();
+
+/** Your language needs a header file; e.g., ".h" */
+optional headerFileExtension();
+
+true();
+false();
diff --git a/src/org/antlr/codegen/templates/C/AST.stg b/src/org/antlr/codegen/templates/C/AST.stg
new file mode 100644
index 0000000..e1035ac
--- /dev/null
+++ b/src/org/antlr/codegen/templates/C/AST.stg
@@ -0,0 +1,616 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+/** Add an adaptor property that knows how to build trees */
+ at headerFile.members() ::= <<
+/* @headerFile.members() */
+pANTLR3_BASE_TREE_ADAPTOR	adaptor;
+pANTLR3_VECTOR_FACTORY		vectors;
+/* End @headerFile.members() */
+>>
+
+/** Install the tree adaptor interface pointer and anything else that
+ *  tree parsers and producers require.
+ */
+ at genericParser.apifuncs() ::= <<
+<if(PARSER)>
+ADAPTOR	= ANTLR3_TREE_ADAPTORNew(instream->tstream->tokenSource->strFactory);<\n>
+<endif>
+ctx->vectors	= antlr3VectorFactoryNew(64);
+>>
+
+ at genericParser.cleanup() ::= <<
+ctx->vectors->close(ctx->vectors);
+<if(PARSER)>
+/* We created the adaptor so we must free it
+ */
+ADAPTOR->free(ADAPTOR);
+<endif>
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+/* @returnScope.ruleReturnMembers() */
+pANTLR3_BASE_TREE	tree;
+/* End @returnScope.ruleReturnMembers() */
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+pANTLR3_BASE_TREE root_0;<\n>
+>>
+
+ruleInitializations() ::= <<
+<super.ruleInitializations()>
+root_0 = NULL;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{pANTLR3_BASE_TREE <it.label.text>_tree;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{pANTLR3_BASE_TREE <it.label.text>_tree;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{pANTLR3_REWRITE_RULE_TOKEN_STREAM stream_<it>;}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{pANTLR3_REWRITE_RULE_SUBTREE_STREAM stream_<it>;}; separator="\n">
+>>
+
+ruleLabelInitializations() ::= <<
+<super.ruleLabelInitializations()>
+<ruleDescriptor.tokenLabels:{<it.label.text>_tree   = NULL;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<it.label.text>_tree   = NULL;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{stream_<it>   = antlr3RewriteRuleTokenStreamNewAE(ADAPTOR,(pANTLR3_UINT8)"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAE(ADAPTOR,(pANTLR3_UINT8)"rule <it>");}; separator="\n">
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.tree  = NULL;
+<endif>
+>>
+
+/**  a rule label including default value */
+ruleLabelInitVal(label) ::= <<
+<super.ruleLabelInitVal(...)>
+<label.label.text>.tree = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(backtracking)>
+if ( BACKTRACKING==0 ) {<\n>
+<endif>
+<if(!ruleDescriptor.isSynPred)>
+	retval.stop = LT(-1);<\n>
+<endif>
+	retval.tree = ADAPTOR->rulePostProcessing(ADAPTOR, root_0);
+	ADAPTOR->setTokenBoundaries(ADAPTOR, retval.tree, retval.start, retval.stop);
+<if(backtracking)>
+}
+<endif>
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{stream_<it>->free(stream_<it>);}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{stream_<it>->free(stream_<it>);}; separator="\n">
+<endif>
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<endif>
+<endif>
+>>
+
+ at alt.initializations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+root_0 = ADAPTOR->nil(ADAPTOR);<\n>
+<endif>
+<endif>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
+<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
+ADAPTOR->addChild(ADAPTOR, root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
+<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
+root_0 = ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ID but track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) <endif>stream_<token>->add(stream_<token>, <label>, NULL);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( BACKTRACKING==0 ) <endif>ADAPTOR->addChild(ADAPTOR, root_0, ADAPTOR->create(ADAPTOR, <label>));})>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( BACKTRACKING==0 ) <endif>root_0 = ADAPTOR->becomeRoot(ADAPTOR, ADAPTOR->create(ADAPTOR, <label>), root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) <endif>ADAPTOR->addChild(ADAPTOR, root_0, <label>.tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) <endif>root_0 = ADAPTOR->becomeRoot(ADAPTOR, <label>.tree, root_0);
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) <endif>stream_<rule>->add(stream_<rule>, <label>.tree, NULL);
+>>
+
+/* How to accumulate lists when we are doing rewrite tracking...
+ */
+listLabelTrack(label) ::= <<
+/* listLabelTrack(label)
+ */
+if (list_<label> == NULL)
+{
+    list_<label>=ctx->vectors->newVector(ctx->vectors);
+}
+list_<label>->add(list_<label>, <label>.tree, NULL);
+>>
+
+/* How to accumulate lists of rule outputs (only allowed with AST
+ * option) but if the user is going to walk the tree, they will want
+ * all their custom elements from rule returns.
+ *
+ * Normally, we use inline structures (which the compiler lays down
+ * code to copy from heap allocations). However, here we want to accumulate copies
+ * of the returned structures because we are adding them to a list. This only makes sense if the
+ * grammar is not rewriting the tree as a tree rewrite only preserves the tree, not the object/structure
+ * returned from the rule. The rewrite will extract the tree pointer. However, if we are not going to 
+ * do a tree re-write, then the user may wish to iterate the structures returned by the rule in 
+ * action code and will expect the user defined returns[] elements to be available when they do this.
+ * Hence we cannot just preserve the tree that was returned. So, we must copy the local structure and provide 
+ * a function that can free the allocated space. We cannot know how to free user allocated elements and
+ * presume that the user will know to do this using their own factories for the structures they allocate.
+ */
+listLabelAST(label) ::= <<
+if (list_<label> == NULL)
+{
+    list_<label>=ctx->vectors->newVector(ctx->vectors);
+}
+{
+    RETURN_TYPE_<label> * tcopy;
+
+    tcopy = ANTLR3_MALLOC(sizeof(RETURN_TYPE_<label>)); /* Note no memory allocation checks! */
+    ANTLR3_MEMMOVE((void *)(tcopy), (const void *)&<label>, sizeof(RETURN_TYPE_<label>));
+    list_<label>->add(list_<label>, tcopy, freeScope);  /* Add whatever the return type is */<\n>
+}
+>>
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefTrack(...)>
+<listLabelTrack(...)>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabelAST(...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefBang(...)>
+<listLabelAST(...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabelAST(...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
+<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
+ADAPTOR->addChild(ADAPTOR, root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
+<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
+root_0 = ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+// there's got to be a better way
+
+// R e w r i t e
+
+rewriteCode(
+	alts, 
+	description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	rewriteBlockLevel, 
+	enclosingTreeLevel, 
+	treeLevel) ::=
+<<
+ 
+/* AST REWRITE
+ * elements          : <referencedElementsDeep; separator=", ">
+ * token labels      : <referencedTokenLabels; separator=", ">
+ * rule labels       : <referencedRuleLabels; separator=", ">
+ * token list labels : <referencedTokenListLabels; separator=", ">
+ * rule list labels  : <referencedRuleListLabels; separator=", ">
+ */
+<if(backtracking)>
+if ( BACKTRACKING==0 ) <\n>
+<endif>
+{
+	<rewriteCodeLabelsDecl()>
+	<rewriteCodeLabelsInit()>
+	root_0			    = ADAPTOR->nil(ADAPTOR);
+	<prevRuleRootRef()>.tree    = root_0;
+	<alts:rewriteAlt(); separator="else ">
+	<rewriteCodeLabelsFree()>
+
+}
+>>
+
+rewriteCodeLabelsDecl() ::= <<
+<referencedTokenLabels
+    :{pANTLR3_REWRITE_RULE_TOKEN_STREAM stream_<it>;};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{pANTLR3_REWRITE_RULE_TOKEN_STREAM stream_<it>;};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{pANTLR3_REWRITE_RULE_SUBTREE_STREAM stream_<it>;};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{pANTLR3_REWRITE_RULE_SUBTREE_STREAM stream_<it>;};
+    separator="\n"
+>
+>>
+
+rewriteCodeLabelsInit() ::= <<
+<referencedTokenLabels
+    :{stream_<it>=antlr3RewriteRuleTokenStreamNewAEE(ADAPTOR, (pANTLR3_UINT8)"token <it>", <it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{stream_<it>=antlr3RewriteRuleTokenStreamNewAEV(ADAPTOR, (pANTLR3_UINT8)"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels 
+    :{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAEE(ADAPTOR, (pANTLR3_UINT8)"token <it>", <it>.tree != NULL ? <it>.tree : NULL);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAEV(ADAPTOR, (pANTLR3_UINT8)"token <it>", list_<it>);};
+    separator="\n"
+>
+>>
+rewriteCodeLabelsFree() ::= <<
+<referencedTokenLabels
+    :{stream_<it>->free(stream_<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{stream_<it>->free(stream_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels 
+    :{stream_<it>->free(stream_<it>);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{stream_<it>->free(stream_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow one like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,
+	rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,		// elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+{
+	if ( <referencedElementsDeep:{el | stream_<el>->hasNext(stream_<el>)}; separator="||"> ) 
+	{
+		<alt>
+	}
+	<referencedElementsDeep:{el | stream_<el>->reset(stream_<el>);<\n>}>
+}<\n>
+>>
+
+rewriteClosureBlock(
+	alt,
+	rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,		// elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+{
+	while ( <referencedElements:{el | stream_<el>->hasNext(stream_<el>)}; separator="||"> ) 
+	{
+		<alt>
+	}
+	<referencedElements:{el | stream_<el>->reset(stream_<el>);<\n>}>
+}<\n>
+>>
+RewriteEarlyExitException() ::=
+<<
+CONSTRUCTEX();
+EXCEPTION->type         = ANTLR3_REWRITE_EARLY_EXCEPTION;
+EXCEPTION->name         = ANTLR3_REWRITE_EARLY_EXCEPTION_NAME;
+>>
+rewritePositiveClosureBlock(
+	alt,
+	rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,		// elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>->hasNext(stream_<el>)}; separator="||">) ) 
+{
+    <RewriteEarlyExitException()>
+}
+else
+{
+	while ( <referencedElements:{el | stream_<el>->hasNext(stream_<el>)}; separator="||"> ) {
+		<alt>
+	}
+	<referencedElements:{el | stream_<el>->reset(stream_<el>);<\n>}>
+}
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) 
+{
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = NULL; /* \<-- rewriteEmptyAlt()) */"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+    pANTLR3_BASE_TREE root_<treeLevel> = ADAPTOR->nil(ADAPTOR);
+    <root:rewriteElement()>
+    <children:rewriteElement()>
+    ADAPTOR->addChild(ADAPTOR, root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args) ::= <<
+<if(args)>
+ADAPTOR->addChildToken(ADAPTOR, root_<treeLevel>, ADAPTOR->createToken(ADAPTOR, <args; separator=", ">));<\n>
+<else>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<token>->next(stream_<token>));<\n>
+<endif>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label>->next(stream_<label>));<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label>->next(stream_<label>));<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = ADAPTOR->becomeRootToken(ADAPTOR, stream_<label>->next(stream_<label>), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args) ::= <<
+<if(args)>
+root_<treeLevel> = ADAPTOR->becomeRootToken(ADAPTOR, ADAPTOR->createToken(ADAPTOR, <args; separator=", ">), root_<treeLevel>);<\n>
+<else>
+root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, stream_<token>->next(stream_<token>), root_<treeLevel>);<\n>
+<endif>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
+<if(args)>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, ADAPTOR->createTypeTokenText(ADAPTOR, <token>, TOKTEXT(<args; separator=", ">)));<\n>
+<else>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, ADAPTOR->createTypeText(ADAPTOR, <token>, (pANTLR3_UINT8)"<token>"));<\n>
+<endif>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
+<if(args)>
+root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, ADAPTOR->createTypeTokenText(ADAPTOR, <token>, TOKTEXT(<args; separator=", ">)), root_<treeLevel>);<\n>
+<else>
+root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, ADAPTOR->createTypeText(ADAPTOR, <token>, (pANTLR3_UINT8)"<token>"), root_<treeLevel>);<\n>
+<endif>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule,dup) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<rule>->next(stream_<rule>));<\n>
+>>
+
+rewriteRuleRefRoot(rule,dup) ::= <<
+root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, stream_<rule>->next(stream_<rule>), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, <action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label>->next(stream_<label>));<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, (pANTLR3_BASE_TREE)(stream_<label>->next(stream_<label>)));<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, stream_<label>->nextNode(stream_<label>), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = ADAPTOR->becomeRoot((pANTLR3_BASE_TREE)(stream_<label>->nextNode(stream_<label>)), root_<treeLevel>);<\n>
+>>
diff --git a/src/org/antlr/codegen/templates/C/ASTDbg.stg b/src/org/antlr/codegen/templates/C/ASTDbg.stg
new file mode 100644
index 0000000..00f0525
--- /dev/null
+++ b/src/org/antlr/codegen/templates/C/ASTDbg.stg
@@ -0,0 +1,45 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ */
+group CASTDbg;
+
+parserMembers() ::= <<
+protected TreeAdaptor adaptor =
+    new DebugTreeAdaptor(dbg,new CommonTreeAdaptor());
+public void setTreeAdaptor(TreeAdaptor adaptor) {
+    this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+}
+public TreeAdaptor getTreeAdaptor() {
+    return adaptor;
+}<\n>
+>>
+
+ at rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
diff --git a/src/org/antlr/codegen/templates/C/C.stg b/src/org/antlr/codegen/templates/C/C.stg
new file mode 100644
index 0000000..c57e21b
--- /dev/null
+++ b/src/org/antlr/codegen/templates/C/C.stg
@@ -0,0 +1,2860 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ Templates and C runtime Copyright (c) 2006-2007 Jim Idle
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ * This code generating template and the associated C runtime was produced by:
+ * Jim Idle jimi|hereisanat|idle|dotgoeshere|ws. 
+ * If it does cause the destruction of the Universe, it will be pretty cool so long as 
+ * I am in a different one at the time. 
+ */
+group C implements ANTLRCore ;
+
+cTypeInitMap ::= [
+	"int"		    : "0",              // Integers     start out being 0
+	"long"		    : "0",              // Longs        start out being 0
+	"float"		    : "0.0",            // Floats       start out being 0
+	"double"	    : "0.0",            // Doubles      start out being 0
+	"ANTLR3_BOOLEAN"    : "ANTLR3_FALSE",   // Booleans     start out being Antlr C for false
+	"byte"		    : "0",              // Bytes        start out being 0
+	"short"		    : "0",              // Shorts       start out being 0
+	"char"		    : "0",              // Chars        start out being 0
+	default		    : "NULL"            // Anything other than an atomic type (above) is a NULL (probably NULL pointer).
+]
+
+leadIn(type) ::=
+<<
+/** \file
+ *  This <type> file was generated by $ANTLR version <ANTLRVersion>
+ *
+ *     -  From the grammar source file : <fileName>
+ *     -                            On : <generatedTimestamp>
+<if(LEXER)>
+ *     -                 for the lexer : <name>Lexer
+<endif>
+<if(PARSER)>
+ *     -                for the parser : <name>Parser
+<endif>
+<if(TREE_PARSER)>
+ *     -           for the tree parser : <name>TreeParser
+<endif>
+ *
+ * Editing it, at least manually, is not wise. 
+ *
+ * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
+ *
+ * View this file with tabs set to 8 (:set ts=8 in gvim) and indent at 4 (:set sw=4 in gvim)
+ *
+>>
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment, 
+            recognizer,
+            name, 
+            tokens, 
+            tokenNames, 
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewrite,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals
+            ) ::=
+<<
+<leadIn("C source")>
+*/
+<if(actions.(actionScope).header)>
+
+/* =============================================================================
+ * This is what the grammar programmer asked us to put at the top of every file.
+ */
+<actions.(actionScope).header>
+/* End of Header action.
+ * =============================================================================
+ */
+<endif>
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#include    "<name>.h"
+<actions.(actionScope).postinclude>
+/* ----------------------------------------- */
+
+<docComment>
+
+<if(literals)>
+/** String literals used by <name> that we must do things like MATCHS() with.
+ *  C will normally just lay down 8 bit characters, and you can use L"xxx" to
+ *  get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so
+ *  we perform this little trick of defining the literals as arrays of UINT32
+ *  and passing in the address of these.
+ */
+<literals:{static ANTLR3_UCHAR	lit_<i>[]  = <it>;}; separator="\n">
+
+<endif>
+
+
+/* Aids in accessing scopes for grammar programmers
+ */
+#undef	SCOPE_TYPE
+#undef	SCOPE_STACK
+#undef	SCOPE_TOP
+#define	SCOPE_TYPE(scope)   p<name>_##scope##_SCOPE
+#define SCOPE_STACK(scope)  p<name>_##scope##Stack
+#define	SCOPE_TOP(scope)    ctx->p<name>_##scope##Top
+#define	SCOPE_SIZE(scope)			(ctx->SCOPE_STACK(scope)->size(ctx->SCOPE_STACK(scope)))
+#define SCOPE_INSTANCE(scope, i)	(ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope),i))
+
+/* MACROS that hide the C interface implementations from the
+ * generated code, which makes it a little more understandable to the human eye.
+ * I am very much against using C pre-processor macros for function calls and bits
+ * of code as you cannot see what is happening when single stepping in debuggers
+ * and so on. The exception (in my book at least) is for generated code, where you are
+ * not maintaining it, but may wish to read and understand it. If you single step it, you know that input()
+ * hides some indirect calls, but is always referring to the input stream. This is
+ * probably more readable than ctx->input->istream->input(snarfle0->blarg) and allows me to rejig
+ * the runtime interfaces without changing the generated code too often, without
+ * confusing the reader of the generated output, who may not wish to know the gory
+ * details of the interface inheritance.
+ */
+<if(LEXER)>
+ 
+/* Macros for accessing things in a lexer
+ */
+#undef	    LEXER
+#undef	    RECOGNIZER		    
+#undef	    RULEMEMO		    
+#undef	    GETCHARINDEX
+#undef	    GETLINE
+#undef	    GETCHARPOSITIONINLINE
+#undef	    EMIT
+#undef	    EMITNEW
+#undef	    MATCHC
+#undef	    MATCHS
+#undef	    MATCHRANGE
+#undef	    LTOKEN
+#undef	    HASFAILED
+#undef	    FAILEDFLAG
+#undef	    INPUT
+#undef	    STRSTREAM
+#undef	    LA
+#undef	    HASEXCEPTION
+#undef	    EXCEPTION
+#undef	    CONSTRUCTEX
+#undef	    CONSUME
+#undef	    LRECOVER
+#undef	    MARK
+#undef	    REWIND
+#undef	    REWINDLAST
+#undef	    BACKTRACKING
+#undef		MATCHANY
+#undef		MEMOIZE
+#undef		HAVEPARSEDRULE
+#undef		GETTEXT
+#undef		INDEX
+#undef		SEEK
+#undef		PUSHSTREAM
+#undef		POPSTREAM
+#undef		SETTEXT
+#undef		SETTEXT8
+
+#define	    LEXER					ctx->pLexer
+#define	    RECOGNIZER			    LEXER->rec
+#define	    RULEMEMO				RECOGNIZER->ruleMemo
+#define	    GETCHARINDEX()			LEXER->getCharIndex(LEXER)
+#define	    GETLINE()				LEXER->getLine(LEXER)
+#define	    GETTEXT()				LEXER->getText(LEXER)
+#define	    GETCHARPOSITIONINLINE() LEXER->getCharPositionInLine(LEXER)
+#define	    EMIT()					LEXER->type = _type; LEXER->emit(LEXER)
+#define	    EMITNEW(t)				LEXER->emitNew(LEXER, t)
+#define	    MATCHC(c)				LEXER->matchc(LEXER, c)
+#define	    MATCHS(s)				LEXER->matchs(LEXER, s)
+#define	    MATCHRANGE(c1,c2)	    LEXER->matchRange(LEXER, c1, c2)
+#define	    MATCHANY()				LEXER->matchAny(LEXER)
+#define	    LTOKEN  				LEXER->token
+#define	    HASFAILED()				(RECOGNIZER->failed == ANTLR3_TRUE)
+#define	    BACKTRACKING			RECOGNIZER->backtracking
+#define	    FAILEDFLAG				RECOGNIZER->failed
+#define	    INPUT					LEXER->input
+#define	    STRSTREAM				INPUT
+#define		INDEX()					INPUT->istream->index(INPUT->istream)
+#define		SEEK(n)					INPUT->istream->seek(INPUT->istream, n)
+#define	    EOF_TOKEN				&(LEXER->tokSource->eofToken)
+#define	    HASEXCEPTION()			(RECOGNIZER->error == ANTLR3_TRUE)
+#define	    EXCEPTION				RECOGNIZER->exception
+#define	    CONSTRUCTEX()			RECOGNIZER->exConstruct(RECOGNIZER)
+#define	    LRECOVER()				LEXER->recover(LEXER)
+#define	    MARK()					INPUT->istream->mark(INPUT->istream)
+#define	    REWIND(m)				INPUT->istream->rewind(INPUT->istream, m)
+#define	    REWINDLAST()			INPUT->istream->rewindLast(INPUT->istream)
+#define		MEMOIZE(ri,si)			RECOGNIZER->memoize(RECOGNIZER, ri, si)
+#define		HAVEPARSEDRULE(r)		RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
+#define		PUSHSTREAM(str)			LEXER->pushCharStream(LEXER, str)
+#define		POPSTREAM()				LEXER->popCharStream(LEXER)
+#define		SETTEXT(str)			LEXER->text = str
+#define		USER1					LEXER->user1
+#define		USER2					LEXER->user2
+#define		USER3					LEXER->user3
+#define		CUSTOM					LEXER->custom
+
+/* If we have been told we can rely on the standard 8 bit or 16 bit input
+ * stream, then we can define our macros to use the direct pointers
+ * in the input object, which is much faster than indirect calls. This
+ * is really only significant to lexers with a lot of fragment rules (which
+ * do not place LA(1) in a temporary at the moment) and even then
+ * only if there is a lot of input (order of say 1M or so).
+ */
+#if	defined(ANTLR3_INLINE_INPUT_ASCII) || defined(ANTLR3_INLINE_INPUT_UTF16)
+
+# ifdef	ANTLR3_INLINE_INPUT_ASCII
+
+/* 8 bit "ASCII" (actually any 8 bit character set) */
+
+#  define	    NEXTCHAR			((pANTLR3_UINT8)(INPUT->nextChar))
+#  define	    DATAP				((pANTLR3_UINT8)(INPUT->data))
+
+# else
+
+#  define	    NEXTCHAR			((pANTLR3_UINT16)(INPUT->nextChar)) 
+#  define	    DATAP				((pANTLR3_UINT16)(INPUT->data))
+
+# endif
+
+# define	    LA(n) ((NEXTCHAR + n) > (DATAP + INPUT->sizeBuf) ? ANTLR3_CHARSTREAM_EOF : (ANTLR3_UCHAR)(*(NEXTCHAR + n - 1)))
+# define	    CONSUME()											\
+{																	\
+    if	(NEXTCHAR \< (DATAP + INPUT->sizeBuf))					\
+    {																\
+		INPUT->charPositionInLine++;								\
+		if  ((ANTLR3_UCHAR)(*NEXTCHAR) == INPUT->newlineChar)		\
+		{															\
+			INPUT->line++;										\
+			INPUT->charPositionInLine	= 0;						\
+			INPUT->currentLine		= (void *)(NEXTCHAR + 1);	\
+		}															\
+		INPUT->nextChar = (void *)(NEXTCHAR + 1);					\
+    }																\
+}
+
+#else
+
+// Pick up the input character by calling the input stream implementation.
+//
+#define	    CONSUME()				INPUT->istream->consume(INPUT->istream)
+#define	    LA(n)					INPUT->istream->_LA(INPUT->istream, n)
+
+#endif
+<endif>
+
+<if(PARSER)>
+/* Macros for accessing things in the parser
+ */
+ 
+#undef	    PARSER		    
+#undef	    RECOGNIZER		    
+#undef	    HAVEPARSEDRULE
+#undef		MEMOIZE
+#undef	    INPUT
+#undef	    STRSTREAM
+#undef	    HASEXCEPTION
+#undef	    EXCEPTION
+#undef	    MATCHT
+#undef	    MATCHANYT
+#undef	    FOLLOWSTACK
+#undef	    FOLLOWPUSH
+#undef	    FOLLOWPOP
+#undef	    PRECOVER
+#undef	    PREPORTERROR
+#undef	    LA
+#undef	    LT
+#undef	    CONSTRUCTEX
+#undef	    CONSUME
+#undef	    MARK
+#undef	    REWIND
+#undef	    REWINDLAST
+#undef	    PERRORRECOVERY
+#undef	    HASFAILED
+#undef	    FAILEDFLAG
+#undef	    RECOVERFROMMISMATCHEDSET
+#undef	    RECOVERFROMMISMATCHEDELEMENT
+#undef		INDEX
+#undef      ADAPTOR
+#undef		SEEK
+
+#define	    PARSER							ctx->pParser  
+#define	    RECOGNIZER						PARSER->rec
+#define	    HAVEPARSEDRULE(r)				RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
+#define		MEMOIZE(ri,si)					RECOGNIZER->memoize(RECOGNIZER, ri, si)
+#define	    INPUT							PARSER->tstream
+#define	    STRSTREAM						INPUT
+#define		INDEX()							INPUT->istream->index(INPUT->istream)
+#define	    HASEXCEPTION()					(RECOGNIZER->error == ANTLR3_TRUE)
+#define	    EXCEPTION						RECOGNIZER->exception
+#define	    MATCHT(t, fs)					RECOGNIZER->match(RECOGNIZER, t, fs)
+#define	    MATCHANYT()						RECOGNIZER->matchAny(RECOGNIZER)
+#define	    FOLLOWSTACK					    RECOGNIZER->following
+#define	    FOLLOWPUSH(x)					FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
+#define	    FOLLOWPOP()						FOLLOWSTACK->pop(FOLLOWSTACK)
+#define	    PRECOVER()						RECOGNIZER->recover(RECOGNIZER)
+#define	    PREPORTERROR()					RECOGNIZER->reportError(RECOGNIZER)
+#define	    LA(n)							INPUT->istream->_LA(INPUT->istream, n)
+#define	    LT(n)							INPUT->_LT(INPUT, n)
+#define	    CONSTRUCTEX()					RECOGNIZER->exConstruct(RECOGNIZER)
+#define	    CONSUME()						INPUT->istream->consume(INPUT->istream)
+#define	    MARK()							INPUT->istream->mark(INPUT->istream)
+#define	    REWIND(m)						INPUT->istream->rewind(INPUT->istream, m)
+#define	    REWINDLAST()					INPUT->istream->rewindLast(INPUT->istream)
+#define		SEEK(n)							INPUT->istream->seek(INPUT->istream, n)
+#define	    PERRORRECOVERY					RECOGNIZER->errorRecovery
+#define	    _fsp							RECOGNIZER->_fsp
+#define	    FAILEDFLAG						RECOGNIZER->failed
+#define	    HASFAILED()						(FAILEDFLAG == ANTLR3_TRUE)
+#define	    BACKTRACKING					RECOGNIZER->backtracking
+#define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
+#define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, s)
+#define     ADAPTOR                         ctx->adaptor
+<endif>
+
+<if(TREE_PARSER)>
+/* Macros for accessing things in the parser
+ */
+ 
+#undef	    PARSER
+#undef	    RECOGNIZER		    
+#undef	    HAVEPARSEDRULE
+#undef	    INPUT
+#undef	    STRSTREAM
+#undef	    HASEXCEPTION
+#undef	    EXCEPTION
+#undef	    MATCHT
+#undef	    MATCHANYT
+#undef	    FOLLOWSTACK
+#undef	    FOLLOWPUSH
+#undef	    FOLLOWPOP
+#undef	    PRECOVER
+#undef	    PREPORTERROR
+#undef	    LA
+#undef	    LT
+#undef	    CONSTRUCTEX
+#undef	    CONSUME
+#undef	    MARK
+#undef	    REWIND
+#undef	    REWINDLAST
+#undef	    PERRORRECOVERY
+#undef	    HASFAILED
+#undef	    FAILEDFLAG
+#undef	    RECOVERFROMMISMATCHEDSET
+#undef	    RECOVERFROMMISMATCHEDELEMENT
+#undef	    BACKTRACKING
+#undef      ADAPTOR
+
+#define	    PARSER							ctx->pTreeParser  
+#define	    RECOGNIZER						PARSER->rec
+#define	    HAVEPARSEDRULE(r)				RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
+#define	    INPUT							PARSER->ctnstream
+#define	    STRSTREAM						INPUT->tnstream
+#define	    HASEXCEPTION()					(RECOGNIZER->error == ANTLR3_TRUE)
+#define	    EXCEPTION						RECOGNIZER->exception
+#define	    MATCHT(t, fs)					RECOGNIZER->match(RECOGNIZER, t, fs)
+#define	    MATCHANYT()						RECOGNIZER->matchAny(RECOGNIZER)
+#define	    FOLLOWSTACK					    RECOGNIZER->following
+#define	    FOLLOWPUSH(x)					FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
+#define	    FOLLOWPOP()						FOLLOWSTACK->pop(FOLLOWSTACK)
+#define	    PRECOVER()						RECOGNIZER->recover(RECOGNIZER)
+#define	    PREPORTERROR()					RECOGNIZER->reportError(RECOGNIZER)
+#define	    LA(n)							INPUT->tnstream->istream->_LA(INPUT->tnstream->istream, n)
+#define	    LT(n)							INPUT->tnstream->_LT(INPUT->tnstream, n)
+#define	    CONSTRUCTEX()					RECOGNIZER->exConstruct(RECOGNIZER)
+#define	    CONSUME()						INPUT->tnstream->istream->consume(INPUT->tnstream->istream)
+#define	    MARK()							INPUT->tnstream->istream->mark(INPUT->tnstream->istream)
+#define	    REWIND(m)						INPUT->tnstream->istream->rewind(INPUT->tnstream->istream, m)
+#define	    REWINDLAST(m)					INPUT->tnstream->istream->rewindLast(INPUT->tnstream->istream)
+#define	    PERRORRECOVERY					RECOGNIZER->errorRecovery
+#define	    _fsp							RECOGNIZER->_fsp
+#define	    FAILEDFLAG						RECOGNIZER->failed
+#define	    HASFAILED()						(FAILEDFLAG == ANTLR3_TRUE)
+#define	    BACKTRACKING					RECOGNIZER->backtracking
+#define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
+#define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, s)
+#define     ADAPTOR                         INPUT->adaptor
+<endif>
+
+#define		TOKTEXT(tok, txt)				tok, (pANTLR3_UINT8)txt
+
+/* The 4 tokens defined below may well clash with your own #defines or token types. If so
+ * then for the present you must use different names for your defines as these are hard coded
+ * in the code generator. It would be better not to use such names internally, and maybe
+ * we can change this in a forthcoming release. I deliberately do not #undef these
+ * here as this will at least give you a redefined error somewhere if they clash.
+ */
+#define	    UP	    ANTLR3_TOKEN_UP
+#define	    DOWN    ANTLR3_TOKEN_DOWN
+#define	    EOR	    ANTLR3_TOKEN_EOR
+#define	    INVALID ANTLR3_TOKEN_INVALID
+
+
+/* =============================================================================
+ * Functions to create and destroy scopes. First come the rule scopes, followed
+ * by the global declared scopes.
+ */
+
+<rules: {r |<if(r.ruleDescriptor.ruleScope)>
+<ruleAttributeScopeFuncDecl(scope=r.ruleDescriptor.ruleScope)>
+<ruleAttributeScopeFuncs(scope=r.ruleDescriptor.ruleScope)>
+<endif>}>
+
+<recognizer.scopes:{<if(it.isDynamicGlobalScope)>
+<globalAttributeScopeFuncDecl(scope=it)>
+<globalAttributeScopeFuncs(scope=it)>
+<endif>}>
+
+/* ============================================================================= */
+
+/* =============================================================================
+ * Start of recognizer
+ */
+
+<recognizer>
+
+/* End of code
+ * =============================================================================
+ */
+
+>>
+// The generated C header file for a recognizer uses the standard ".h" extension.
+headerFileExtension() ::= ".h"
+
+/** Template for the generated C header (.h) file of a lexer, parser or tree
+ *  parser. Emits the context struct typedef, rule entry-point declarations,
+ *  token #defines and the <name>New() constructor prototype.
+ */
+headerFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope, 
+            actions,
+            docComment, 
+            recognizer,
+            name, 
+            tokens, 
+            tokenNames, 
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewrite,
+            profile,
+            backtracking, 
+            synpreds, 
+            memoize, 
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            scopes,
+	    superClass,
+            trace,
+            literals
+        ) ::=
+<<
+<leadIn("C header")>
+<if(PARSER)>
+ * The parser <mainName()>
+<endif>
+<if(LEXER)>
+ * The lexer <mainName()>
+<endif>
+<if(TREE_PARSER)>
+ * The tree parser <mainName()>
+<endif>
+has the callable functions (rules) shown below,
+ * which will invoke the code for the associated rule in the source grammar
+ * assuming that the input stream is pointing to a token/text stream that could begin
+ * this rule.
+ * 
+ * For instance if you call the first (topmost) rule in a parser grammar, you will
+ * get the results of a full parse, but calling a rule half way through the grammar will
+ * allow you to pass part of a full token stream to the parser, such as for syntax checking
+ * in editors and so on.
+ *
+ * The parser entry points are called indirectly (by function pointer to function) via
+ * a parser context typedef p<name>, which is returned from a call to <name>New().
+ *
+<if(LEXER)>
+ * As this is a generated lexer, it is unlikely you will call it 'manually'. However
+ * the entry points are provided anyway.
+ *
+<endif>
+ * The entry points for <name> are  as follows:
+ *
+ * <rules: {r | <if(!r.ruleDescriptor.isSynPred)> - <headerReturnType(ruleDescriptor=r.ruleDescriptor,...)>      p<name>-><r.ruleDescriptor.name>(p<name>)<endif>}; separator="\n * ">
+ *
+ * The return type for any particular rule is of course determined by the source
+ * grammar file.
+ */
+#ifndef	_<name>_H
+#define _<name>_H
+<actions.(actionScope).preincludes>
+/* =============================================================================
+ * Standard antlr3 C runtime definitions
+ */
+#include    \<antlr3.h>
+
+/* End of standard antlr 3 runtime definitions
+ * =============================================================================
+ */
+ <actions.(actionScope).includes>
+<actions.(actionScope).header>
+
+#ifdef	WIN32
+// Disable: Unreferenced parameter,                - Rules with parameters that are not used
+//          constant conditional,                  - ANTLR realizes that a prediction is always true (synpred usually)
+//          initialized but unused variable        - tree rewrite variables declared but not needed
+//          Unreferenced local variable            - lexer rule declares but does not always use _type
+//          potentially uninitialized variable used  - retval always returned from a rule 
+//
+// These are only really displayed at warning level /W4 but that is the code ideal I am aiming at
+// and the codegen must generate some of these warnings by necessity, apart from 4100, which is
+// usually generated when a parser rule is given a parameter that it does not use. Mostly though
+// this is a matter of orthogonality hence I disable that one.
+//
+#pragma warning( disable : 4100 )
+#pragma warning( disable : 4101 )
+#pragma warning( disable : 4127 )
+#pragma warning( disable : 4189 )
+#pragma warning( disable : 4701 )
+#endif
+<if(backtracking)>
+
+/* ========================
+ * BACKTRACKING IS ENABLED
+ * ========================
+ */
+<endif>
+
+<rules:{r |<headerReturnScope(ruleDescriptor=r.ruleDescriptor,...)>}>
+
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(scope=it)><endif>}>
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(scope=it)><endif>}>
+<rules:{r |<ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)>}>
+<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>
+
+
+/** Context tracking structure for <mainName()>
+ */
+typedef struct <name>_Ctx_struct
+{
+    /** Built in ANTLR3 context tracker contains all the generic elements
+     *  required for context tracking.
+     */
+<if(PARSER)>
+    pANTLR3_PARSER   pParser;
+<endif>
+<if(LEXER)>
+    pANTLR3_LEXER    pLexer;
+<endif>
+<if(TREE_PARSER)>
+    pANTLR3_TREE_PARSER	    pTreeParser;
+<endif>
+<scopes:{<if(it.isDynamicGlobalScope)>
+    <globalAttributeScopeDef(scope=it)>
+<endif>}; separator="\n\n">
+<rules: {r |<if(r.ruleDescriptor.ruleScope)>
+    <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)>
+<endif>}>
+
+<if(LEXER)>
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*m<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);<endif>}; separator="\n";>
+<endif>
+<if(PARSER)>
+    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
+<endif>
+<if(TREE_PARSER)>
+    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
+<endif>
+    unsigned char * (*getGrammarFileName)();
+    void	    (*free)   (struct <name>_Ctx_struct * ctx);
+    <@members>
+    <@end>
+    <actions.(actionScope).context>
+}
+    <name>, * p<name>;
+
+<if(LEXER)>
+/* Function prototypes for the lexer functions that external translation units
+ * may wish to call.
+ */
+ANTLR3_API p<name> <name>New         (pANTLR3_INPUT_STREAM     instream);
+<endif>
+<if(PARSER)>
+/* Function prototypes for the parser functions that external translation units
+ * may wish to call.
+ */
+ANTLR3_API p<name> <name>New         (pANTLR3_COMMON_TOKEN_STREAM     instream);
+<endif>
+<if(TREE_PARSER)>
+/* Function prototypes for the treeparser functions that external translation units
+ * may wish to call.
+ */
+ANTLR3_API p<name> <name>New         (pANTLR3_COMMON_TREE_NODE_STREAM     instream);
+<endif>
+/** Symbolic definitions of all the tokens that the <grammarType()> will work with.
+ * \{
+ *
+ * Antlr will define EOF, but we can't use that as it is too common in
+ * C header files and that would be confusing. There is no way to filter this out at the moment
+ * so we just undef it here for now. That isn't the value we get back from C recognizers
+ * anyway. We are looking for ANTLR3_TOKEN_EOF.
+ */
+#ifdef	EOF
+#undef	EOF
+#endif
+#ifdef	Tokens
+#undef	Tokens
+#endif 
+<tokens:{#define <it.name>      <it.type>}; separator="\n">
+#ifdef	EOF
+#undef	EOF
+#define	EOF	ANTLR3_TOKEN_EOF
+#endif
+
+/* End of token definitions for <name>
+ * =============================================================================
+ */
+/** \} */
+
+#endif
+/* END - Note:Keep extra linefeed to satisfy UNIX systems */
+
+>>
+
+/** Human-readable name of the recognizer kind ("parser", "lexer" or
+ *  "tree parser"); used inside generated doc comments.
+ */
+grammarType() ::= <<
+<if(PARSER)>
+parser
+<endif>
+<if(LEXER)>
+lexer
+<endif>
+<if(TREE_PARSER)>
+tree parser
+<endif>
+>>
+
+/** The recognizer's main name; all three branches emit \<name> — the
+ *  conditional structure mirrors grammarType() for consistency.
+ */
+mainName() ::= <<
+<if(PARSER)>
+<name>
+<endif>
+<if(LEXER)>
+<name>
+<endif>
+<if(TREE_PARSER)>
+<name>
+<endif>
+>>
+
+// Return-scope struct declaration emitted into the header; delegates to returnScope().
+headerReturnScope(ruleDescriptor) ::= "<returnScope(...)>"
+
+// Return type of a rule's entry point as written in the header; delegates to returnType().
+headerReturnType(ruleDescriptor) ::= "<returnType()>"
+
+// Produce the lexer output
+//
+// NOTE(review): in the filter-mode MEMOIZE/HAVEPARSEDRULE redefinitions below,
+// the call inside the braces previously had no terminating semicolon, which
+// produces invalid C ("{ expr }") whenever the macro is expanded; the ';' is
+// now emitted inside the braces.
+lexer(  grammar,
+		name,
+        tokens,
+        scopes,
+        rules, 
+        numRules, 
+        labelType="pANTLR3_COMMON_TOKEN",
+        filterMode) ::= <<
+
+<if(filterMode)>
+/* Forward declare implementation function for ANTLR3_TOKEN_SOURCE interface when
+ * this is a filter mode lexer.
+ */
+static pANTLR3_COMMON_TOKEN <name>NextToken   (pANTLR3_TOKEN_SOURCE toksource);
+
+/* Override the normal MEMOIZE and HAVEALREADYPARSED macros as this is a filtering
+ * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather
+ * than just BACKTRACKING. In some cases this might generate code akin to:
+ *   if (BACKTRACKING) if (BACKTRACKING > 1) memoize.
+ * However, I assume that the C compilers/optimizers are smart enough to work this one out
+ * these days - Jim
+ */
+#undef		MEMOIZE
+#define		MEMOIZE(ri,si)			if (BACKTRACKING>1) { RECOGNIZER->memoize(RECOGNIZER, ri, si); }
+#undef		HAVEPARSEDRULE
+#define		HAVEPARSEDRULE(r)		if (BACKTRACKING>1) { RECOGNIZER->alreadyParsedRule(RECOGNIZER, r); }
+<endif>
+
+/* Forward declare the locally static matching functions we have generated and any predicate functions.
+ */
+<rules:{r | static ANTLR3_INLINE <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<if(!r.ruleDescriptor.isSynPred)>m<endif><r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
+static void	<name>Free(p<name> ctx);
+
+/* =========================================================================
+ * Lexer matching rules end.
+ * =========================================================================
+ */
+
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+
+<actions.lexer.members>
+
+static void
+<name>Free  (p<name> ctx)
+{
+<if(memoize)>
+    RULEMEMO->free(RULEMEMO);
+<endif>
+    LEXER->free(LEXER);
+    
+    ANTLR3_FREE(ctx);
+}
+
+/** \brief Name of the grammar file that generated this code
+ */
+static unsigned char fileName[] = "<fileName>";
+
+/** \brief Return the name of the grammar file that generated this code.
+ */
+static unsigned char * getGrammarFileName()
+{
+	return fileName;
+}
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+
+/** \brief Create a new lexer called <name>
+ *
+ * \param[in] instream Pointer to an initialized input stream
+ *
+ * \return 
+ *     - Success p<name> initialized for the lex start
+ *     - Fail (p<name>)(ANTLR3_ERR_NOMEM)
+ */
+ANTLR3_API p<name> <name>New         (pANTLR3_INPUT_STREAM     instream)
+{
+    p<name> lexCtx; /* Context structure we will build and return   */
+
+    lexCtx = (p<name>) ANTLR3_MALLOC(sizeof(<name>));
+
+    if  (lexCtx == NULL)
+    {
+        /* Failed to allocate memory for lexer context */
+        return  (p<name>)ANTLR3_ERR_NOMEM;
+    }
+
+    /* -------------------------------------------------------------------
+     * Memory for basic structure is allocated, now to fill in
+     * in base ANTLR3 structures. We intialize the function pointers
+     * for the standard ANTLR3 lexer function set, but upon return
+     * from here, the programmer may set the pointers to provide custom
+     * implementations of each function. 
+     *
+     * We don't use the macros defined in <name>.h here so you can get a sense
+     * of what goes where.
+     */
+    
+    /* Create a base lexer, using the supplied input stream
+     */
+    lexCtx->pLexer	= antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream);
+
+    /* Check that we allocated the memory correctly
+     */
+    if	(lexCtx->pLexer == (pANTLR3_LEXER)ANTLR3_ERR_NOMEM)
+    {
+	ANTLR3_FREE(lexCtx);
+	return  (p<name>)ANTLR3_ERR_NOMEM;
+    }
+<if(memoize)>
+    /* Create a LIST for recording rule memos.
+     */
+    lexCtx->pLexer->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */
+<endif>
+
+    /* Install the implementation of our <name> interface
+     */
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)>lexCtx->m<r.ruleDescriptor.name>	= m<r.ruleDescriptor.name>;<endif>}; separator="\n";>
+    
+    /** When the nextToken() call is made to this lexer's pANTLR3_TOKEN_SOURCE
+     *  it will call mTokens() in this generated code, and will pass it the ctx
+     * pointer of this lexer, not the context of the base lexer, so store that now.
+     */
+    lexCtx->pLexer->ctx	    = lexCtx;
+    
+    /** Install the token matching function
+     */
+    lexCtx->pLexer->mTokens = (void (*) (void *))(mTokens);
+    
+    lexCtx->getGrammarFileName	= getGrammarFileName;
+    lexCtx->free		= <name>Free;
+    
+<if(filterMode)>
+    /* We have filter mode turned on, so install the filtering nextToken function
+     */
+    lexCtx->pLexer->tokSource->nextToken = <name>NextToken;
+<endif>
+
+	 <actions.lexer.apifuncs>
+	 
+    /* Return the newly built lexer to the caller
+     */
+    return  lexCtx;
+}
+<if(cyclicDFAs)>
+
+/* =========================================================================
+ * DFA tables for the lexer
+ */
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+/* =========================================================================
+ * End of DFA tables for the lexer
+ */
+<endif> 
+
+/* =========================================================================
+ * Functions to match the lexer grammar defined tokens from the input stream
+ */
+
+<rules; separator="\n\n">
+
+/* =========================================================================
+ * Lexer matching rules end.
+ * =========================================================================
+ */
+<if(synpreds)>
+
+/* =========================================================================
+ * Lexer syntactic predicates
+ */
+<synpreds:{p | <lexerSynpred(predname=p)>}>
+/* =========================================================================
+ * Lexer syntactic predicates end.
+ * =========================================================================
+ */
+<endif>
+
+/* End of Lexer code
+ * ================================================
+ * ================================================
+ */ 
+
+>>
+
+
+// nextToken() override installed only when the lexer is generated in filter
+// mode; the mark/rewind/backtracking sequence below is order-sensitive.
+filteringNextToken() ::= <<
+/** An override of the lexer's nextToken() method that backtracks over mTokens() looking
+ *  for matches in lexer filterMode.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  BACKTRACKING needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at BACKTRACKING==1.
+ */
+static pANTLR3_COMMON_TOKEN 
+<name>NextToken(pANTLR3_TOKEN_SOURCE toksource) 
+{
+    pANTLR3_LEXER   lexer;
+
+    lexer   = (pANTLR3_LEXER)(toksource->super);
+    
+    /* Get rid of any previous token (token factory takes care of
+     * any deallocation when this token is finally used up.
+     */
+    lexer		->token	    = NULL;
+    lexer->rec	->error	    = ANTLR3_FALSE;	    /* Start out without an exception	*/
+    lexer->rec	->failed    = ANTLR3_FALSE;
+
+    /* Record the start of the token in our input stream.
+     */
+    lexer->tokenStartCharIndex			= lexer->input->istream->index(lexer->input->istream);
+    lexer->tokenStartCharPositionInLine	= lexer->input->getCharPositionInLine(lexer->input);
+    lexer->tokenStartLine				= lexer->input->getLine(lexer->input);
+    lexer->text							= NULL;
+
+    /* Now call the matching rules and see if we can generate a new token
+     */
+    for	(;;)
+    {
+		if  (lexer->input->istream->_LA(lexer->input->istream, 1) == ANTLR3_CHARSTREAM_EOF)
+		{
+			/* Reached the end of the stream, nothing more to do.
+			 */
+			pANTLR3_COMMON_TOKEN    teof = &(toksource->eofToken);
+
+			teof->setStartIndex (teof, lexer->getCharIndex(lexer));
+			teof->setStopIndex  (teof, lexer->getCharIndex(lexer));
+			teof->setLine		(teof, lexer->getLine(lexer));
+			return  teof;
+		}
+		
+		lexer->token		= NULL;
+		lexer->rec->error	= ANTLR3_FALSE;	    /* Start out without an exception	*/
+		
+		{
+			ANTLR3_UINT64   m;
+		    
+			m							= lexer->input->istream->mark(lexer->input->istream);
+			lexer->rec->backtracking	= 1;				/* No exceptions */
+			lexer->rec->failed			= ANTLR3_FALSE;
+		 
+			/* Call the generated lexer, see if it can get a new token together.
+			 */
+			lexer->mTokens(lexer->ctx);   
+    		lexer->rec->backtracking	= 0;
+	    	    
+    		<! mTokens backtracks with synpred at BACKTRACKING==2
+				and we set the synpredgate to allow actions at level 1. !>
+	               
+			if	(lexer->rec->failed == ANTLR3_TRUE)
+			{
+				lexer->input->istream->rewind(lexer->input->istream, m);
+				lexer->input->istream->consume(lexer->input->istream); <! advance one char and try again !>
+			}
+			else
+			{
+				lexer->emit(lexer);					/* Assemble the token and emit it to the stream */
+				return	lexer->token;
+			}	
+		}
+    }
+}
+>>
+
+// Gate expression placed around embedded actions in filter mode: actions only
+// run at the outermost backtracking level (mTokens starts at BACKTRACKING==1).
+filteringActionGate() ::= "BACKTRACKING==1"
+
+/** How to generate a parser. Shared body for both parser() and treeParser();
+ *  emits the token-name table, rule forward declarations, the <name>New()
+ *  constructor, <name>Free(), follow-set bitsets and the rule bodies.
+ */
+genericParser(  grammar,
+				name, 
+                scopes, 
+                tokens, 
+                tokenNames, 
+                rules, 
+                numRules,
+                bitsets,
+                inputStreamType,
+                superClass,
+                ASTLabelType="pANTLR3_BASE_TREE",
+                labelType,
+				members
+              ) ::= <<
+
+/** \brief Table of all token names in symbolic order, mainly used for
+ *         error reporting.
+ */
+static pANTLR3_UINT8   <name>TokenNames[]
+     = {
+        (pANTLR3_UINT8) "\<invalid>",       /* String to print to indicate an invalid token */
+        (pANTLR3_UINT8) "\<EOR>",
+        (pANTLR3_UINT8) "\<DOWN>", 
+        (pANTLR3_UINT8) "\<UP>", 
+        <tokenNames:{(pANTLR3_UINT8) <it>}; separator=",\n">
+       };
+
+    <@members>
+
+    <@end>
+
+/* Forward declare the locally static matching functions we have generated.
+ */
+<rules:{r | static <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
+static void	<name>Free(p<name> ctx);
+
+/* Function to initialize bitset APIs
+ */
+static	void <name>LoadFollowSets();
+
+/* For use in tree output where we are accumulating rule labels via label += ruleRef
+ * we need a function that knows how to free a return scope when the list is destroyed. 
+ * We cannot just use ANTLR3_FREE because in debug tracking mode, this is a macro.
+ */
+static	void ANTLR3_CDECL freeScope(void * scope)
+{
+    ANTLR3_FREE(scope);
+}
+
+/** \brief Name of the grammar file that generated this code
+ */
+static unsigned char fileName[] = "<fileName>";
+
+/** \brief Return the name of the grammar file that generated this code.
+ */
+static unsigned char * getGrammarFileName()
+{
+	return fileName;
+}
+/** \brief Create a new <name> parser and return a context for it.
+ *
+ * \param[in] instream Pointer to an input stream interface.
+ *
+ * \return Pointer to new parser context upon success.
+ */
+ANTLR3_API p<name>
+<name>New   (<inputStreamType> instream)
+{
+    p<name> ctx;	    /* Context structure we will build and return   */
+    
+    ctx	= (p<name>) ANTLR3_MALLOC(sizeof(<name>));
+    
+    if	(ctx == NULL)
+    {
+	/* Failed to allocate memory for parser context */
+        return  (p<name>)ANTLR3_ERR_NOMEM;
+    }
+    
+    /* -------------------------------------------------------------------
+     * Memory for basic structure is allocated, now to fill in
+     * the base ANTLR3 structures. We intialize the function pointers
+     * for the standard ANTLR3 parser function set, but upon return
+     * from here, the programmer may set the pointers to provide custom
+     * implementations of each function. 
+     *
+     * We don't use the macros defined in <name>.h here, in order that you can get a sense
+     * of what goes where.
+     */
+
+<if(PARSER)>
+    /* Create a base parser/recognizer, using the supplied token stream
+     */
+    ctx->pParser	    = antlr3ParserNewStream(ANTLR3_SIZE_HINT, instream->tstream);
+<endif>
+<if(TREE_PARSER)>
+    /* Create a base Tree parser/recognizer, using the supplied tree node stream
+     */
+    ctx->pTreeParser		= antlr3TreeParserNewStream(ANTLR3_SIZE_HINT, instream);
+<endif>
+
+    /* Install the implementation of our <name> interface
+     */
+    <rules:{r | ctx-><r.ruleDescriptor.name>	= <r.ruleDescriptor.name>;}; separator="\n";>
+
+    ctx->free			= <name>Free;
+    ctx->getGrammarFileName	= getGrammarFileName;
+    
+    /* Install the scope pushing methods.
+     */
+    <rules: {r |<if(r.ruleDescriptor.ruleScope)>
+<ruleAttributeScope(scope=r.ruleDescriptor.ruleScope)><\n>
+<endif>}>
+    <recognizer.scopes:{<if(it.isDynamicGlobalScope)>
+<globalAttributeScope(scope=it)><\n>
+<endif>}>
+    <@apifuncs>
+
+    <@end>
+    
+    <actions.parser.apifuncs>
+    <actions.treeparser.apifuncs>
+<if(memoize)>
+    /* Create a LIST for recording rule memos.
+     */
+<if(TREE_PARSER)>
+    ctx->pTreeParser->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
+<else>
+    ctx->pParser->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
+<endif>
+<endif>	
+    /* Install the token table
+     */
+    RECOGNIZER->tokenNames   = <name>TokenNames;
+    
+    /* Initialize the follow bit sets
+     */
+    <name>LoadFollowSets();
+    
+    /* Return the newly built parser to the caller
+     */
+    return  ctx;
+}
+
+/** Free the parser resources
+ */
+ static void
+ <name>Free(p<name> ctx)
+ {
+    /* Free any scope memory
+     */
+    <rules: {r |<if(r.ruleDescriptor.ruleScope)><ruleAttributeScopeFree(scope=r.ruleDescriptor.ruleScope)><\n><endif>}>
+    <recognizer.scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeFree(scope=it)><\n><endif>}>
+    
+    <@cleanup>
+    <@end>
+<if(TREE_PARSER)>
+    ctx->pTreeParser->free(ctx->pTreeParser);<\n>
+<else>
+    ctx->pParser->free(ctx->pParser);<\n>
+<endif>
+    ANTLR3_FREE(ctx);
+
+    /* Everything is released, so we can return
+     */
+    return;
+ }
+ 
+/** Return token names used by this <grammarType()>
+ *
+ * The returned pointer is used as an index into the token names table (using the token 
+ * number as the index).
+ * 
+ * \return Pointer to first char * in the table.
+ */
+static pANTLR3_UINT8    *getTokenNames() 
+{
+        return <name>TokenNames; 
+}
+
+    <members>
+    
+/* Declare the bitsets
+ */
+<bitsets:bitsetDeclare(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+     
+/** Load up the static bitsets for following set for error recovery.
+ *  \remark
+ *  These are static after the parser is generated, hence they are static
+ *  declarations in the parser and are thread safe after initialization.
+ */
+static
+void <name>LoadFollowSets()
+{
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+    return;
+}
+
+<if(cyclicDFAs)>
+
+/* =========================================================================
+ * DFA tables for the parser
+ */
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+/* =========================================================================
+ * End of DFA tables for the parser
+ */
+<endif> 
+ 
+/* ==============================================
+ * Parsing rules
+ */
+<rules; separator="\n\n">
+/* End of parsing rules
+ * ==============================================
+ */
+
+/* ==============================================
+ * Syntactic predicates
+ */
+<synpreds:{p | <synpred(predname=p)>}>
+/* End of syntactic predicates
+ * ==============================================
+ */
+
+ 
+ 
+
+
+>>
+
+/** How to generate a token-stream parser: instantiate the generic
+ *  parser template with a pANTLR3_COMMON_TOKEN_STREAM input stream.
+ */
+parser(	grammar, 
+		name, 
+		scopes, 
+		tokens, 
+		tokenNames, 
+		rules, 
+		numRules,
+		bitsets, 
+		ASTLabelType, 
+		superClass="Parser", 
+		labelType="pANTLR3_COMMON_TOKEN", 
+		members={<actions.parser.members>}
+		) ::= <<
+<genericParser(inputStreamType="pANTLR3_COMMON_TOKEN_STREAM", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type (a common tree node stream).
+ */
+treeParser(	grammar, 
+			name, 
+			scopes, 
+			tokens, 
+			tokenNames, 
+			globalAction, 
+			rules, 
+			numRules, 
+			bitsets, 
+			labelType={<ASTLabelType>}, 
+			ASTLabelType="pANTLR3_BASE_TREE", 
+			superClass="TreeParser", 
+			members={<actions.treeparser.members>}
+			) ::= <<
+<genericParser(inputStreamType="pANTLR3_COMMON_TREE_NODE_STREAM", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ *
+ *  NOTE(review): the trace lines previously emitted Java expressions
+ *  (input.LT(1), failed) into the generated C source, which cannot
+ *  compile; they now emit C fprintf calls using the runtime macros.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+static void <ruleName>_fragment(p<name> ctx <ruleDescriptor.parameterScope:parameterScope(scope=it)>) 
+{   
+<if(trace)>
+    fprintf(stderr, "enter <ruleName> %d failed = %d, backtracking = %d\\n", LA(1), FAILEDFLAG, BACKTRACKING);
+    <block>
+    fprintf(stderr, "exit <ruleName> %d, failed = %d, backtracking = %d\\n", LA(1), FAILEDFLAG, BACKTRACKING);
+    
+<else>
+    <block>
+<endif>
+<ruleCleanUp()>
+}
+// $ANTLR end <ruleName>
+>>
+
+/* Entry point for a syntactic predicate: mark the input, run the
+ * predicate fragment with backtracking enabled, then rewind and report
+ * whether the fragment matched (no exception/failure flag set).
+ */
+synpred(predname) ::= <<
+static ANTLR3_BOOLEAN <predname>(p<name> ctx) 
+{
+    ANTLR3_UINT64   start;
+    ANTLR3_BOOLEAN  success;
+
+    BACKTRACKING++;
+    <@start()>
+    start	= MARK();
+    <predname>_fragment(ctx);	    // can never throw exception
+    success	= !(FAILEDFLAG);
+    REWIND(start);
+    <@stop()>
+    BACKTRACKING--;
+    FAILEDFLAG	= ANTLR3_FALSE;
+    return success;
+}<\n>
+>>
+
+/* Lexer syntactic predicates are generated exactly like parser ones. */
+lexerSynpred(predname) ::= <<
+<synpred(predname)>
+>>
+
+/* Memoization check at the top of a rule: if this rule already ran at
+ * this input position during backtracking, bail out immediately.
+ */
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( (BACKTRACKING>0) && (HAVEPARSEDRULE(<ruleDescriptor.index>)) )
+{
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!ruleDescriptor.isSynPred)>
+	retval.start = 0;
+	<scopeClean()><\n>
+<endif>
+<endif>
+    return <ruleReturnValue()>; 
+}
+<endif>
+>>
+
+/** How to test for failure and return from rule (cleaning scopes first) */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>
+if (HASFAILED())
+{
+    <scopeClean()>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if (BACKTRACKING>0)
+{
+    FAILEDFLAG = <true()>; 
+    <scopeClean()>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ *
+ *  NOTE(review): the trace lines previously emitted Java code into the
+ *  generated C (printf with a missing argument and %s on a token pointer;
+ *  System.out.println on exit); they now emit C fprintf calls whose
+ *  format specifiers match their arguments.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+/** 
+ * $ANTLR start <ruleName>
+ * <fileName>:<description>
+ */
+static <returnType()>
+<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope(scope=it)>)
+{   
+    <if(trace)>fprintf(stderr, "enter <ruleName> %d failed=%d, backtracking=%d\n", LA(1), FAILEDFLAG, BACKTRACKING);<endif>
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.declarations>
+    <ruleLabelDefs()>
+    <ruleInitializations()>
+    <ruleDescriptor.actions.init>
+    <ruleMemoization(name=ruleName)>
+    <ruleLabelInitializations()>
+    <@preamble()>
+    {
+        <block>
+    }
+    
+    <ruleCleanUp()>
+<if(exceptions)>
+    if	(HASEXCEPTION())
+    {
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+    }
+    else
+    {
+	<(ruleDescriptor.actions.after):execAction()>
+    }
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    if (HASEXCEPTION())
+    {
+        PREPORTERROR();
+        PRECOVER();
+    }
+<if(ruleDescriptor.actions.after)>
+    else
+    {
+	<(ruleDescriptor.actions.after):execAction()>
+    }<\n>
+<endif>
+<endif>
+<endif>
+<endif>
+    <if(trace)>fprintf(stderr, "exit <ruleName> %d failed=%d backtracking=%d\n", LA(1), FAILEDFLAG, BACKTRACKING);<endif>
+    <memoize()>
+    <finally>
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+/* $ANTLR end <ruleName> */
+>>
+
+/* Catch clause for an exceptions-spec on a rule.  The caller (rule)
+ * invokes this as catch(decl=e.decl, action=e.action), so reference the
+ * declared parameters directly instead of relying on StringTemplate's
+ * dynamic lookup of the caller's 'e' attribute (same values either way).
+ */
+catch(decl,action) ::= <<
+/* catch(decl,action)
+ */
+if  ((HASEXCEPTION()) && (EXCEPTION->type == <decl>) )
+{
+    <action>
+}
+>>
+
+/* Declare the return value(s) for a rule: a retval struct when there are
+ * multiple return values, otherwise one initialized local per attribute;
+ * plus the memoization start index when memoizing.
+ */
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval;<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+ANTLR3_UINT64 <ruleDescriptor.name>_StartIndex;
+<endif>
+>>
+
+/* Record the memoization start index and push any dynamic scopes this
+ * rule uses or declares.
+ */
+ruleInitializations() ::= <<
+/* Initialize rule variables
+ */
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex = INDEX();<\n>
+<endif>
+<ruleDescriptor.useScopes:{<scopeTop(sname=it)> = <scopePush(sname=it)>;}; separator="\n">
+<ruleDescriptor.ruleScope:{<scopeTop(sname=it.name)> = <scopePush(sname=it.name)>;}; separator="\n">
+>>
+
+/* Declare token labels, list labels (vectors), and rule labels for a
+ * parser rule.
+ */
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<labelType>    <it.label.text>;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{pANTLR3_VECTOR    list_<it.label.text>;}; separator="\n"
+>
+<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
+    :ruleLabelDef(label=it); separator="\n"
+>
+>>
+
+/* NULL-initialize all labels; record the rule start token when a retval
+ * struct is in use (not for syn-preds, which have no return values).
+ */
+ruleLabelInitializations() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<it.label.text>       = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text>     = NULL;}; separator="\n"
+>
+<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
+    :ruleLabelInitVal(label=it); separator="\n"
+>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!ruleDescriptor.isSynPred)>
+retval.start = LT(1);<\n>
+<endif>
+<endif>
+>>
+
+/* Declare labels used by a lexer rule.
+ * NOTE(review): ruleDescriptor.ruleListLabels was previously listed twice
+ * in the list-label group, which would emit duplicate pANTLR3_INT_TRIE
+ * declarations (a C compile error) for x+=rule labels; duplicate removed.
+ */
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text>;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{ANTLR3_UINT32 <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{pANTLR3_INT_TRIE list_<it.label.text>;}; separator="\n"
+>
+>>
+
+/* Initialize lexer rule labels: plain labels to NULL, list labels to a
+ * fresh int-trie.
+ * NOTE(review): ruleDescriptor.ruleListLabels was previously listed twice,
+ * which would allocate two tries for one label (leaking the first);
+ * duplicate removed.
+ */
+lexerRuleLabelInit() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text> = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text> = antlr3IntTrieNew(31);}; separator="\n"
+>
+>>
+
+/* Release lexer rule labels on exit: NULL the plain labels and free the
+ * list-label tries.
+ * NOTE(review): ruleDescriptor.ruleListLabels was previously listed twice,
+ * which would emit a double free of the same trie; duplicate removed.
+ */
+lexerRuleLabelFree() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text> = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text>->free(list_<it.label.text>);}; separator="\n"
+>
+>>
+
+/* The expression a rule returns: the single named value, the retval
+ * struct, or nothing (syn-pred fragments return void).
+ */
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+/* Record the memoization result for this rule when backtracking. */
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( BACKTRACKING>0 ) { MEMOIZE(<ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/* Common rule exit point: the ruleEx label targeted by gotos on error,
+ * scope cleanup, and recording of the rule stop token.
+ */
+ruleCleanUp() ::= <<
+
+// This is where rules clean up and exit
+//
+goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
+rule<ruleDescriptor.name>Ex: ;
+<scopeClean()>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+<if(!ruleDescriptor.isSynPred)>
+retval.stop = LT(-1);<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+/* Pop every dynamic scope this rule pushed (used and declared scopes). */
+scopeClean() ::= <<
+<ruleDescriptor.useScopes:{<scopePop(sname=it)>}; separator="\n">
+<ruleDescriptor.ruleScope:{<scopePop(sname=it.name)>}; separator="\n">
+
+>>
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules, which do not produce tokens.
+ *
+ *  NOTE(review): the enter-trace previously emitted Java
+ *  System.out.println into the generated C, and the exit-trace referenced
+ *  the Java-only 'failed' variable; both now use C fprintf with the
+ *  FAILEDFLAG macro.  (The bare 'memoize' reference near the end emits
+ *  the attribute value, not the memoize() template — confirm intended.)
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+//   Comes from: <block.description>
+/** \brief Lexer rule generated by ANTLR3
+ *
+ * $ANTLR start <ruleName>
+ *
+ * Looks to match the characters that constitute the token <ruleName>
+ * from the attached input stream.
+ *
+ *
+ * \remark
+ *  - lexer->error == ANTLR3_TRUE if an exception was thrown.
+ */
+static ANTLR3_INLINE
+void m<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope(scope=it)>)
+{
+	ANTLR3_UINT32	_type;
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.declarations>
+    <lexerRuleLabelDefs()>
+    <if(trace)>fprintf(stderr, "enter <ruleName> '%c' line=%d:%d failed=%d, backtracking=%d\n", LA(1), GETLINE(), GETCHARPOSITIONINLINE(), FAILEDFLAG, BACKTRACKING);<endif>
+
+<if(nakedBlock)>
+    <ruleMemoization(name=ruleName)>
+    <lexerRuleLabelInit()>
+    <ruleDescriptor.actions.init>
+        
+    <block><\n>
+<else>
+    <ruleMemoization(name=ruleName)>   
+    <lexerRuleLabelInit()>
+    _type	    = <ruleName>;
+       
+    <ruleDescriptor.actions.init>
+    
+    <block>
+	LEXER->type = _type;
+<endif>
+    <if(trace)> fprintf(stderr, "exit <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),FAILEDFLAG,BACKTRACKING);<endif>
+    <ruleCleanUp()>
+    <lexerRuleLabelFree()>
+    <(ruleDescriptor.actions.after):execAction()>
+    <memoize>
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.  This becomes mTokens(), the single
+ *  entry point a token stream calls to fetch the next token.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+/** This is the entry point in to the lexer from an object that
+ *  wants to generate the next token, such as a pCOMMON_TOKEN_STREAM
+ */
+static void 
+mTokens(p<name> ctx)
+{
+    <block><\n>
+    
+    goto ruleTokensEx; /* Prevent compiler warnings */
+ruleTokensEx: ;
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives: predict an alt number,
+ *  then switch to the matching alternative's code. */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+
+// <fileName>:<description>
+{
+    int alt<decisionNumber>=<maxAlt>;
+    <decls>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    <@prebranch()>
+    switch (alt<decisionNumber>) 
+    {
+	<alts:altSwitchCase()>
+    }
+    <@postbranch()>
+}
+>>
+
+/** A rule block with multiple alternatives (outermost block of a rule) */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+{
+    //  <fileName>:<description>
+    
+    ANTLR3_UINT32 alt<decisionNumber>;
+
+    alt<decisionNumber>=<maxAlt>;
+
+    <decls>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+    {
+	<alts:altSwitchCase()>
+    }
+}
+>>
+
+/** A rule block with a single alternative: no decision needed. */
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives: loop until the decision
+ *  predicts no alternative; raise early-exit if zero iterations matched. */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+{
+    int cnt<decisionNumber>=0;
+    <decls>
+    <@preloop()>
+
+    for (;;)
+    {
+        int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>) 
+	{
+	    <alts:altSwitchCase()>
+	    default:
+	    
+		if ( cnt<decisionNumber> >= 1 )
+		{
+		    goto loop<decisionNumber>;
+		}
+		<ruleBacktrackFailure()>
+		<earlyExitEx()>
+		<@earlyExitException()>
+		goto rule<ruleDescriptor.name>Ex;
+	}
+	cnt<decisionNumber>++;
+    }
+    loop<decisionNumber>: ;	/* Jump to here if this rule does not match */
+    <@postloop()>
+}
+>>
+
+/* Construct an early-exit exception for a (..)+ block that matched zero
+ * occurrences.  NOTE(review): the emitted comment previously said
+ * "mismatchedSetEx()" — a copy/paste slip; corrected to name this template.
+ */
+earlyExitEx() ::= <<
+/* earlyExitEx()
+ */
+CONSTRUCTEX();
+EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION;
+EXCEPTION->name = ANTLR3_EARLY_EXIT_NAME;
+<\n>
+>>
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives: loop until the decision
+ *  predicts no alternative, then fall out via the loop label. */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+
+// <fileName>:<description>
+<decls>
+
+<@preloop()>
+for (;;)
+{
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+    {
+	<alts:altSwitchCase()>
+	default:
+	    goto loop<decisionNumber>;	/* break out of the loop */
+	    break;
+    }
+}
+loop<decisionNumber>: ; /* Jump out to here if this rule does not match */
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by antlr before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i>:
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+// <fileName>:<description>
+{
+    <@declarations()>
+    <@initializations()>
+    <elements:element()>
+    <@cleanup()>
+}
+>>
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex) ::= <<
+<if(label)>
+<label> = (<labelType>)LT(1);<\n>
+<endif>
+MATCHT(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>); 
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID : match a token and append it to the label's list */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/* Lazily create the label's vector, then append the matched element. */
+listLabel(label,elem) ::= <<
+if (list_<label> == NULL)
+{
+    list_<label>=ctx->vectors->newVector(ctx->vectors);
+}
+list_<label>->add(list_<label>, <elem>, NULL);
+>>
+
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = LA(1);<\n>
+<endif>
+MATCHC(<char>); 
+<checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = LA(1);<\n>
+<endif>
+MATCHRANGE(<a>, <b>); 
+<checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline:
+ *  consume on a match, otherwise raise/recover a mismatched-set error. */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= LA(1);<\n>
+<else>
+<label>=(<labelType>)LT(1);<\n>
+<endif>
+<endif>
+if ( <s> )
+{
+    CONSUME();
+    <postmatchCode>
+<if(!LEXER)>
+    PERRORRECOVERY=ANTLR3_FALSE;
+<endif>
+    <if(backtracking)>FAILEDFLAG=ANTLR3_FALSE;<\n><endif>
+}
+else 
+{
+    <ruleBacktrackFailure()>
+    <mismatchedSetEx()>
+    <@mismatchedSetException()>
+<if(LEXER)>
+    LRECOVER();
+<else>
+    RECOVERFROMMISMATCHEDSET(&FOLLOW_set_in_<ruleName><elementIndex>);
+<endif>
+    goto rule<ruleDescriptor.name>Ex;
+}<\n>
+>>
+
+/* Build the mismatched-set exception (with expected set in a parser). */
+mismatchedSetEx() ::= <<
+CONSTRUCTEX();
+EXCEPTION->type         = ANTLR3_MISMATCHED_SET_EXCEPTION;
+EXCEPTION->name         = ANTLR3_MISMATCHED_SET_NAME;
+<if(PARSER)>
+EXCEPTION->expectingSet = &FOLLOW_set_in_<ruleName><elementIndex>;
+<endif>
+>>
+
+/* ids+=set : match a set element and append it to the label's list */
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal; with a label, manufacture a token spanning
+ *  the matched characters. */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int <label>Start = GETCHARINDEX();
+MATCHS(<string>); 
+<checkRuleBacktrackFailure()>
+<labelType> <label> = LEXER->tokFactory->newToken(LEXER->tokFactory);
+<label>->setType(<label>, ANTLR3_TOKEN_INVALID);
+<label>->setStartIndex(<label>, <label>Start);
+<label>->setStopIndex(<label>, GETCHARINDEX()-1);
+<label>->input = INPUT->tnstream->istream;
+<else>
+MATCHS(<string>); 
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+/* Match any token (parser '.' wildcard), optionally labelled. */
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label>=(<labelType>)LT(1);<\n>
+<endif>
+MATCHANYT(); 
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = LA(1);<\n>
+<endif>
+MATCHANY(); 
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  Pushes/pops the follow set and
+ *  propagates any exception to this rule's exit label.
+ */
+ruleRef(rule,label,elementIndex,args) ::= <<
+FOLLOWPUSH(FOLLOW_<rule>_in_<ruleName><elementIndex>);
+<if(label)>
+<label>=<rule>(ctx<if(args)>, <args; separator=", "><endif>);<\n>
+<else>
+<rule>(ctx<if(args)>, <args; separator=", "><endif>);<\n>
+<endif>
+FOLLOWPOP();
+if  (HASEXCEPTION())
+{
+    goto rule<ruleDescriptor.name>Ex;
+}
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference; with a label, manufacture a token spanning
+ *  the characters the referenced rule matched. */
+lexerRuleRef(rule,label,args,elementIndex) ::= <<
+/* <description> */
+<if(label)>
+{
+    ANTLR3_UINT64 <label>Start<elementIndex> = GETCHARINDEX();
+    m<rule>(ctx <if(args)>, <endif><args; separator=", ">); 
+    <checkRuleBacktrackFailure()>
+    <label> = LEXER->tokFactory->newToken(LEXER->tokFactory);
+    <label>->setType(<label>, ANTLR3_TOKEN_INVALID);
+    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
+    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
+    <label>->input = INPUT;
+}
+<else>
+m<rule>(ctx <if(args)>, <endif><args; separator=", ">); 
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer; with a label, manufacture an EOF token. */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+{
+    ANTLR3_UINT64 <label>Start<elementIndex>;
+    <labelType> <label>;
+    <label>Start<elementIndex> = GETCHARINDEX();
+    MATCHC(ANTLR3_CHARSTREAM_EOF); 
+    <checkRuleBacktrackFailure()>
+    <label> = LEXER->tokFactory->newToken(LEXER->tokFactory);
+    <label>->setType(<label>, ANTLR3_TOKEN_EOF);
+    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
+    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
+    <label>->input = INPUT->tnstream->istream;
+}
+<else>
+    MATCHC(ANTLR3_CHARSTREAM_EOF); 
+    <checkRuleBacktrackFailure()>
+    <endif>
+>>
+
+/** match ^(root children) in tree parser; a nullable child list makes
+ *  the DOWN...UP section conditional on seeing a DOWN token. */
+tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( LA(1)==ANTLR3_TOKEN_DOWN ) {
+    MATCHT(ANTLR3_TOKEN_DOWN, NULL); 
+    <checkRuleBacktrackFailure()>
+    <children:element()>
+    MATCHT(ANTLR3_TOKEN_UP, NULL); 
+    <checkRuleBacktrackFailure()>
+}
+<else>
+MATCHT(ANTLR3_TOKEN_DOWN, NULL); 
+<checkRuleBacktrackFailure()>
+<children:element()>
+MATCHT(ANTLR3_TOKEN_UP, NULL); 
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) 
+{
+    <ruleBacktrackFailure()>
+    <newFPE(...)>
+}
+>>
+
+/* Construct a failed-predicate exception. */
+newFPE() ::= <<
+    CONSTRUCTEX();
+    EXCEPTION->type         = ANTLR3_FAILED_PREDICATE_EXCEPTION;
+    EXCEPTION->message      = "<description>";
+    EXCEPTION->ruleName	 = "<ruleName>";
+    <\n>
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+/* A fixed-lookahead DFA state rendered as an if/else chain over LA(k);
+ * the final else either takes the EOT-predicted alt or raises
+ * no-viable-alt. */
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+
+{
+    int LA<decisionNumber>_<stateNumber> = LA(<k>);
+    <edges; separator="\nelse ">
+    else 
+    {
+<if(eotPredictsAlt)>
+        alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+        <ruleBacktrackFailure()>
+    
+        <newNVException()>    
+        goto rule<ruleDescriptor.name>Ex;
+
+<endif>
+    }
+}
+>>
+
+/* Construct a no-viable-alternative exception for the current decision. */
+newNVException() ::= <<
+CONSTRUCTEX();
+EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
+EXCEPTION->message      = "<description>";
+EXCEPTION->decisionNum  = <decisionNumber>;
+EXCEPTION->state        = <stateNumber>;
+<@noViableAltException()>
+<\n>
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+    int LA<decisionNumber>_<stateNumber> = LA(<k>);
+    <edges; separator="\nelse ">
+}
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+
+dfaLoopbackStateDecls()::= <<
+ANTLR3_UINT32   LA<decisionNumber>_<stateNumber>;
+>>
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+   /* dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)
+    */
+    int LA<decisionNumber>_<stateNumber> = LA(<k>);
+    <edges; separator="\nelse "><\n>
+    <if(eotPredictsAlt)>
+    <if(!edges)>
+	alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+	<else>    
+    else 
+    {
+	alt<decisionNumber>=<eotPredictsAlt>;
+    }<\n>
+    <endif>
+    <endif>
+}
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) 
+{
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( LA(<k>) ) 
+{
+<edges; separator="\n">
+
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    <newNVException()>
+    goto rule<ruleDescriptor.name>Ex;<\n>
+<endif>
+}<\n>
+>>
+
+/* Switch form of dfaOptionalBlockState: no default/error clause. */
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( LA(<k>) ) 
+{
+    <edges; separator="\n">
+}<\n>
+>>
+
+/* Switch form of dfaLoopbackState: EOT prediction becomes the default. */
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( LA(<k>) ) 
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+/* One or more case labels that all fall into the same target state. */
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+	{
+		<targetState>
+	}
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, INPUT->istream, &cdfa<decisionNumber>);
+>>
+
+/* Dump DFA tables as static initialized arrays of shorts(16 bits)/characters(8 bits)
+ * which are then used to statically initialize the dfa structure, which means that there
+ * is no runtime initialization whatsoever, other than anything the C compiler might
+ * need to generate. In general the C compiler will lay out memory such that there is no 
+ * runtime code required.
+ *
+ * Emits: eot/eof/min/max/accept/special arrays, the (possibly shared)
+ * transition sub-tables, an optional special-state transition function,
+ * and the statically initialized ANTLR3_CYCLIC_DFA tracking structure.
+ */
+cyclicDFA(dfa) ::= <<
+/** Static dfa state tables for Cyclic dfa:
+ *    <dfa.description>
+ */
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
+    {
+	<dfa.eot; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
+    {
+	<dfa.eof; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
+    {
+	<dfa.min; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
+    {
+	<dfa.max; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
+    {
+	<dfa.accept; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
+    {	
+	<dfa.special; wrap="\n", separator=", ", null="-1">
+    };
+
+/** Used when there is no transition table entry for a particular state */
+#define dfa<dfa.decisionNumber>_T_empty	    NULL
+
+<dfa.edgeTransitionClassMap.keys:{ table |
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_T<i0>[] =
+    {
+	<table; separator=", ", wrap="\n", null="-1">
+    };}; null = "">
+
+/* Transition tables are a table of sub tables, with some tables
+ * reused for efficiency.
+ */
+static const ANTLR3_INT32 * const dfa<dfa.decisionNumber>_transitions[] =
+{
+    <dfa.transitionEdgeTables:{xref|dfa<dfa.decisionNumber>_T<xref>}; separator=", ", wrap="\n", null="_empty">	
+};
+
+<if(dfa.specialStateSTs)>
+static ANTLR3_INT32 dfa<dfa.decisionNumber>_sst(p<name> ctx, pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s)
+{
+    ANTLR3_INT32    _s;
+    
+    _s	    = s;
+    switch  (s)
+    {
+    <dfa.specialStateSTs:{state |
+    case <i0>:
+    
+	<state>}; separator="\n">
+    }
+<if(backtracking)>
+    if (BACKTRACKING > 0)
+    {
+	FAILEDFLAG = ANTLR3_TRUE;
+	return	-1;
+    }
+<endif>
+    
+    CONSTRUCTEX();
+    EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
+    EXCEPTION->message      = "<dfa.description>";
+    EXCEPTION->decisionNum  = <dfa.decisionNumber>;
+    EXCEPTION->state        = _s;
+    <@noViableAltException()>
+    return -1;
+}
+<endif>
+
+<@errorMethod()>
+
+/* Declare tracking structure for Cyclic DFA <dfa.decisionNumber>
+ */
+static
+ANTLR3_CYCLIC_DFA cdfa<dfa.decisionNumber>
+    =	{
+	    <dfa.decisionNumber>,		    /* Decision number of this dfa	    */
+	    /* Which decision this represents:   */
+	    (const pANTLR3_UCHAR)"<dfa.description>",	
+<if(dfa.specialStateSTs)>
+	    (CDFA_SPECIAL_FUNC) dfa<dfa.decisionNumber>_sst,
+<else>
+	    (CDFA_SPECIAL_FUNC) antlr3dfaspecialStateTransition,	/* Default special state transition function	*/
+<endif>
+
+	    antlr3dfaspecialTransition,		/* DFA specialTransition is currently just a default function in the runtime */
+	    antlr3dfapredict,			/* DFA simulator function is in the runtime */
+	    dfa<dfa.decisionNumber>_eot,	    /* EOT table			    */
+	    dfa<dfa.decisionNumber>_eof,	    /* EOF table			    */
+	    dfa<dfa.decisionNumber>_min,	    /* Minimum tokens for each state    */
+	    dfa<dfa.decisionNumber>_max,	    /* Maximum tokens for each state    */
+	    dfa<dfa.decisionNumber>_accept,	/* Accept table			    */
+	    dfa<dfa.decisionNumber>_special,	/* Special transition states	    */
+	    dfa<dfa.decisionNumber>_transitions	/* Table of transition tables	    */
+
+	};	    
+/* End of Cyclic DFA <dfa.decisionNumber>
+ * ---------------------
+ */
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+{
+    ANTLR3_UINT32 LA<decisionNumber>_<stateNumber>;<\n>
+    ANTLR3_UINT32 index<decisionNumber>_<stateNumber>;<\n>
+
+	LA<decisionNumber>_<stateNumber> = LA(1);<\n>
+    <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+    index<decisionNumber>_<stateNumber> = INDEX();<\n>
+    REWINDLAST();<\n>
+    <endif>
+    s = -1;
+    <edges; separator="\nelse ">
+	<if(semPredState)> <! return input cursor to state before we rewound !>
+	SEEK(index<decisionNumber>_<stateNumber>);<\n>
+	<endif>
+    if ( s>=0 ) 
+    {
+	return s;
+    }
+}
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
+{
+    s = <targetStateNumber>;
+}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+ s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "( (<left>) && (<right>) )"
+
+orPredicates(operands) ::= "((<first(operands)>)<rest(operands):{o | ||(<o>)}>)"
+
+notPredicate(pred) ::= "!( <evalPredicate(...)> )"
+
+evalPredicate(pred,description) ::= "<pred>"
+
+evalSynPredicate(pred,description) ::= "<pred>(ctx)"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+((LA<decisionNumber>_<stateNumber> \>= <lower>) && (LA<decisionNumber>_<stateNumber> \<= <upper>))
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "((LA(<k>) \>= <lower>) && (LA(<k>) \<= <upper>))"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+/* Emit the typedef'd struct that holds one stack frame of a dynamic
+ * attribute scope, including the user-supplied attribute declarations.
+ * NOTE(review): fixed the "POinter" typo in the emitted doc comment.
+ */
+makeScopeSet() ::= <<
+/* makeScopeSet() 
+ */
+ /** Definition of the <scope.name> scope variable tracking
+ *  structure. An instance of this structure is created by calling
+ *  <name>_<scope.name>Push().
+ */
+typedef struct  <scopeStruct(sname=scope.name,...)>_struct
+{
+    /** Function that the user may provide to be called when the
+     *  scope is destroyed (so you can free pANTLR3_HASH_TABLES and so on)
+     *
+     * \param Pointer to an instance of this typedef/struct
+     */
+    void    (ANTLR3_CDECL *free)	(struct <scopeStruct(sname=scope.name,...)>_struct * frame);
+    
+    /* =============================================================================
+     * Programmer defined variables...
+     */
+    <scope.attributes:{<it.decl>;}; separator="\n">
+
+    /* End of programmer defined variables
+     * =============================================================================
+     */
+} 
+    <scopeStruct(sname=scope.name,...)>, * <scopeType(sname=scope.name,...)>;
+
+>>
+
+globalAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeDecl(scope)
+ */
+<makeScopeSet(...)>
+<endif>
+>>
+
+ruleAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeDecl(scope)
+ */
+<makeScopeSet(...)>
+<endif>
+>>
+
+globalAttributeScopeFuncDecl(scope) ::= 
+<<
+/* globalAttributeScopeFuncDecl(scope)
+ */
+<if(scope.attributes)>
+/* -----------------------------------------------------------------------------
+ * Function declaration for creating a <name>_<scope.name> scope set 
+ */
+static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
+/* ----------------------------------------------------------------------------- */
+
+<endif>
+>>
+
+globalAttributeScopeFuncMacro(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeFuncMacro(scope)
+ */
+/** Macro for popping the top value from a <scopeStack(sname=scope.name)>
+ */
+#define <scopePopName(sname=scope.name,...)>()  SCOPE_TOP(<scope.name>) = ctx-><scopeStack(sname=scope.name,...)>->pop(ctx-><scopeStack(sname=scope.name,...)>)
+
+<endif>
+>>
+
+ruleAttributeScopeFuncDecl(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeFuncDecl(scope)
+ */
+/* -----------------------------------------------------------------------------
+ * Function declarations for creating a <name>_<scope.name> scope set 
+ */
+static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
+/* ----------------------------------------------------------------------------- */
+
+<endif>
+>>
+
+ruleAttributeScopeFuncMacro(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeFuncMacro(scope)
+ */
+/** Macro for popping the top value from a <scopeStack(sname=scope.name,...)>
+ */
+#define <scopePopName(sname=scope.name,...)>()  SCOPE_TOP(<scope.name>) = ctx-><scopeStack(sname=scope.name,...)>->pop(ctx-><scopeStack(sname=scope.name,...)>)
+
+<endif>
+>>
+globalAttributeScopeDef(scope) ::= 
+<<
+/* globalAttributeScopeDef(scope)
+ */
+<if(scope.attributes)>
+/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
+ *  and <scopePopName(sname=scope.name,...)>()
+ */
+pANTLR3_STACK <scopeStack(sname=scope.name)>;
+/** Pointer to the top of the stack for the global scope <scopeStack(sname=scope.name)>
+ */
+<scopeType(sname=scope.name,...)>    (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
+<scopeType(sname=scope.name,...)>    <scopeTopDecl(sname=scope.name,...)>;
+
+<endif>
+>>
+
+ruleAttributeScopeDef(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeDef(scope)
+ */
+/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
+ *  and <scopePopName(sname=scope.name,...)>()
+ */
+pANTLR3_STACK <scopeStack(sname=scope.name,...)>;
+<scopeType(sname=scope.name,...)>   (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
+<scopeType(sname=scope.name,...)>   <scopeTopDecl(sname=scope.name,...)>;
+
+<endif>
+>>
+
+globalAttributeScopeFuncs(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeFuncs(scope)
+ */
+<attributeFuncs(scope)>
+<endif>
+>>
+
+ruleAttributeScopeFuncs(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeFuncs(scope)
+ */
+<attributeFuncs(scope)>
+<endif>
+>>
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScope(scope)  
+ */
+ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
+ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(ANTLR3_SIZE_HINT);
+<scopeTop(sname=scope.name,...)>      = NULL;
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= 
+<<
+<if(scope.attributes)>
+/* ruleAttributeScope(scope)
+ */
+ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
+ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(ANTLR3_SIZE_HINT);
+<scopeTop(sname=scope.name,...)>      = NULL;
+<endif>
+>>
+globalAttributeScopeFree(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeFree(scope)
+ */
+ctx-><scopeStack(sname=scope.name,...)>-\>free(ctx-><scopeStack(sname=scope.name,...)>);
+<endif>
+>>
+
+ruleAttributeScopeFree(scope) ::= 
+<<
+<if(scope.attributes)>
+/* ruleAttributeScopeFree(scope)
+ */
+ctx-><scopeStack(sname=scope.name,...)>-\>free(ctx-><scopeStack(sname=scope.name,...)>);
+<endif>
+>>
+
+scopeTopDecl(sname) ::= <<
+p<name>_<sname>Top
+>>
+
+scopeTop(sname) ::= <<
+ctx-><scopeTopDecl(sname=sname,...)>
+>>
+
+scopePop(sname) ::= <<
+<scopePopName(sname=sname,...)>();
+>>
+
+scopePush(sname) ::= <<
+p<name>_<sname>Push(ctx)
+>>
+
+scopePopName(sname) ::= <<
+p<name>_<sname>Pop
+>>
+
+scopePushName(sname) ::= <<
+p<name>_<sname>Push
+>>
+
+scopeType(sname) ::= <<
+p<name>_<sname>_SCOPE
+>>
+
+scopeStruct(sname) ::= <<
+<name>_<sname>_SCOPE
+>>
+
+scopeStack(sname) ::= <<
+p<name>_<sname>Stack
+>>
+
+attributeFuncs(scope) ::= <<
+<if(scope.attributes)>
+/* attributeFuncs(scope)
+ */
+
+static void ANTLR3_CDECL <scope.name>Free(void * data)
+{
+    ANTLR3_FREE(data);
+}
+
+/** \brief Allocate initial memory for a <name> <scope.name> scope variable stack entry and
+ *         add it to the top of the stack.
+ *
+ * \remark
+ * By default the structure is freed with ANTLR3_FREE(), but you can use
+ * the \@init action to install a pointer to a custom free() routine by
+ * adding the code: 
+ * \code 
+ *   <scopeTop(sname=scope.name)>->free = myroutine;
+ * \endcode
+ *
+ * With lots of comments of course! The routine should be declared in
+ * \@members { } as: 
+ * \code
+ *   void ANTLR3_CDECL myfunc( <scopeType(sname=scope.name)> ptr). 
+ * \endcode
+ *
+ * It should perform any custom freeing stuff that you need (call ANTLR3_FREE, not free())
+ * then free the entry it is given with: 
+ * \code
+ *   ANTLR3_FREE(ptr);
+ * \endcode
+ * 
+ */ 
+static <scopeType(sname=scope.name)>
+<scopePushName(sname=scope.name)>(p<name> ctx)
+{
+    /* Pointer used to create a new set of attributes
+     */
+    <scopeType(sname=scope.name)>      newAttributes;
+
+    /* Allocate the memory for a new structure
+     */
+    newAttributes = (<scopeType(sname=scope.name)>) ANTLR3_MALLOC(sizeof(<scopeStruct(sname=scope.name)>));
+
+    if  (newAttributes != NULL)
+    {
+	/* Standard ANTLR3 library implementation
+	 */
+	ctx-><scopeStack(sname=scope.name)>->push(ctx-><scopeStack(sname=scope.name)>, newAttributes, <scope.name>Free);
+	
+	/* Return value is the pointer to the new entry, which may be used locally
+	 * without de-referencing via the context.
+	 */
+    }
+ 
+    /* Calling routine will throw an exception if this
+     * fails and this pointer is NULL.
+     */
+    return  newAttributes;
+}<\n>
+
+<endif>
+>>
+
+returnType() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<name>_<ruleDescriptor.name>_return
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+<else>
+ANTLR3_BOOLEAN
+<endif>
+>>
+
+/** Generate the C type associated with a single or multiple return
+ *  value(s).
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<name>_<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "0".
+ */
+initValue(typeName) ::= <<
+<cTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label  */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text>;
+#undef	RETURN_TYPE_<label.label.text>
+#define	RETURN_TYPE_<label.label.text> <ruleLabelType(referencedRule=label.referencedRule)><\n>
+>>
+/**  Rule label default value */
+ruleLabelInitVal(label) ::= <<
+<if(label.referencedRule.hasSingleReturnValue)>
+<label.label.text> = <initValue(label.referencedRule.singleValueReturnType)>;
+<endif>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+typedef struct <returnType(...)>_struct
+{
+<if(!TREE_PARSER)>
+    /** Generic return elements for ANTLR3 rules that are not in tree parsers or returning trees
+     */
+    pANTLR3_COMMON_TOKEN    start;
+    pANTLR3_COMMON_TOKEN    stop;
+<else>
+    pANTLR3_BASE_TREE       start;
+<endif>
+    <@ruleReturnMembers()>   
+    <ruleDescriptor.returnScope.attributes:{<it.decl>;}; separator="\n">
+}
+    <returnType(...)>;<\n><\n>
+<endif>
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;"
+
+/** Note that the scopeAttributeRef does not have access to the
+ * grammar name directly
+ */
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), ctx->SCOPE_STACK(scope)->size(ctx->SCOPE_STACK(scope))-<negIndex>-1)-><attr.name>
+<else>
+<if(index)>
+((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), (ANTLR3_UINT64)<index>))-><attr.name>
+<else>
+SCOPE_TOP(<scope>)-><attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), ctx->SCOPE_STACK(scope)->size(ctx->SCOPE_STACK(scope))-<negIndex>-1)-><attr.name> = <expr>;
+<else>
+<if(index)>
+((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), (ANTLR3_UINT64)<index>))-><attr.name> = <expr>;
+<else>
+SCOPE_TOP(<scope>)-><attr.name>=<expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "ctx->SCOPE_STACK(<scope>)"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>.<attr.name>
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>=<expr>;
+<else>
+<attr.name>=<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+//
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>->getText(<scope>)"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>->getType(<scope>)"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>->getLine(<scope>)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>->getCharPositionInLine(<scope>)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>->getChannel(<scope>)"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>->getTokenIndex(<scope>)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>->tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+STRSTREAM->toStringSS(STRSTREAM, <scope>.start, <scope>.start)
+<else>
+STRSTREAM->toStringTT(STRSTREAM, <scope>.start, <scope>.stop)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>->getType(<scope>)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>->getLine(<scope>)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>->getCharPositionInLine(<scope>)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>->getChannel(<scope>)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>->getTokenIndex(<scope>)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>->getText(<scope>)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval).start"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval).stop"
+rulePropertyRef_tree(scope,attr) ::= "((<labelType>)retval).tree"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+INPUT->toStringSS(INPUT, ADAPTOR->getTokenStartIndex(ADAPTOR, retval.start), ADAPTOR->getTokenStopIndex(ADAPTOR, retval.start))
+<else>
+STRSTREAM->toStringTT(STRSTREAM, retval.start, LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "LEXER->getText(LEXER)"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "LEXER->tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "LEXER->tokenStartCharPositionInLine"
+lexerRulePropertyRef_channel(scope,attr) ::= "LEXER->channel"
+lexerRulePropertyRef_start(scope,attr) ::= "LEXER->tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(LEXER->getCharIndex(LEXER)-1)"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+
+
+// setting $st and $tree is allowed in local rule. everything else is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "((<labelType>)retval).tree=<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;"
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> ) 
+{
+    <action>
+}
+<else>
+if ( BACKTRACKING == 0 ) 
+{
+    <action>
+}
+<endif>
+<else>
+{
+    <action>
+}
+<endif>
+>>
+
+// M I S C (properties, etc...)
+
+bitsetDeclare(name, words64) ::= <<
+
+/** Bitset defining follow set for error recovery in rule state: <name>  */
+static	ANTLR3_BITWORD <name>_bits[]	= { <words64:{ANTLR3_UINT64_LIT(<it>)}; separator=", "> };
+static  ANTLR3_BITSET <name>	= { <name>_bits, <length(words64)>	};
+>>
+
+bitset(name, words64) ::= <<
+antlr3BitsetSetAPI(&<name>);<\n>
+>>
+
+codeFileExtension() ::= ".c"
+
+true() ::= "ANTLR3_TRUE"
+false() ::= "ANTLR3_FALSE"
diff --git a/src/org/antlr/codegen/templates/C/Dbg.stg b/src/org/antlr/codegen/templates/C/Dbg.stg
new file mode 100644
index 0000000..0121588
--- /dev/null
+++ b/src/org/antlr/codegen/templates/C/Dbg.stg
@@ -0,0 +1,184 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group CDbg;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+import org.antlr.runtime.debug.*;
+>>
+
+ at genericParser.members() ::= <<
+public static final String[] ruleNames = new String[] {
+    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
+};<\n>
+public int ruleLevel = 0;
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+<if(profile)>
+        this(input, new Profiler(null));
+        Profiler p = (Profiler)dbg;
+        p.setParser(this);
+<else>
+        super(input);
+<endif><\n>
+<if(memoize)>
+        ruleMemo = new Map[<numRules>+1];<\n><! index from 1..n !>
+<endif>
+}
+<if(profile)>
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+    ((Profiler)dbg).examineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
+    return super.alreadyParsedRule(input, ruleIndex);
+}<\n>
+public void memoize(IntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
+    super.memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+    super(input, dbg);
+}<\n>
+protected boolean evalPredicate(boolean result, String predicate) {
+    dbg.semanticPredicate(result, predicate);
+    return result;
+}<\n>
+>>
+
+ at genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+ at rule.preamble() ::= <<
+try { dbg.enterRule("<ruleName>");
+if ( ruleLevel==0 ) {dbg.commence();}
+ruleLevel++;
+dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
+>>
+
+ at rule.postamble() ::= <<
+dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
+}
+finally {
+    dbg.exitRule("<ruleName>");
+    ruleLevel--;
+    if ( ruleLevel==0 ) {dbg.terminate();}
+}<\n>
+>>
+
+ at synpred.start() ::= "dbg.beginBacktrack(backtracking);"
+
+ at synpred.stop() ::= "dbg.endBacktrack(backtracking, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { dbg.enterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally {dbg.exitSubRule(<decisionNumber>);}<\n>"
+
+enterDecision() ::=
+    "try { dbg.enterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally {dbg.exitDecision(<decisionNumber>);}<\n>"
+
+enterAlt(n) ::= "dbg.enterAlt(<n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+ at block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+ at block.postdecision() ::= "<exitDecision()>"
+
+ at block.postbranch() ::= "<exitSubRule()>"
+
+ at ruleBlock.predecision() ::= "<enterDecision()>"
+
+ at ruleBlock.postdecision() ::= "<exitDecision()>"
+
+ at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+ at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+ at positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+ at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+ at positiveClosureBlock.earlyExitException() ::=
+    "dbg.recognitionException(eee);<\n>"
+
+ at closureBlock.preloop() ::= "<enterSubRule()>"
+
+ at closureBlock.postloop() ::= "<exitSubRule()>"
+
+ at closureBlock.predecision() ::= "<enterDecision()>"
+
+ at closureBlock.postdecision() ::= "<exitDecision()>"
+
+ at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+ at element.prematch() ::=
+    "dbg.location(<it.line>,<it.pos>);"
+
+ at matchSet.mismatchedSetException() ::=
+    "dbg.recognitionException(mse);"
+
+ at dfaState.noViableAltException() ::= "dbg.recognitionException(nvae);"
+
+ at dfaStateSwitch.noViableAltException() ::= "dbg.recognitionException(nvae);"
+
+dfaDecision(decisionNumber,description) ::= <<
+try {
+    isCyclicDecision = true;
+    <super.dfaDecision(...)>
+}
+catch (NoViableAltException nvae) {
+    dbg.recognitionException(nvae);
+    throw nvae;
+}
+>>
+
+ at cyclicDFA.errorMethod() ::= <<
+public void error(NoViableAltException nvae) {
+    dbg.recognitionException(nvae);
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+evalPredicate(<pred>,"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/C/ST.stg b/src/org/antlr/codegen/templates/C/ST.stg
new file mode 100644
index 0000000..b0e5c41
--- /dev/null
+++ b/src/org/antlr/codegen/templates/C/ST.stg
@@ -0,0 +1,163 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+group ST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+import org.antlr.stringtemplate.*;
+import org.antlr.stringtemplate.language.*;
+import java.util.HashMap;
+>>
+
+/** Add this to each rule's return value struct */
+ at returnScope.ruleReturnMembers() ::= <<
+public StringTemplate st;
+public StringTemplate getTemplate() { return st; }
+public String toString() { return st==null?null:st.toString(); }
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+protected StringTemplateGroup templateLib =
+  new StringTemplateGroup("<name>Templates", AngleBracketTemplateLexer.class);
+
+public void setTemplateLib(StringTemplateGroup templateLib) {
+  this.templateLib = templateLib;
+}
+public StringTemplateGroup getTemplateLib() {
+  return templateLib;
+}
+/** allows convenient multi-value initialization:
+ *  "new STAttrMap().put(...).put(...)"
+ */
+public static class STAttrMap extends HashMap {
+  public STAttrMap put(String attrName, Object value) {
+    super.put(attrName, value);
+    return this;
+  }
+  public STAttrMap put(String attrName, int value) {
+    super.put(attrName, new Integer(value));
+    return this;
+  }
+}
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTemplate()",...)>
+>>
+
+rewriteTemplate(alts) ::= <<
+
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( backtracking==0 ) {
+  <alts:rewriteTemplateAlt(); separator="else ">
+  <if(rewrite)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewrite)><replaceTextInLine()><endif>
+<endif>
+>>
+
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+((TokenRewriteStream)input.getTokenStream()).replace(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start),
+  retval.st);
+<else>
+((TokenRewriteStream)input).replace(
+  ((Token)retval.start).getTokenIndex(),
+  input.LT(-1).getTokenIndex(),
+  retval.st);
+<endif>
+>>
+
+rewriteTemplateAlt() ::= <<
+// <it.description>
+<if(it.pred)>
+if (<it.pred>) {
+    retval.st = <it.alt>;
+}<\n>
+<else>
+{
+    retval.st = <it.alt>;
+}<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+null;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <<
+templateLib.getInstanceOf("<name>"<if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <<
+templateLib.getInstanceOf(<expr><if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <<
+new StringTemplate(templateLib, "<template>"<if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>).setAttribute("<attrName>",<expr>);
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+new StringTemplate(templateLib,<stringExpr>)
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/AST.stg b/src/org/antlr/codegen/templates/CSharp/AST.stg
new file mode 100644
index 0000000..2f698b0
--- /dev/null
+++ b/src/org/antlr/codegen/templates/CSharp/AST.stg
@@ -0,0 +1,465 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+using Antlr.Runtime.Tree;<\n>
+<endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected ITreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
+public ITreeAdaptor TreeAdaptor
+{
+    get { return this.adaptor; }
+    set { this.adaptor = value; }
+}
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+internal <ASTLabelType> tree;
+override public object Tree
+{
+	get { return tree; }
+}
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(backtracking)>
+if ( backtracking==0 )
+{
+<endif>
+	retval.tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+	adaptor.SetTokenBoundaries(retval.Tree, retval.start, retval.stop);
+<if(backtracking)>
+}
+<endif>
+<endif>
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+root_0 = (<ASTLabelType>)adaptor.GetNilNode();<\n>
+<endif>
+<endif>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ID but track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.AddChild(root_0, adaptor.Create(<label>));})>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(adaptor.Create(<label>), root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.AddChild(root_0, <label>.Tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>stream_<rule>.Add(<label>.Tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+// TODO: ugh, am i really missing the combinations for Track and ListLabel?
+// there's got to be a better way
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements:          <referencedElementsDeep; separator=", ">
+// token labels:      <referencedTokenLabels; separator=", ">
+// rule labels:       <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels:  <referencedRuleListLabels; separator=", ">
+<if(backtracking)>
+if ( backtracking==0 ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.GetNilNode();
+<alts:rewriteAlt(); separator="else ">
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor, "token <it>", <it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", (<it>!=null ? <it>.Tree : null));};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than a shallow one like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediate block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.HasNext()}; separator=" || ">) ) {
+    throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>)
+{
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args) ::= <<
+adaptor.AddChild(root_<treeLevel>, <if(args)>adaptor.Create(<token>,<args; separator=", ">)<else>stream_<token>.Next()<endif>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.Next(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<if(args)>adaptor.Create(<token>,<args; separator=", ">)<else>stream_<token>.Next()<endif>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, adaptor.Create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>));<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(adaptor.Create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>), root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.Tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<rule>.Next());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.AddChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, ((<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)stream_<label>.Next()).Tree);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg b/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
new file mode 100644
index 0000000..5dc1610
--- /dev/null
+++ b/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
@@ -0,0 +1,44 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : C# by code generator.
+ */
+group ASTDbg;
+
+parserMembers() ::= <<
+protected ITreeAdaptor adaptor = new DebugTreeAdaptor(dbg, new CommonTreeAdaptor());
+public ITreeAdaptor TreeAdaptor
+{
+	get { return this.adaptor; }
+	set { this.adaptor = new DebugTreeAdaptor(dbg, value); }
+}<\n>
+>>
+
+ at rewriteElement.pregen() ::= "dbg.Location(<e.line>,<e.pos>);"
diff --git a/src/org/antlr/codegen/templates/CSharp/CSharp.stg b/src/org/antlr/codegen/templates/CSharp/CSharp.stg
new file mode 100644
index 0000000..b13a211
--- /dev/null
+++ b/src/org/antlr/codegen/templates/CSharp/CSharp.stg
@@ -0,0 +1,1368 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group CSharp implements ANTLRCore;
+
+csharpTypeInitMap ::= [
+	"int":"0",
+	"uint":"0",
+	"long":"0",
+	"ulong":"0",
+	"float":"0.0",
+	"double":"0.0",
+	"bool":"false",
+	"byte":"0",
+	"sbyte":"0",
+	"short":"0",
+	"ushort":"0",
+	"char":"char.MinValue",
+	default:"null" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs, 
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<if(actions.(actionScope).namespace)>
+namespace <actions.(actionScope).namespace>
+{
+<endif>
+
+<actions.(actionScope).header>
+
+<@imports>
+using System;
+using Antlr.Runtime;
+<if(TREE_PARSER)>
+using Antlr.Runtime.Tree;
+<endif>
+using IList 		= System.Collections.IList;
+using ArrayList 	= System.Collections.ArrayList;
+using Stack 		= Antlr.Runtime.Collections.StackList;
+
+<if(backtracking)>
+using IDictionary	= System.Collections.IDictionary;
+using Hashtable 	= System.Collections.Hashtable;
+<endif>
+
+
+<@end>
+
+<docComment>
+<recognizer>
+<if(actions.(actionScope).namespace)>
+}
+<endif>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode) ::= <<
+public class <name> : Lexer 
+{
+    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <actions.lexer.members>
+
+    public <name>() <! needed by subclasses !>
+    {
+		InitializeCyclicDFAs();
+    }
+    public <name>(ICharStream input) 
+		: base(input)
+	{
+		InitializeCyclicDFAs();
+<if(backtracking)>
+        ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+    }
+    
+    override public string GrammarFileName
+    {
+    	get { return "<fileName>";} 
+    }
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+   	<synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+	private void InitializeCyclicDFAs()
+	{
+	    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
+	}
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+    
+}
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+override public IToken NextToken() 
+{
+    while (true) 
+	{
+        if ( input.LA(1) == (int)CharStreamConstants.EOF ) 
+		{
+            return Token.EOF_TOKEN;
+        }
+
+	    token = null;
+		channel = Token.DEFAULT_CHANNEL;
+        tokenStartCharIndex = input.Index();
+        tokenStartCharPositionInLine = input.CharPositionInLine;
+        tokenStartLine = input.Line;
+	    text = null;
+        try 
+		{
+            int m = input.Mark();
+            backtracking = 1; <! means we won't throw slow exception !>
+            failed = false;
+            mTokens();
+            backtracking = 0;
+<!
+			mTokens backtracks with synpred at backtracking==2
+            and we set the synpredgate to allow actions at level 1. 
+!>
+            if ( failed ) 
+			{
+	            input.Rewind(m);
+                input.Consume(); <! // advance one char and try again !>
+            }
+            else 
+			{
+				Emit();
+                return token;
+            }
+        }
+        catch (RecognitionException re) 
+		{
+            // shouldn't happen in backtracking mode, but...
+            ReportError(re);
+            Recover(re);
+        }
+    }
+}
+
+override public void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
+{
+	if ( backtracking > 1 ) 
+		base.Memoize(input, ruleIndex, ruleStartIndex);
+}
+
+override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+	if ( backtracking>1 ) 
+		return base.AlreadyParsedRule(input, ruleIndex);
+	return false;
+}
+>>
+
+filteringActionGate() ::= "(backtracking == 1)"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              ASTLabelType="object", labelType, members) ::= <<
+public class <name> : <@superClassName><superClass><@end> 
+{
+    public static readonly string[] tokenNames = new string[] 
+	{
+        "\<invalid>", 
+		"\<EOR>", 
+		"\<DOWN>", 
+		"\<UP>", 
+		<tokenNames; separator=", \n">
+    };
+
+    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <@members>
+   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+
+    public <name>(<inputStreamType> input) 
+		: base(input)
+	{
+		InitializeCyclicDFAs();
+<if(backtracking)>
+        ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+    }
+    <@end>
+
+    override public string[] TokenNames
+	{
+		get { return tokenNames; }
+	}
+
+    override public string GrammarFileName
+	{
+		get { return "<fileName>"; }
+	}
+
+    <members>
+
+    <rules; separator="\n\n">
+
+   	<synpreds:{p | <synpred(p)>}>
+
+   	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+	private void InitializeCyclicDFAs()
+	{
+    	<cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
+	}
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+}
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="IToken", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) //throws RecognitionException
+{   
+<if(trace)>
+    TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try
+    {
+        <block>
+    }
+    finally
+    {
+        TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpredDecls(name) ::= <<
+SynPredPointer <name>;<\n>
+>>
+
+synpred(name) ::= <<
+public bool <name>() 
+{
+    backtracking++;
+    <@start()>
+    int start = input.Mark();
+    try 
+    {
+        <name>_fragment(); // can never throw exception
+    }
+    catch (RecognitionException re) 
+    {
+        Console.Error.WriteLine("impossible: "+re);
+    }
+    bool success = !failed;
+    input.Rewind(start);
+    <@stop()>
+    backtracking--;
+    failed = false;
+    return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( (backtracking > 0) && AlreadyParsedRule(input, <ruleDescriptor.index>) ) 
+{
+	return <ruleReturnValue()>; 
+}
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if ( backtracking > 0 ) {failed = true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start <ruleName>
+// <fileName>:<description>
+public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [1]
+{   
+    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try 
+	{
+	    <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (RecognitionException re) 
+	{
+        ReportError(re);
+        Recover(input,re);
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally 
+	{
+        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+// $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) 
+{
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = new <returnType()>();
+retval.start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.Index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Push(new <it.name>_scope());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<labelType> <it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{IList list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{IList list_<it.label.text> = null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( backtracking > 0 ) 
+{
+	Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); 
+}
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start <ruleName> 
+public void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [2]
+{
+    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleDeclarations()>
+    try 
+	{
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        int _type = <ruleName>;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        this.type = _type;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally 
+	{
+        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+    }
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+override public void mTokens() // throws RecognitionException 
+{
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) 
+{
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) 
+{
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber> = 0;
+<decls>
+<@preloop()>
+do 
+{
+    int alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+	{
+		<alts:altSwitchCase()>
+		default:
+		    if ( cnt<decisionNumber> >= 1 ) goto loop<decisionNumber>;
+		    <ruleBacktrackFailure()>
+	            EarlyExitException eee =
+	                new EarlyExitException(<decisionNumber>, input);
+	            <@earlyExitException()>
+	            throw eee;
+    }
+    cnt<decisionNumber>++;
+} while (true);
+
+loop<decisionNumber>:
+	;	// Stops C# compiler whinging that label 'loop<decisionNumber>' has no statements
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+do 
+{
+    int alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+	{
+		<alts:altSwitchCase()>
+		default:
+		    goto loop<decisionNumber>;
+    }
+} while (true);
+
+loop<decisionNumber>:
+	;	// Stops C# compiler whinging that label 'loop<decisionNumber>' has no statements
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+// <fileName>:<description>
+{
+	<@declarations()>
+	<elements:element()>
+	<@cleanup()>
+}
+>>
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex) ::= <<
+<if(label)>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+Match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> == null) list_<label> = new ArrayList();
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) 
+{
+    input.Consume();
+    <postmatchCode>
+<if(!LEXER)>
+    errorRecovery = false;
+<endif>
+    <if(backtracking)>failed = false;<endif>
+}
+else 
+{
+    <ruleBacktrackFailure()>
+    MismatchedSetException mse =
+        new MismatchedSetException(null,input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    Recover(mse);
+<else>
+    RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+<endif>
+    throw mse;
+}<\n>
+>>
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int <label>Start = CharIndex;
+Match(<string>); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, CharIndex-1);
+<else>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args) ::= <<
+PushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
+<if(label)>
+<label> = <rule>(<args; separator=", ">);<\n>
+<else>
+<rule>(<args; separator=", ">);<\n>
+<endif>
+followingStackPointer_--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
+<else>
+m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+Match(EOF); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1) == Token.DOWN )
+{
+    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) 
+{
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else 
+{
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
+        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <@noViableAltException()>
+    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else 
+{
+    alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>)
+{
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+<edges; separator="\n">
+	default:
+<if(eotPredictsAlt)>
+    	alt<decisionNumber> = <eotPredictsAlt>;
+    	break;
+<else>
+	    <ruleBacktrackFailure()>
+	    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
+	        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+	    <@noViableAltException()>
+	    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+	default:
+    	alt<decisionNumber> = <eotPredictsAlt>;
+    	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+	{
+    <targetState>
+    }
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
+>>
+
+/* Dump DFA tables.
+ */
+cyclicDFA(dfa) ::= <<
+static readonly short[] DFA<dfa.decisionNumber>_eot = {
+    <dfa.eot:{n|<n>}; null="-1", wrap="\n", separator=", ">
+    };
+static readonly short[] DFA<dfa.decisionNumber>_eof = {
+    <dfa.eof:{n|<n>}; null="-1", wrap="\n", separator=", ">
+    };
+static readonly int[] DFA<dfa.decisionNumber>_min = {
+    <dfa.min:{n|<n>}; null="0", wrap="\n", separator=", ">
+    };
+static readonly int[] DFA<dfa.decisionNumber>_max = {
+    <dfa.max:{n|<n>}; null="0", wrap="\n", separator=", ">
+    };
+static readonly short[] DFA<dfa.decisionNumber>_accept = {
+    <dfa.accept:{n|<n>}; null="-1", wrap="\n", separator=", ">
+    };
+static readonly short[] DFA<dfa.decisionNumber>_special = {
+    <dfa.special:{n|<n>}; null="-1", wrap="\n", separator=", ">
+    };
+
+static readonly short[] dfa<dfa.decisionNumber>_transition_null = null;
+<dfa.edgeTransitionClassMap.keys:{table |
+static readonly short[] dfa<dfa.decisionNumber>_transition<i0> = \{
+	<table; separator=", ", wrap="\n    ", null="-1">
+	\};}>
+
+static readonly short[][] DFA<dfa.decisionNumber>_transition = {
+	<dfa.transitionEdgeTables:{whichTable|dfa<dfa.decisionNumber>_transition<whichTable>}; null="_null", separator=",\n">
+    };
+
+protected class DFA<dfa.decisionNumber> : DFA
+{
+    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) 
+    {
+        this.recognizer = recognizer;
+        this.decisionNumber = <dfa.decisionNumber>;
+        this.eot = DFA<dfa.decisionNumber>_eot;
+        this.eof = DFA<dfa.decisionNumber>_eof;
+        this.min = DFA<dfa.decisionNumber>_min;
+        this.max = DFA<dfa.decisionNumber>_max;
+        this.accept     = DFA<dfa.decisionNumber>_accept;
+        this.special    = DFA<dfa.decisionNumber>_special;
+        this.transition = DFA<dfa.decisionNumber>_transition;
+    }
+
+    override public string Description
+    {
+        get { return "<dfa.description>"; }
+    }
+
+    <@errorMethod()>
+}<\n>
+<if(dfa.specialStateSTs)>
+
+protected internal int DFA<dfa.decisionNumber>_SpecialStateTransition(DFA dfa, int s, IIntStream input) //throws NoViableAltException
+{
+	int _s = s;
+    switch ( s )
+    {
+    <dfa.specialStateSTs:{state |
+       	case <i0> : <! compressed special state numbers 0..n-1 !>
+           	<state>}; separator="\n">
+    }
+<if(backtracking)>
+    if (backtracking > 0) {failed = true; return -1;}<\n>
+<endif>
+    NoViableAltException nvae =
+        new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
+    dfa.Error(nvae);
+    throw nvae;
+}<\n>
+<endif>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+int index<decisionNumber>_<stateNumber> = input.Index();
+input.Rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.Seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s >= 0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>) { s = <targetStateNumber>; }<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left> && <right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | || <o>}>)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "<pred>"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber> \>= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) \>= <lower> && input.LA(<k>) \<= <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope 
+{
+    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope 
+{
+    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.name>_return
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<csharpTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public class <returnType()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope 
+{
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+};
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack[<index>]).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name> = <expr>;
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack[<index>]).<attr.name> = <expr>;
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name> = <expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>.<attr.name>
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> = <expr>;
+<else>
+<attr.name> = <expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.Text"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.Type"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.Line"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.CharPositionInLine"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.Channel"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.TokenIndex"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(<scope>.start),
+  input.TreeAdaptor.GetTokenStopIndex(<scope>.start) )
+<else>
+input.ToString(<scope>.start,<scope>.stop)
+<endif>
+>>
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.Type"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.Line"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.CharPositionInLine"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.Channel"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.TokenIndex"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.Text"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(retval.start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.start) )
+<else>
+input.ToString(retval.start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "channel"
+lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree = <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st = <expr>;"
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> )
+{
+  <action>
+}
+<else>
+if ( backtracking == 0 ) 
+{
+  <action>
+}
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{<it>UL};separator=",">});<\n>
+>>
+
+codeFileExtension() ::= ".cs"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/src/org/antlr/codegen/templates/CSharp/Dbg.stg b/src/org/antlr/codegen/templates/CSharp/Dbg.stg
new file mode 100644
index 0000000..f000d01
--- /dev/null
+++ b/src/org/antlr/codegen/templates/CSharp/Dbg.stg
@@ -0,0 +1,192 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal C# output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.Runtime.Debug;
+>>
+
+@genericParser.members() ::= <<
+public static readonly string[] ruleNames = new string[] {
+    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
+};<\n>
+public int ruleLevel = 0;
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) : <if(profile)>this(input, new Profiler(null))<else>base(input)<endif>
+{
+<if(profile)>
+        Profiler p = (Profiler)dbg;
+        p.Parser = this;
+<endif><\n>
+<if(memoize)>
+    ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+}
+<if(profile)>
+override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+    ((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
+    return base.AlreadyParsedRule(input, ruleIndex);
+}<\n>
+override public void Memoize(IIntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
+    base.Memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+public <name>(<inputStreamType> input, IDebugEventListener dbg)
+	: base(input, dbg)
+{
+}<\n>
+protected bool EvalPredicate(bool result, string predicate) 
+{
+    dbg.SemanticPredicate(result, predicate);
+    return result;
+}<\n>
+>>
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+@rule.preamble() ::= <<
+try 
+{
+	dbg.EnterRule("<ruleName>");
+	if ( ruleLevel==0 ) {dbg.Commence();}
+	ruleLevel++;
+	dbg.Location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
+>>
+
+@rule.postamble() ::= <<
+dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
+}
+finally
+{
+    dbg.ExitRule("<ruleName>");
+    ruleLevel--;
+    if ( ruleLevel==0 ) {dbg.Terminate();}
+}<\n>
+>>
+
+@synpred.start() ::= "dbg.BeginBacktrack(backtracking);"
+
+@synpred.stop() ::= "dbg.EndBacktrack(backtracking, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { dbg.EnterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally { dbg.ExitSubRule(<decisionNumber>); }<\n>"
+
+enterDecision() ::=
+    "try { dbg.EnterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally { dbg.ExitDecision(<decisionNumber>); }<\n>"
+
+enterAlt(n) ::= "dbg.EnterAlt(<n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+@block.postdecision() ::= "<exitDecision()>"
+
+@block.postbranch() ::= "<exitSubRule()>"
+
+@ruleBlock.predecision() ::= "<enterDecision()>"
+
+@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+@positiveClosureBlock.earlyExitException() ::=
+    "dbg.RecognitionException(eee);<\n>"
+
+@closureBlock.preloop() ::= "<enterSubRule()>"
+
+@closureBlock.postloop() ::= "<exitSubRule()>"
+
+@closureBlock.predecision() ::= "<enterDecision()>"
+
+@closureBlock.postdecision() ::= "<exitDecision()>"
+
+@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+@element.prematch() ::=
+    "dbg.Location(<it.line>,<it.pos>);"
+
+@matchSet.mismatchedSetException() ::=
+    "dbg.RecognitionException(mse);"
+
+@dfaState.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
+
+@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
+
+dfaDecision(decisionNumber,description) ::= <<
+try 
+{
+    isCyclicDecision = true;
+    <super.dfaDecision(...)>
+}
+catch (NoViableAltException nvae) 
+{
+    dbg.RecognitionException(nvae);
+    throw nvae;
+}
+>>
+
+@cyclicDFA.errorMethod() ::= <<
+public virtual void Error(NoViableAltException nvae) 
+{
+    dbg.RecognitionException(nvae);
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+EvalPredicate(<pred>,"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/ST.stg b/src/org/antlr/codegen/templates/CSharp/ST.stg
new file mode 100644
index 0000000..a37814a
--- /dev/null
+++ b/src/org/antlr/codegen/templates/CSharp/ST.stg
@@ -0,0 +1,169 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+group ST;
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.StringTemplate;
+using Antlr.StringTemplate.Language;
+using Hashtable = System.Collections.Hashtable;
+
+>>
+
+/** Add this to each rule's return value struct */
+@returnScope.ruleReturnMembers() ::= <<
+public StringTemplate st;
+public override object Template 		{ get { return st; } }
+public override string ToString() 		{ return (st == null) ? null : st.ToString(); }
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+protected StringTemplateGroup templateLib =
+  new StringTemplateGroup("<name>Templates", typeof(AngleBracketTemplateLexer));
+
+public StringTemplateGroup TemplateLib
+{
+ 	get { return this.templateLib; }
+ 	set { this.templateLib = value; }
+}
+
+/// \<summary> Allows convenient multi-value initialization:
+///  "new STAttrMap().Add(...).Add(...)"
+/// \</summary>
+protected class STAttrMap : Hashtable
+{
+  public STAttrMap Add(string attrName, object value) 
+  {
+    base.Add(attrName, value);
+    return this;
+  }
+  public STAttrMap Add(string attrName, int value) 
+  {
+    base.Add(attrName, value);
+    return this;
+  }
+}
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Template",...)>
+>>
+
+rewriteTemplate(alts) ::= <<
+
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( backtracking==0 )
+{
+  <alts:rewriteTemplateAlt(); separator="else ">
+  <if(rewrite)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewrite)><replaceTextInLine()><endif>
+<endif>
+>>
+
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+((TokenRewriteStream)input.TokenStream).Replace(
+  input.TreeAdaptor.GetTokenStartIndex(retval.start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.start),
+  retval.st);
+<else>
+((TokenRewriteStream)input).Replace(
+  ((Token)retval.start).TokenIndex,
+  input.LT(-1).TokenIndex,
+  retval.st);
+<endif>
+>>
+
+rewriteTemplateAlt() ::= <<
+// <it.description>
+<if(it.pred)>
+if (<it.pred>) {
+    retval.st = <it.alt>;
+}<\n>
+<else>
+{
+    retval.st = <it.alt>;
+}<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+null;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <<
+templateLib.GetInstanceOf("<name>"<if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <<
+templateLib.GetInstanceOf(<expr><if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <<
+new StringTemplate(templateLib, "<template>"<if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>).SetAttribute("<attrName>",<expr>);
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+new StringTemplate(templateLib,<stringExpr>)
+>>
diff --git a/src/org/antlr/codegen/templates/Java/AST.stg b/src/org/antlr/codegen/templates/Java/AST.stg
new file mode 100644
index 0000000..e01b59d
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Java/AST.stg
@@ -0,0 +1,460 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+import org.antlr.runtime.tree.*;<\n>
+<endif>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected TreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
+public void setTreeAdaptor(TreeAdaptor adaptor) {
+    this.adaptor = adaptor;
+}
+public TreeAdaptor getTreeAdaptor() {
+    return adaptor;
+}
+>>
+
+@returnScope.ruleReturnMembers() ::= <<
+<ASTLabelType> tree;
+public Object getTree() { return tree; }
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{RewriteRuleTokenStream stream_<it>=new RewriteRuleTokenStream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(backtracking)>
+if ( backtracking==0 ) {<\n>
+<endif>
+    retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+    adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
+<if(backtracking)>
+}
+<endif>
+<endif>
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+root_0 = (<ASTLabelType>)adaptor.nil();<\n>
+<endif>
+<endif>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ID but track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.addChild(root_0, adaptor.create(<label>));})>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(adaptor.create(<label>), root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.addChild(root_0, <label>.getTree());
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_0);
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( backtracking==0 ) <endif>stream_<rule>.add(<label>.getTree());
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+// TODO: ugh, am i really missing the combinations for Track and ListLabel?
+// there's got to be a better way
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+<if(backtracking)>
+if ( backtracking==0 ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.nil();
+<alts:rewriteAlt(); separator="else ">
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{RewriteRuleTokenStream stream_<it>=new RewriteRuleTokenStream(adaptor,"token <it>",<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{RewriteRuleTokenStream stream_<it>=new RewriteRuleTokenStream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",<it>!=null?<it>.tree:null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator="||"> ) {
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.hasNext()}; separator="||">) ) {
+    throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args) ::= <<
+adaptor.addChild(root_<treeLevel>, <if(args)>adaptor.create(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.next());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.next());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.next(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<if(args)>adaptor.create(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, adaptor.create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>));<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(adaptor.create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>), root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<rule>.next());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.addChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.next());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, ((<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)stream_<label>.next()).getTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
+>>
diff --git a/src/org/antlr/codegen/templates/Java/ASTDbg.stg b/src/org/antlr/codegen/templates/Java/ASTDbg.stg
new file mode 100644
index 0000000..3abe396
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Java/ASTDbg.stg
@@ -0,0 +1,65 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ */
+group ASTDbg;
+
+parserMembers() ::= <<
+protected DebugTreeAdaptor adaptor =
+	  new DebugTreeAdaptor(null,new CommonTreeAdaptor());
+public void setTreeAdaptor(TreeAdaptor adaptor) {
+    this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+}
+public TreeAdaptor getTreeAdaptor() {
+    return adaptor;
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy =
+    new DebugEventSocketProxy(this, port, adaptor);
+setDebugListener(proxy);
+adaptor.setDebugEventListener(proxy);
+try {
+    proxy.handshake();
+}
+catch (IOException ioe) {
+    reportError(ioe);
+}
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+    super(input, dbg);
+    adaptor.setDebugEventListener(dbg);
+}<\n>
+>>
+
+@rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
diff --git a/src/org/antlr/codegen/templates/Java/Dbg.stg b/src/org/antlr/codegen/templates/Java/Dbg.stg
new file mode 100644
index 0000000..9633c69
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Java/Dbg.stg
@@ -0,0 +1,210 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+import org.antlr.runtime.debug.*;
+import java.io.IOException;
+>>
+
+@genericParser.members() ::= <<
+public static final String[] ruleNames = new String[] {
+    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
+};<\n>
+public int ruleLevel = 0;
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input, int port) {
+<if(profile)>
+        this(input, new Profiler(null));
+        Profiler p = (Profiler)dbg;
+        p.setParser(this);
+<else>
+        super(input, port);
+        <createListenerAndHandshake()>
+<endif><\n>
+<if(memoize)>
+        ruleMemo = new Map[<numRules>+1];<\n><! index from 1..n !>
+<endif>
+}
+public <name>(<inputStreamType> input) {
+    this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT);
+}
+<ctorForPredefinedListener()>
+<if(profile)>
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+    ((Profiler)dbg).examineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
+    return super.alreadyParsedRule(input, ruleIndex);
+}<\n>
+public void memoize(IntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
+    super.memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+protected boolean evalPredicate(boolean result, String predicate) {
+    dbg.semanticPredicate(result, predicate);
+    return result;
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy proxy =
+    new DebugEventSocketProxy(this, port, input.getTreeAdaptor());
+<else>
+DebugEventSocketProxy proxy =
+    new DebugEventSocketProxy(this, port, null);
+<endif>
+setDebugListener(proxy);
+try {
+    proxy.handshake();
+}
+catch (IOException ioe) {
+    reportError(ioe);
+}
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+    super(input, dbg);
+}<\n>
+>>
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+@rule.preamble() ::= <<
+try { dbg.enterRule("<ruleName>");
+if ( ruleLevel==0 ) {dbg.commence();}
+ruleLevel++;
+dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
+>>
+
+@rule.postamble() ::= <<
+dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
+}
+finally {
+    dbg.exitRule("<ruleName>");
+    ruleLevel--;
+    if ( ruleLevel==0 ) {dbg.terminate();}
+}<\n>
+>>
+
+@synpred.start() ::= "dbg.beginBacktrack(backtracking);"
+
+@synpred.stop() ::= "dbg.endBacktrack(backtracking, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { dbg.enterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally {dbg.exitSubRule(<decisionNumber>);}<\n>"
+
+enterDecision() ::=
+    "try { dbg.enterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally {dbg.exitDecision(<decisionNumber>);}<\n>"
+
+enterAlt(n) ::= "dbg.enterAlt(<n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+@block.postdecision() ::= "<exitDecision()>"
+
+@block.postbranch() ::= "<exitSubRule()>"
+
+@ruleBlock.predecision() ::= "<enterDecision()>"
+
+@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+@positiveClosureBlock.earlyExitException() ::=
+    "dbg.recognitionException(eee);<\n>"
+
+@closureBlock.preloop() ::= "<enterSubRule()>"
+
+@closureBlock.postloop() ::= "<exitSubRule()>"
+
+@closureBlock.predecision() ::= "<enterDecision()>"
+
+@closureBlock.postdecision() ::= "<exitDecision()>"
+
+@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+@element.prematch() ::=
+    "dbg.location(<it.line>,<it.pos>);"
+
+@matchSet.mismatchedSetException() ::=
+    "dbg.recognitionException(mse);"
+
+@dfaState.noViableAltException() ::= "dbg.recognitionException(nvae);"
+
+@dfaStateSwitch.noViableAltException() ::= "dbg.recognitionException(nvae);"
+
+dfaDecision(decisionNumber,description) ::= <<
+try {
+    isCyclicDecision = true;
+    <super.dfaDecision(...)>
+}
+catch (NoViableAltException nvae) {
+    dbg.recognitionException(nvae);
+    throw nvae;
+}
+>>
+
+@cyclicDFA.errorMethod() ::= <<
+public void error(NoViableAltException nvae) {
+    dbg.recognitionException(nvae);
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+evalPredicate(<pred>,"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/Java/Java.stg b/src/org/antlr/codegen/templates/Java/Java.stg
new file mode 100644
index 0000000..92f6b75
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Java/Java.stg
@@ -0,0 +1,1251 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group Java implements ANTLRCore;
+
+javaTypeInitMap ::= [
+	"int":"0",
+	"long":"0",
+	"float":"0.0",
+	"double":"0.0",
+	"boolean":"false",
+	"byte":"0",
+	"short":"0",
+	"char":"0",
+	default:"null" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<actions.(actionScope).header>
+
+<@imports>
+import org.antlr.runtime.*;
+<if(TREE_PARSER)>
+import org.antlr.runtime.tree.*;
+<endif>
+import java.util.Stack;
+import java.util.List;
+import java.util.ArrayList;
+<if(backtracking)>
+import java.util.Map;
+import java.util.HashMap;
+<endif>
+<@end>
+
+<docComment>
+<recognizer>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode) ::= <<
+public class <name> extends Lexer {
+    <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <actions.lexer.members>
+    public <name>() {;} <! needed by subclasses !>
+    public <name>(CharStream input) {
+        super(input);
+<if(backtracking)>
+        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+    }
+    public String getGrammarFileName() { return "<fileName>"; }
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+}
+>>
+
+/** A override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+public Token nextToken() {
+    while (true) {
+        if ( input.LA(1)==CharStream.EOF ) {
+            return Token.EOF_TOKEN;
+        }
+        token = null;
+	channel = Token.DEFAULT_CHANNEL;
+        tokenStartCharIndex = input.index();
+        tokenStartCharPositionInLine = input.getCharPositionInLine();
+        tokenStartLine = input.getLine();
+	text = null;
+        try {
+            int m = input.mark();
+            backtracking=1; <! means we won't throw slow exception !>
+            failed=false;
+            mTokens();
+            backtracking=0;
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if ( failed ) {
+                input.rewind(m);
+                input.consume(); <! advance one char and try again !>
+            }
+            else {
+                emit();
+                return token;
+            }
+        }
+        catch (RecognitionException re) {
+            // shouldn't happen in backtracking mode, but...
+            reportError(re);
+            recover(re);
+        }
+    }
+}
+
+public void memoize(IntStream input,
+		int ruleIndex,
+		int ruleStartIndex)
+{
+if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+}
+
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+return false;
+}
+>>
+
+filteringActionGate() ::= "backtracking==1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              ASTLabelType="Object", labelType, members) ::= <<
+public class <name> extends <@superClassName><superClass><@end> {
+    public static final String[] tokenNames = new String[] {
+        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+    };
+    <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <@members>
+   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+    public <name>(<inputStreamType> input) {
+        super(input);
+<if(backtracking)>
+        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+    }
+    <@end>
+
+    public String[] getTokenNames() { return tokenNames; }
+    public String getGrammarFileName() { return "<fileName>"; }
+
+    <members>
+
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <synpred(p)>}>
+
+    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+}
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {   
+<if(trace)>
+    traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try {
+        <block>
+    }
+    finally {
+        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(name) ::= <<
+public final boolean <name>() {
+    backtracking++;
+    <@start()>
+    int start = input.mark();
+    try {
+        <name>_fragment(); // can never throw exception
+    } catch (RecognitionException re) {
+        System.err.println("impossible: "+re);
+    }
+    boolean success = !failed;
+    input.rewind(start);
+    <@stop()>
+    backtracking--;
+    failed=false;
+    return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (backtracking>0) {failed=true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start <ruleName>
+// <fileName>:<description>
+public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try {
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (RecognitionException re) {
+        reportError(re);
+        recover(input,re);
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally {
+        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+// $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = new <returnType()>();
+retval.start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<labelType> <it.label.text>=null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start <ruleName>
+public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        int _type = <ruleName>;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        this.type = _type;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally {
+        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+public void mTokens() throws RecognitionException {
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+	default :
+	    if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
+	    <ruleBacktrackFailure()>
+            EarlyExitException eee =
+                new EarlyExitException(<decisionNumber>, input);
+            <@earlyExitException()>
+            throw eee;
+    }
+    cnt<decisionNumber>++;
+} while (true);
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+	default :
+	    break loop<decisionNumber>;
+    }
+} while (true);
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<@cleanup()>
+}
+>>
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label>==null) list_<label>=new ArrayList();
+list_<label>.add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) {
+    input.consume();
+    <postmatchCode>
+<if(!LEXER)>
+    errorRecovery=false;
+<endif>
+    <if(backtracking)>failed=false;<endif>
+}
+else {
+    <ruleBacktrackFailure()>
+    MismatchedSetException mse =
+        new MismatchedSetException(null,input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    recover(mse);
+<else>
+    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+<endif>
+    throw mse;
+}<\n>
+>>
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int <label>Start = getCharIndex();
+match(<string>); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
+<else>
+match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+matchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+matchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args) ::= <<
+pushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
+<if(label)>
+<label>=<rule>(<args; separator=", ">);<\n>
+<else>
+<rule>(<args; separator=", ">);<\n>
+<endif>
+_fsp--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = getCharIndex();
+m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<else>
+m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = getCharIndex();
+match(EOF); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<else>
+match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException nvae =
+        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <@noViableAltException()>
+    throw nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+    alt<decisionNumber>=<eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException nvae =
+        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <@noViableAltException()>
+    throw nvae;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+    {
+    <targetState>
+    }
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.predict(input);
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+static final String DFA<dfa.decisionNumber>_eotS =
+    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">";
+static final String DFA<dfa.decisionNumber>_eofS =
+    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">";
+static final String DFA<dfa.decisionNumber>_minS =
+    "<dfa.javaCompressedMin; wrap="\"+\n    \"">";
+static final String DFA<dfa.decisionNumber>_maxS =
+    "<dfa.javaCompressedMax; wrap="\"+\n    \"">";
+static final String DFA<dfa.decisionNumber>_acceptS =
+    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">";
+static final String DFA<dfa.decisionNumber>_specialS =
+    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>";
+static final String[] DFA<dfa.decisionNumber>_transitionS = {
+        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+};
+
+static final short[] DFA<dfa.decisionNumber>_eot = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+static final short[] DFA<dfa.decisionNumber>_eof = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+static final char[] DFA<dfa.decisionNumber>_min = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+static final char[] DFA<dfa.decisionNumber>_max = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+static final short[] DFA<dfa.decisionNumber>_accept = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+static final short[] DFA<dfa.decisionNumber>_special = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+static final short[][] DFA<dfa.decisionNumber>_transition;
+
+static {
+    int numStates = DFA<dfa.decisionNumber>_transitionS.length;
+    DFA<dfa.decisionNumber>_transition = new short[numStates][];
+    for (int i=0; i\<numStates; i++) {
+        DFA<dfa.decisionNumber>_transition[i] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
+    }
+}
+
+class DFA<dfa.decisionNumber> extends DFA {
+
+    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) {
+        this.recognizer = recognizer;
+        this.decisionNumber = <dfa.decisionNumber>;
+        this.eot = DFA<dfa.decisionNumber>_eot;
+        this.eof = DFA<dfa.decisionNumber>_eof;
+        this.min = DFA<dfa.decisionNumber>_min;
+        this.max = DFA<dfa.decisionNumber>_max;
+        this.accept = DFA<dfa.decisionNumber>_accept;
+        this.special = DFA<dfa.decisionNumber>_special;
+        this.transition = DFA<dfa.decisionNumber>_transition;
+    }
+    public String getDescription() {
+        return "<dfa.description>";
+    }
+    <@errorMethod()>
+<if(dfa.specialStateSTs)>
+    public int specialStateTransition(int s, IntStream input) throws NoViableAltException {
+    	int _s = s;
+        switch ( s ) {
+        <dfa.specialStateSTs:{state |
+        case <i0> : <! compressed special state numbers 0..n-1 !>
+            <state>}; separator="\n">
+        }
+<if(backtracking)>
+        if (backtracking>0) {failed=true; return -1;}<\n>
+<endif>
+        NoViableAltException nvae =
+            new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input);
+        error(nvae);
+        throw nvae;
+    }<\n>
+<endif>
+}<\n>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+int index<decisionNumber>_<stateNumber> = input.index();
+input.rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+// Conjunction of two predicate expressions.
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+// Disjunction: first operand, then "||" before each remaining operand.
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
+
+// Negate a predicate; "..." forwards this template's args to evalPredicate.
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+// A semantic predicate is emitted verbatim; a syntactic predicate becomes a method call.
+evalPredicate(pred,description) ::= "<pred>"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+// NOTE: decisionNumber/stateNumber are not formal args here; they are resolved
+// dynamically from the enclosing DFA-state template (ST attribute scoping).
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+// Range test against the cached lookahead temp (bounds inclusive).
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"
+
+// A set membership test is just the OR of its range tests.
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+/** Define a global dynamic scope: a static struct holding the scope's
+ *  attributes plus a Stack of scope instances.  Emits nothing when the
+ *  scope declares no attributes. */
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+    <scope.attributes:{<it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+/** Rule-level dynamic scope; generated with the same shape as a global scope. */
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+    <scope.attributes:{<it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+/** Pick the rule method's return type: the generated <name>_return struct
+ *  for multiple return values, the declared type for a single value,
+ *  void otherwise. */
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.name>_return
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the Java type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<javaTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public static class <returnType()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+};
+<endif>
+>>
+
+/** Formal parameter list built from the rule's declared arguments. */
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+// Read / write a rule parameter ($p and $p = expr).
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+/** Read $scope::attr.  negIndex counts down from the top of the scope
+ *  stack, index addresses an absolute stack slot, otherwise the top of
+ *  stack is used. */
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+/** Write $scope::attr = expr; same indexing rules as scopeAttributeRef. */
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name> =<expr>;
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name> =<expr>;
+<else>
+((<scope>_scope)<scope>_stack.peek()).<attr.name> =<expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>.<attr.name>
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>;
+<else>
+<attr.name> =<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))
+<else>
+input.toString(<scope>.start,<scope>.stop)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start))
+<else>
+input.toString(retval.start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "channel"
+lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+
+/** How to execute an embedded action.  When backtracking, guard the
+ *  action so it only runs once the parse is known valid (via the
+ *  user-supplied synpredgate, or backtracking==0 by default); otherwise
+ *  emit it inline. */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+if ( backtracking==0 ) {
+  <action>
+}
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+// M I S C (properties, etc...)
+
+/** Emit a follow-set constant; words64 are the set's 64-bit words. */
+bitset(name, words64) ::= <<
+public static final BitSet <name> = new BitSet(new long[]{<words64:{<it>L};separator=",">});<\n>
+>>
+
+codeFileExtension() ::= ".java"
+
+// Boolean literals used when generating condition code for this target.
+true() ::= "true"
+false() ::= "false"
diff --git a/src/org/antlr/codegen/templates/Java/ST.stg b/src/org/antlr/codegen/templates/Java/ST.stg
new file mode 100644
index 0000000..dbe70eb
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Java/ST.stg
@@ -0,0 +1,163 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+group ST;
+
+/** Append StringTemplate runtime imports to the generated recognizer.
+ *  NOTE: the leading "@" region-override markers below were restored;
+ *  the mailing-list archiver had mangled "+@name" lines into " at name"
+ *  (the same mangling is visible on email addresses in the mail header),
+ *  which breaks the group file. */
+@outputFile.imports() ::= <<
+<@super.imports()>
+import org.antlr.stringtemplate.*;
+import org.antlr.stringtemplate.language.*;
+import java.util.HashMap;
+>>
+
+/** Add this to each rule's return value struct */
+@returnScope.ruleReturnMembers() ::= <<
+public StringTemplate st;
+public Object getTemplate() { return st; }
+public String toString() { return st==null?null:st.toString(); }
+>>
+
+/** Inject template-library plumbing (templateLib field, accessors, and
+ *  the STAttrMap convenience class) into the generated recognizer. */
+@genericParser.members() ::= <<
+<@super.members()>
+protected StringTemplateGroup templateLib =
+  new StringTemplateGroup("<name>Templates", AngleBracketTemplateLexer.class);
+
+public void setTemplateLib(StringTemplateGroup templateLib) {
+  this.templateLib = templateLib;
+}
+public StringTemplateGroup getTemplateLib() {
+  return templateLib;
+}
+/** allows convenient multi-value initialization:
+ *  "new STAttrMap().put(...).put(...)"
+ */
+public static class STAttrMap extends HashMap {
+  public STAttrMap put(String attrName, Object value) {
+    super.put(attrName, value);
+    return this;
+  }
+  public STAttrMap put(String attrName, int value) {
+    super.put(attrName, new Integer(value));
+    return this;
+  }
+}
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTemplate()",...)>
+>>
+
+/** Set retval.st from the matching rewrite alternative; when
+ *  backtracking, only once the parse is known valid.  In rewrite mode
+ *  the matched text is also patched in the token stream. */
+rewriteTemplate(alts) ::= <<
+
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( backtracking==0 ) {
+  <alts:rewriteTemplateAlt(); separator="else ">
+  <if(rewrite)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewrite)><replaceTextInLine()><endif>
+<endif>
+>>
+
+/** Replace the rule's matched token range with the rendered template via
+ *  TokenRewriteStream.replace (token boundaries come from the tree
+ *  adaptor for tree parsers, from start/LT(-1) otherwise). */
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+((TokenRewriteStream)input.getTokenStream()).replace(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start),
+  retval.st);
+<else>
+((TokenRewriteStream)input).replace(
+  ((Token)retval.start).getTokenIndex(),
+  input.LT(-1).getTokenIndex(),
+  retval.st);
+<endif>
+>>
+
+/** One "-> template" alternative; guarded by its predicate if present. */
+rewriteTemplateAlt() ::= <<
+// <it.description>
+<if(it.pred)>
+if (<it.pred>) {
+    retval.st = <it.alt>;
+}<\n>
+<else>
+{
+    retval.st = <it.alt>;
+}<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+null;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ *  STAttrMap.put returns "this", so the puts chain into one expression.
+ */
+rewriteExternalTemplate(name,args) ::= <<
+templateLib.getInstanceOf("<name>"<if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <<
+templateLib.getInstanceOf(<expr><if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <<
+new StringTemplate(templateLib, "<template>"<if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>).setAttribute("<attrName>",<expr>);
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+new StringTemplate(templateLib,<stringExpr>)
+>>
diff --git a/src/org/antlr/codegen/templates/ObjC/AST.stg b/src/org/antlr/codegen/templates/ObjC/AST.stg
new file mode 100644
index 0000000..4512d81
--- /dev/null
+++ b/src/org/antlr/codegen/templates/ObjC/AST.stg
@@ -0,0 +1,615 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+/** Import the ANTLR ObjC runtime umbrella header for AST support.
+ *  NOTE: the leading "@" region-override markers below were restored;
+ *  the mailing-list archiver had mangled "+@name" lines into " at name",
+ *  which breaks the group file. */
+@outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+#import \<ANTLR/ANTLR.h><\n>
+<endif>
+>>
+
+@parserHeaderFile.ivars() ::= <<
+<@super.ivars()>
+<parserIVars()>
+>>
+
+@parserHeaderFile.methodsdecl() ::= <<
+<@super.methodsdecl()>
+<parserMethodsDecl()>
+>>
+
+@genericParser.methods() ::= <<
+<@super.methods()>
+<parserMethods()>
+>>
+
+/** additional init code for tree support */
+@genericParser.init() ::= <<
+<@super.init()>
+[self setTreeAdaptor:[[[ANTLRCommonTreeAdaptor alloc] init] autorelease]];
+>>
+
+
+@genericParser.dealloc() ::= <<
+[self setTreeAdaptor:nil];
+<@super.dealloc()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserIVars() ::= <<
+id\<ANTLRTreeAdaptor> treeAdaptor;
+>>
+
+/** Declaration of additional tree support methods - go in interface of parserHeaderFile() */
+parserMethodsDecl() ::= <<
+- (id\<ANTLRTreeAdaptor>) treeAdaptor;
+- (void) setTreeAdaptor:(id\<ANTLRTreeAdaptor>)theTreeAdaptor;
+>>
+
+/** Definition of addition tree support methods - go in implementation of genericParser() */
+parserMethods() ::= <<
+- (id\<ANTLRTreeAdaptor>) treeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id\<ANTLRTreeAdaptor>)aTreeAdaptor<! retain-new/release-old setter (manual retain counting, pre-ARC) !>
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		[aTreeAdaptor retain];
+		[treeAdaptor release];
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+>>
+
+/** addition ivars for returnscopes */
+@returnScopeInterface.ivars() ::= <<
+<recognizer.ASTLabelType; null="id"> tree;
+>>
+
+/** the interface of returnScope methods
+ *  ("@" region markers restored; the mail archive mangled them to " at ") */
+@returnScopeInterface.methods() ::= <<
+- (<recognizer.ASTLabelType; null="id">) tree;
+- (void) setTree:(<recognizer.ASTLabelType; null="id">)aTree;
+>>
+
+/** the implementation of returnScope methods */
+@returnScope.methods() ::= <<
+- (<ASTLabelType>) tree
+{
+	return tree;
+}
+- (void) setTree:(<ASTLabelType>)aTree
+{
+	if (tree != aTree) {
+		[aTree retain];
+		[tree release];
+		tree = aTree;
+	}
+}
+
+- (void) dealloc
+{
+    [self setTree:nil];
+    [super dealloc];
+}
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = nil;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{<ASTLabelType> _<it.label.text>_tree = nil;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> _<it.label.text>_tree = nil;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{ANTLRRewriteRuleTokenStream *_stream_<it>=[[ANTLRRewriteRuleTokenStream alloc] initWithTreeAdaptor:treeAdaptor description:@"token <it>"];}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{ANTLRRewriteRuleSubtreeStream *_stream_<it>=[[ANTLRRewriteRuleSubtreeStream alloc] initWithTreeAdaptor:treeAdaptor description:@"rule <it>"];}; separator="\n">
+
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<[ruleDescriptor.allTokenRefsInAltsWithRewrites,ruleDescriptor.allRuleRefsInAltsWithRewrites]
+    :{[_stream_<it> release];}; separator="\n">
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(backtracking)>
+if ( backtracking == 0 ) {<\n>
+<endif>
+    [_<prevRuleRootRef()> setTree:(<ASTLabelType>)[treeAdaptor postProcessTree:root_0]];
+    [treeAdaptor setBoundariesForTree:[_<prevRuleRootRef()> tree] fromToken:[_<prevRuleRootRef()> start] toToken:[_<prevRuleRootRef()> stop]];
+<if(backtracking)>
+}
+<endif>
+<endif>
+[root_0 release];
+>>
+
+rewriteCodeLabelsCleanUp() ::= <<
+<referencedTokenLabels
+    :{[_stream_<it> release];};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{[_stream_<it> release];};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{[_stream_<it> release];};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{[_stream_<it> release];};
+    separator="\n"
+>
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+<! "@" region marker restored; the mail archive mangled "+@alt" to " at alt" !>
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+root_0 = (<ASTLabelType>)[treeAdaptor newEmptyTree];<\n>
+<endif>
+<endif>
+>>
+
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking == 0 ) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+[treeAdaptor addChild:_<label>_tree toTree:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking == 0 ) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefBang(...)>
+<listLabel(...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(...)>
+>>
+
+/** ID but track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking == 0 ) <endif>[_stream_<token> addElement:_<label>];<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list list_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefTrack(...)>
+<listLabel(...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+[treeAdaptor addChild:_<label>_tree toTree:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+})>
+>>
+
+matchSetRoot(s,label,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+})>
+>>
+
+matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (backtracking == 0) <endif>[treeAdaptor addChild:[_<label> tree] toTree:root_0];
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (backtracking == 0) <endif>root_0 = (<ASTLabelType>)[treeAdaptor makeNode:[_<label> tree] parentOf:root_0];
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (backtracking == 0) <endif>[_stream_<rule> addElement:[_<label> tree]];
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefTrack(...)>
+<listLabel(...)>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefBang(...)>
+<listLabel(...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+[treeAdaptor addChild:_<label>_tree toTree:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+wildcardRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+// TODO: ugh, am i really missing the combinations for Track and ListLabel?
+// there's got to be a better way
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+<if(backtracking)>
+if (backtracking == 0) {<\n>
+<endif>
+int i_0 = 0;
+root_0 = (<ASTLabelType>)[treeAdaptor newEmptyTree];
+[_<prevRuleRootRef()> setTree:root_0];
+<rewriteCodeLabels()>
+<alts:rewriteAlt(); separator="else ">
+<if(backtracking)>
+}
+<endif>
+<rewriteCodeLabelsCleanUp()>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{ANTLRRewriteRuleTokenStream *_stream_<it>=[[ANTLRRewriteRuleTokenStream alloc] initWithTreeAdaptor:treeAdaptor description:@"token <it>" element:_<it>];};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{ANTLRRewriteRuleTokenStream *_stream_<it>=[[ANTLRRewriteRuleTokenStream alloc] initWithTreeAdaptor:treeAdaptor description:@"token <it>" elements:_<it>_list];};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{ANTLRRewriteRuleSubtreeStream *_stream_<it>=[[ANTLRRewriteRuleSubtreeStream alloc] initWithTreeAdaptor:treeAdaptor description:@"token <it>" element:_<it>!=nil?[_<it> tree]:nil];};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{ANTLRRewriteRuleSubtreeStream *_stream_<it>=[[ANTLRRewriteRuleSubtreeStream alloc] initWithTreeAdaptor:treeAdaptor description:@"token <it>" elements:_list_<it>];};
+    separator="\n"
+>
+>>
+
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description> ;
+if ( <referencedElementsDeep:{el | [_stream_<el> hasNext]}; separator="||"> ) {
+	<alt>
+}
+<referencedElementsDeep:{el | [_stream_<el> reset];<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | [_stream_<el> hasNext]}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | [_stream_<el> reset];<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+{
+if ( !(<referencedElements:{el | [_stream_<el> hasNext]}; separator="||">) ) {
+    @throw [NSException exceptionWithName:@"RewriteEarlyExitException" reason:nil userInfo:nil];
+}
+while ( <referencedElements:{el | [_stream_<el> hasNext]}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | [_stream_<el> reset];<\n>}>
+}
+>>
+
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = nil;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)[treeAdaptor newEmptyTree];
+<root:rewriteElement()>
+<children:rewriteElement()>
+[treeAdaptor addChild:root_<treeLevel> toTree:root_<enclosingTreeLevel>];
+[root_<treeLevel> release];
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args) ::= <<
+<if(args)>
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:_<token>]; // TODO: args: <args; separator=", ">
+<endif>
+[treeAdaptor addChild:<if(args)>_<token>_tree<else>[_stream_<token> next]<endif> toTree:root_<treeLevel>];
+<if(args)>
+[_<token>_tree release];<\n>
+<endif>
+<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+[treeAdaptor addChild:[_stream_<label> next] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+[treeAdaptor addChild:[_stream_<label> next] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[_stream_<label> next] parentOf:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^(ID ...) */
+rewriteTokenRefRoot(token,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[_stream_<token> next] parentOf:root_<treeLevel>];<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
+<if(first(rest(args)))><! got two arguments - means create from token with custom text!>
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token> text:<first(rest(args))>];
+[treeAdaptor addChild:_<token>_tree toTree:root_<treeLevel>];
+[_<token>_tree release];<\n>
+<else><! at most one argument !>
+<if(first(args))>
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token>];
+[treeAdaptor addChild:_<token>_tree toTree:root_<treeLevel>];
+[_<token>_tree release];<\n>
+<else><! no argument at all !>
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithTokenType:<token> text:[tokenNames objectAtIndex:<token>]];
+[treeAdaptor addChild:_<token>_tree toTree:root_<treeLevel>];
+[_<token>_tree release];<\n>
+<endif><! one arg !>
+<endif><! two args !>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
+<if(first(rest(args)))><! got two arguments - means create from token with custom text!>
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token> text:<first(rest(args))>];
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:_<token>_tree parentOf:root_<treeLevel>];
+[_<token>_tree release];<\n>
+<else><! at most one argument !>
+<if(first(args))>
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token>];
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:_<token>_tree parentOf:root_<treeLevel>];
+[_<token>_tree release];<\n>
+<else><! no argument at all !>id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithTokenType:<token> text:[tokenNames objectAtIndex:<token>]];
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:_<token>_tree parentOf:root_<treeLevel>];
+[_<token>_tree release];<\n>
+<endif><! one arg !>
+<endif><! two args !>
+>>
+
+/** plain -> {foo} action: the action expression becomes the new root tree */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+/** Gen "rule" on the right of a rewrite: attach the next tree from the
+ *  rule's rewrite stream.  (Removed a stray "endif" template directive
+ *  that had no matching "if" and would break group loading.) */
+rewriteRuleRef(rule,dup) ::= <<
+[treeAdaptor addChild:[_stream_<rule> next] toTree:root_<treeLevel>];<\n>
+>>
+
+// Gen ^(rule ...): next tree from the rule's rewrite stream becomes the new root.
+rewriteRuleRefRoot(rule,dup) ::= <<
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:(id\<ANTLRTree>)[_stream_<rule> next] parentOf:root_<treeLevel>];<\n>
+>>
+
+// Gen {action} node in a rewrite: attach the action's result as a child.
+rewriteNodeAction(action) ::= <<
+[treeAdaptor addChild:<action> toTree:root_<treeLevel>];<\n>
+>>
+
+// Gen ^({action} ...): the action's result becomes the new root.
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:<action> parentOf:root_<treeLevel>];<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+[treeAdaptor addChild:[_<label> tree] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+[treeAdaptor addChild:[(ANTLR<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope *)[_stream_<label> next] tree] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[_<label> tree] parentOf:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[(ANTLR<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope *)[_stream_<label> next] tree] parentOf:root_<treeLevel>];<\n>
+>>
diff --git a/src/org/antlr/codegen/templates/ObjC/ASTDbg.stg b/src/org/antlr/codegen/templates/ObjC/ASTDbg.stg
new file mode 100644
index 0000000..06b7ec5
--- /dev/null
+++ b/src/org/antlr/codegen/templates/ObjC/ASTDbg.stg
@@ -0,0 +1,46 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+group ASTDbg;
+
+/*
+parserMembers() ::= <<
+protected TreeAdaptor adaptor =
+    new DebugTreeAdaptor(dbg,new CommonTreeAdaptor());
+public void setTreeAdaptor(TreeAdaptor adaptor) {
+    this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+}
+public TreeAdaptor getTreeAdaptor() {
+    return adaptor;
+}<\n>
+>>
+*/
+
+ at treeParserHeaderFile.superClassName ::= "ANTLRDebugTreeParser"
+
+ at rewriteElement.pregen() ::= "[debugListener locationLine:<e.line> column:<e.pos>];"
diff --git a/src/org/antlr/codegen/templates/ObjC/Dbg.stg b/src/org/antlr/codegen/templates/ObjC/Dbg.stg
new file mode 100644
index 0000000..99cc040
--- /dev/null
+++ b/src/org/antlr/codegen/templates/ObjC/Dbg.stg
@@ -0,0 +1,178 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Objective-C output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+ at headerFile.imports() ::= <<
+<@super.imports()>
+#import \<ANTLR/ANTLRDebug.h>
+>>
+
+ at parserHeaderFile.ivars() ::= <<
+int ruleLevel;
+NSArray *ruleNames;
+>>
+
+ at parserHeaderFile.methodsdecl() ::= <<
+-(BOOL) evalPredicate:(NSString *)predicate matched:(BOOL)result;<\n>
+>>
+
+ at genericParser.init() ::= <<
+ruleNames = [[NSArray alloc] initWithObjects:<rules:{rST | @"<rST.ruleName>"}; separator=", ", wrap="\n	">, nil];<\n>
+>>
+
+ at genericParser.dealloc() ::= <<
+[ruleNames release];<\n>
+>>
+
+ at genericParser.methods() ::= <<
+-(BOOL) evalPredicate:(NSString *)predicate matched:(BOOL)result
+{
+	[debugListener semanticPredicate:predicate matched:result];
+	return result;
+}<\n>
+>>
+
+/* bug: can't use @super.superClassName()> */
+ at parserHeaderFile.superClassName() ::= "ANTLRDebug<if(TREE_PARSER)>Tree<endif>Parser"
+
+ at rule.preamble() ::= <<
+ at try { [debugListener enterRule:@"<ruleName>"];
+if ( ruleLevel==0 ) [debugListener commence];
+ruleLevel++;
+[debugListener locationLine:<ruleDescriptor.tree.line> column:<ruleDescriptor.tree.column>];<\n>
+>>
+
+ at rule.postamble() ::= <<
+[debugListener locationLine:<ruleDescriptor.EORNode.line> column:<ruleDescriptor.EORNode.column>];<\n>
+}
+ at finally {
+    [debugListener exitRule:@"<ruleName>"];
+    ruleLevel--;
+    if ( ruleLevel==0 ) [debugListener terminate];
+}<\n>
+>>
+
+/* these are handled in the runtime for now.
+ * stinks, but that's the easiest way to avoid having to generate two
+ * methods for each synpred
+
+ at synpred.start() ::= "[debugListener beginBacktrack:backtracking];"
+
+ at synpred.stop() ::= "[debugListener endBacktrack:backtracking wasSuccessful:success];"
+
+ */
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "@try { [debugListener enterSubRule:<decisionNumber>];<\n>"
+
+exitSubRule() ::=
+    "} @finally { [debugListener exitSubRule:<decisionNumber>]; }<\n>"
+
+enterDecision() ::=
+    "@try { [debugListener enterDecision:<decisionNumber>];<\n>"
+
+exitDecision() ::=
+    "} @finally { [debugListener exitDecision:<decisionNumber>]; }<\n>"
+
+enterAlt(n) ::= "[debugListener enterAlt:<n>];<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+ at block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+ at block.postdecision() ::= "<exitDecision()>"
+
+ at block.postbranch() ::= "<exitSubRule()>"
+
+ at ruleBlock.predecision() ::= "<enterDecision()>"
+
+ at ruleBlock.postdecision() ::= "<exitDecision()>"
+
+ at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+ at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+ at positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+ at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+ at positiveClosureBlock.earlyExitException() ::=
+    "[debugListener recognitionException:eee];<\n>"
+
+ at closureBlock.preloop() ::= "<enterSubRule()>"
+
+ at closureBlock.postloop() ::= "<exitSubRule()>"
+
+ at closureBlock.predecision() ::= "<enterDecision()>"
+
+ at closureBlock.postdecision() ::= "<exitDecision()>"
+
+ at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+ at element.prematch() ::=
+    "[debugListener locationLine:<it.line> column:<it.pos>];"
+
+ at matchSet.mismatchedSetException() ::=
+    "[debugListener recognitionException:mse];"
+
+ at dfaState.noViableAltException() ::= "[debugListener recognitionException:nvae];"
+
+ at dfaStateSwitch.noViableAltException() ::= "[debugListener recognitionException:nvae];"
+
+dfaDecision(decisionNumber,description) ::= <<
+ at try {
+    // isCyclicDecision is only necessary for the Profiler, which isn't implemented yet.
+    // isCyclicDecision = YES;
+    <super.dfaDecision(...)>
+}
+ at catch (ANTLRNoViableAltException *nvae) {
+    [debugListener recognitionException:nvae];
+    @throw nvae;
+}
+>>
+
+ at cyclicDFA.errorMethod() ::= <<
+-(void) error:(ANTLRNoViableAltException *)nvae
+{
+    [[recognizer debugListener] recognitionException:nvae];
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+[self evalPredicate:@"<description>" result:<pred>];
+>>
diff --git a/src/org/antlr/codegen/templates/ObjC/ObjC.stg b/src/org/antlr/codegen/templates/ObjC/ObjC.stg
new file mode 100644
index 0000000..83449d8
--- /dev/null
+++ b/src/org/antlr/codegen/templates/ObjC/ObjC.stg
@@ -0,0 +1,1458 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006,2007 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+	Template group file for the Objective C code generator.
+	Heavily based on Java.stg
+
+	Written by Kay Roepke <kroepke(at)classdump.org>
+
+	This file is part of ANTLR and subject to the same license as ANTLR itself.
+*/
+
+group ObjC implements ANTLRCore;
+
+objcTypeInitMap ::= [
+	"int":"0",
+	"long":"0",
+	"float":"0.0",
+	"double":"0.0",
+	"boolean":"false",
+	"byte":"0",
+	"short":"0",
+	"char":"0",
+	"id":"nil",
+	default:"nil" // anything other than an atomic type
+]
+
+className() ::= "<name><! if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parser<endif !>"
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass,literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@imports>
+#import "<name><!if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parser<endif!>.h"
+<@end>
+
+<docComment>
+<recognizer>
+>>
+
+
+headerFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass,literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@imports>
+#import \<Cocoa/Cocoa.h>
+#import \<ANTLR/ANTLR.h>
+<@end>
+
+<actions.(actionScope).header>
+
+<if(LEXER)>
+<lexerHeaderFile(...)>
+<endif>
+<if(PARSER)>
+<parserHeaderFile(...)>
+<endif>
+<if(TREE_PARSER)>
+<treeParserHeaderFile(...)>
+<endif>
+>>
+
+lexerHeaderFile() ::=
+<<
+<cyclicDFAs:cyclicDFAInterface()>
+
+#pragma mark Rule return scopes start
+<rules:{rule |
+<rule.ruleDescriptor:returnScopeInterface(ruleDescriptor=rule.ruleDescriptor)>
+}>
+#pragma mark Rule return scopes end
+
+#pragma mark Tokens
+<tokens:{#define <name>_<it.name>	<it.type>}; separator="\n">
+
+ at interface <className()> : ANTLRLexer {
+	<cyclicDFAs:{dfa | <name>DFA<dfa.decisionNumber> *dfa<dfa.decisionNumber>;}; separator="\n">
+	<synpreds:{p | SEL <p>SyntacticPredicate;}; separator="\n">
+	<actions.lexer.ivars>
+}
+
+<actions.lexer.methodsdecl>
+
+<rules:{rule |
+- (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>) <if(!rule.ruleDescriptor.isSynPred)>m<endif><rule.ruleName><if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>;
+}>
+
+
+ at end
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="id<ANTLRToken> ",
+      filterMode) ::= <<
+<cyclicDFAs:cyclicDFA()>
+
+/** As per Terence: No returns for lexer rules!
+#pragma mark Rule return scopes start
+<rules:{rule |
+<rule.ruleDescriptor:returnScope(ruleDescriptor=rule.ruleDescriptor)>
+}>
+#pragma mark Rule return scopes end
+*/
+ at implementation <className()>
+
+static NSArray *tokenNames;
+
+<actions.lexer.methods>
+
++ (void) initialize
+{
+    // todo: get tokenNames into lexer - requires changes to CodeGenerator.java and ANTLRCore.sti
+    tokenNames = [[NSArray alloc] init];
+}
+
+- (id) initWithCharStream:(id\<ANTLRCharStream>)anInput
+{
+	if (nil!=(self = [super initWithCharStream:anInput])) {
+		<if(memoize)>
+		// init memoize facility
+		<endif>
+		<synpreds:{p | <lexerSynpred(name=p)>};separator="\n">
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [[<name>DFA<dfa.decisionNumber> alloc] initWithRecognizer:self];}; separator="\n">
+		<actions.lexer.init>
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+	<cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
+<actions.lexer.dealloc>
+	[super dealloc];
+}
+
++ (NSString *) tokenNameForType:(int)aTokenType
+{
+    return nil;
+}
+
++ (NSArray *) tokenNames
+{
+    return tokenNames;
+}
+
+- (NSString *) grammarFileName
+{
+	return @"<fileName>";
+}
+
+<if(actions.lexer.reset)>
+- (void) reset
+{
+<actions.lexer.reset>
+	[super reset];
+}
+<endif>
+
+<if(filterMode)>
+<filteringNextToken()>
+<endif>
+
+<rules; separator="\n\n">
+
+ at end
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ */
+filteringNextToken() ::= <<
+- (id\<ANTLRToken>) nextToken
+{
+    while (YES) {
+        if ( [input LA:1] == ANTLRCharStreamEOF ) {
+            return nil; // should really be a +eofToken call here -> go figure
+        }
+        [self setToken:nil];
+        _channel = ANTLRTokenChannelDefault;
+        _tokenStartLine = [input line];
+        _tokenCharPositionInLine = [input charPositionInLine];
+        tokenStartCharIndex = [self charIndex];
+        @try {
+            int m = [input mark];
+            backtracking = 1;
+            failed = NO;
+            [self mTokens];
+            backtracking = 0;
+            if ( failed ) {
+                [input rewind:m];
+                [input consume]; <! // advance one char and try again !>
+            } else {
+                [self emit];
+                return token;
+            }
+        }
+        @catch (ANTLRRecognitionException *re) {
+            // shouldn't happen in backtracking mode, but...
+            [self reportError:re];
+            [self recover:re];
+        }
+    }
+}
+>>
+
+filteringActionGate() ::= "backtracking==1"
+
+treeParserHeaderFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment,
+           recognizer, name, tokens, tokenNames, rules, cyclicDFAs,
+           bitsets, buildTemplate, profile, backtracking, synpreds,
+           memoize, numRules, fileName, ANTLRVersion, generatedTimestamp, trace, scopes, superClass="ANTLRTreeParser") ::= <<
+<parserHeaderFile(...)>
+>>
+
+parserHeaderFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment,
+           recognizer, name, tokens, tokenNames, rules, cyclicDFAs,
+           bitsets, buildTemplate, profile, backtracking, synpreds,
+           memoize, numRules, fileName, ANTLRVersion, generatedTimestamp, trace, scopes, superClass="ANTLRParser") ::=
+<<
+
+<cyclicDFAs:cyclicDFAInterface()>
+
+#pragma mark Tokens
+<tokens:{#define <name>_<it.name>	<it.type>}; separator="\n">
+
+#pragma mark Dynamic Global Scopes
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(scope=it)><endif>}>
+
+#pragma mark Dynamic Rule Scopes
+<rules:{rule |
+<rule.ruleDescriptor.ruleScope:ruleAttributeScopeDecl(scope=it)>
+}>
+
+#pragma mark Rule Return Scopes
+<rules:{rule |
+<rule.ruleDescriptor:returnScopeInterface(ruleDescriptor=rule.ruleDescriptor)>
+}>
+
+
+ at interface <className()> : <@superClassName><superClass><@end> {
+
+	<cyclicDFAs:{dfa | <name>DFA<dfa.decisionNumber> *dfa<dfa.decisionNumber>;}; separator="\n">
+	<synpreds:{p | SEL <p>SyntacticPredicate;}; separator="\n">
+	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeIVar(scope=it)><endif>}>
+	<rules:{rule |
+	<rule.ruleDescriptor.ruleScope:ruleAttributeScopeIVar(scope=it)>
+	}>
+	<@ivars()>
+
+	<actions.parser.ivars>
+ }
+
+<actions.parser.methodsdecl>
+
+<rules:{rule |
+- (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>) <rule.ruleName><if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>;
+}>
+
+<@methodsdecl()>
+
+ at end
+>>
+
+/** How to generate a parser */
+genericParser(name, scopes, tokens, tokenNames, rules, numRules,
+              cyclicDFAs, bitsets, inputStreamType, superClass,
+              ASTLabelType="id", labelType, members) ::= <<
+
+<cyclicDFAs:cyclicDFA()>
+
+#pragma mark Bitsets
+<bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>
+
+#pragma mark Dynamic Global Scopes
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeImpl(scope=it)><endif>}>
+
+#pragma mark Dynamic Rule Scopes
+<rules:{rule |
+<rule.ruleDescriptor.ruleScope:ruleAttributeScopeImpl(scope=it)>
+}>
+
+#pragma mark Rule return scopes start
+<rules:{rule |
+<rule.ruleDescriptor:returnScope(ruleDescriptor=rule.ruleDescriptor)>
+}>
+
+ at implementation <className()>
+
+static NSArray *tokenNames;
+
++ (void) initialize
+{
+	<bitsets:bitsetInit(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>
+	tokenNames = [[NSArray alloc] initWithObjects:@"\<invalid>", @"\<EOR>", @"\<DOWN>", @"\<UP>", <tokenNames:{@<it>}; separator=", ", wrap="\n	">, nil];
+}
+
+<if(PARSER)>
+- (id) initWithTokenStream:(<inputStreamType>)aStream
+{
+	if ((self = [super initWithTokenStream:aStream])) {
+<else><!TREE_PARSER!>
+- (id) initWithTreeNodeStream:(<inputStreamType>)aStream
+{
+	if ((self = [super initWithTreeNodeStream:aStream])) {
+<endif><\n>
+<if(memoize)>
+		ruleMemo = [[NSMutableDictionary alloc] initWithCapacity:<numRules>+1];
+<endif>
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [[<name>DFA<dfa.decisionNumber> alloc] initWithRecognizer:self];}; separator="\n">
+		<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeInit(scope=it)><endif>}>
+		<rules:{rule |
+		<rule.ruleDescriptor.ruleScope:ruleAttributeScopeInit(scope=it)>
+		}>
+		<actions.parser.init>
+		<@init()>
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+<if(memoize)>
+	[ruleMemo release];
+<endif>
+	<cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
+	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDealloc(scope=it)><endif>}>
+	<actions.parser.dealloc>
+	<@dealloc()>
+	[super dealloc];
+}
+
+- (NSString *) grammarFileName
+{
+	return @"<fileName>";
+}
+
+<actions.parser.methods>
+
+<rules; separator="\n\n">
+
+<synpreds:{p | <synpred(p)>}>
+
+<@methods()>
+
+ at end
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="ANTLRParser", labelType="id<ANTLRToken> ", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="id\<ANTLRTokenStream>", ...)>
+>>
+
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ANTLRASTLabelType>}, ASTLabelType="id", superClass="ANTLRTreeParser", members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="id\<ANTLRTreeNodeStream>", ...)>
+>>
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+- (void) <ruleName>
+{
+    <if(trace)>NSLog(@"enter <ruleName> %d failed=%@ backtracking=%d", [input LA:1], failed ? @"YES" : @"NO", backtracking);
+    @try {
+        <block>
+    }
+    @finally {
+        NSLog(@"exit <ruleName> %d failed=%@ backtracking=%d", [input LA:1], failed ? @"YES" : @"NO", backtracking);
+    }
+<else>
+    <block>
+<endif>
+}
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (backtracking > 0) {failed=YES; return <ruleReturnValue()>;}<endif>
+>>
+
+synpred(name) ::= <<
+<!name>SyntacticPredicate = @selector(<name>); !>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( backtracking>0 && [self alreadyParsedRule:input ruleIndex:<ruleDescriptor.index>] ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+
+/** How to generate code for a rule.
+ *  The return type aggregates are declared in the header file (headerFile template)
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+// $ANTLR start <ruleName>
+// <fileName>:<description>
+- (<returnType()>) <ruleName><ruleDescriptor.parameterScope:parameterScope(scope=it)>
+{
+    <if(trace)>NSLog(@"enter <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], failed ? @"YES" : @"NO", backtracking);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    <ruleMemoization(name=ruleName)>
+    @try {
+        <block>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+	<actions.(actionScope).rulecatch>
+<else>
+	@catch (ANTLRRecognitionException *re) {
+		[self reportError:re];
+		[self recover:input exception:re];
+	}<\n>
+<endif>
+<endif>
+<endif>
+	@finally {
+		<if(trace)>NSLog(@"exit <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], failed ? @"YES" : @"NO", backtracking);<endif>
+		<ruleCleanUp()>
+		<(ruleDescriptor.actions.finally):execAction()>
+		<ruleScopeCleanUp()>
+	}
+	<@postamble()>
+	return <ruleReturnValue()>;
+}
+// $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> _retval = [[[<returnTypeName()> alloc] init] autorelease];
+[_retval setStart:[input LT:1]];<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<returnType()> _<ruleDescriptor.singleValueReturnName>;
+<endif>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = [input index];
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{[<name>_<it>_stack addObject:[[[<name><it>Scope alloc] init] autorelease]];}; separator="\n">
+<ruleDescriptor.ruleScope:{[<name>_<it.name>_stack addObject:[[[<name><it.name>Scope alloc] init] autorelease]];}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{[<name>_<it>_stack removeLastObject];}; separator="\n">
+<ruleDescriptor.ruleScope:{[<name>_<it.name>_stack removeLastObject];}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<labelType> _<it.label.text> = nil;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{NSMutableArray *_list_<it.label.text> = nil;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|ANTLRRuleReturnScope <ll.label.text> = nil;}; separator="\n">
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+_<ruleDescriptor.singleValueReturnName>
+<else>
+_retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+// token+rule list labels
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{[_list_<it.label.text> release];}; separator="\n">
+<if(ruleDescriptor.hasMultipleReturnValues)>
+[_retval setStop:[input LT:-1]];<\n>
+<endif>
+<if(memoize)>
+<if(backtracking)>
+if ( backtracking>0 ) { [self memoize:input ruleIndex:<ruleDescriptor.index> startIndex:<ruleDescriptor.name>_StartIndex]; }
+<endif>
+<endif>
+>>
+
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block, memoize) ::= <<
+
+- (void) m<ruleName><if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>
+{
+    <if(trace)>NSLog(@"enter <ruleName> %C line=%d:%d failed=%@ backtracking=%d", [input LA:1], [self line], [self charPositionInLine], failed ? @"YES" : @"NO", backtracking);<endif>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleMemoization(name=ruleName)>
+    @try {
+        ruleNestingLevel++;
+<ruleDescriptor.actions.init>
+<if(nakedBlock)>
+        <block><\n>
+<else>
+        int _type = <name>_<ruleName>;
+        <block>
+        self->_tokenType = _type;<\n>
+<endif>
+    }
+    @finally {
+        ruleNestingLevel--;
+        <if(trace)>NSLog(@"exit <ruleName> %C line=%d:%d failed=%@ backtracking=%d", [input LA:1], [self line], [self charPositionInLine], failed ? @"YES" : @"NO", backtracking);<endif>
+        // rule cleanup
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.finally):execAction()>
+    }
+    return;
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+- (void) mTokens
+{
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description> // block
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description> //ruleblock
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description> // ruleBlockSingleAlt
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description> // blockSingleAlt
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 0 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>	// positiveClosureBlock
+int cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+
+do {
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+	default :
+	    if ( cnt<decisionNumber> >= 1 )  goto loop<decisionNumber>;
+	    <if(backtracking)>
+            <ruleBacktrackFailure()>
+            <endif>
+			ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:<decisionNumber>];
+			<@earlyExitException()>
+			@throw eee;
+    }
+    cnt<decisionNumber>++;
+} while (YES); loop<decisionNumber>: ;
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+<decls>
+<@preloop()>
+do {
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+	default :
+	    goto loop<decisionNumber>;
+    }
+} while (YES); loop<decisionNumber>: ;
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+// <fileName>:<description> // alt
+{
+<@declarations()>
+<elements:element()>
+<@cleanup()>
+}
+>>
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex) ::= <<
+<if(label)>
+_<label>=(<labelType>)[input LT:1];<\n>
+<endif>
+[self match:input tokenType:<token> follow:FOLLOW_<token>_in_<ruleName><elementIndex>]; <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (_list_<label> == nil)
+	_list_<label> = [[NSMutableArray alloc] init];
+[_list_<label> addObject:_<elem>];
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+int _<label> = [input LA:1];<\n>
+<endif>
+[self matchChar:<char>];
+<checkRuleBacktrackFailure()><\n>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+_<label> = [input LA:1];<\n>
+<endif>
+[self matchRangeFromChar:<a> to:<b>];<checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+_<label> = (<labelType>)[input LT:1];<\n>
+<endif>
+if (<s>) {
+	<postmatchCode>
+	[input consume];
+<if(!LEXER)>
+	errorRecovery = NO;
+<endif>
+	<if(backtracking)>failed = NO;<endif>
+} else {
+	<ruleBacktrackFailure()>
+	ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
+	<@mismatchedSetException()>
+<if(LEXER)>
+	[self recover:mse];
+<else>
+	[self recoverFromMismatchedSet:input exception:mse follow:FOLLOW_set_in_<ruleName><elementIndex>];
+<endif>
+	@throw mse;
+}<\n>
+>>
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int _<label>Start = [self charIndex];
+[self matchString:<string>];
+<checkRuleBacktrackFailure()>
+_<label> = [[ANTLRCommonToken alloc] initWithInput:input tokenType:ANTLRTokenTypeInvalid channel:ANTLRTokenChannelDefault start:_<label>Start stop:[self charIndex]];
+<else>
+[self matchString:<string>];
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+_<label> = (<labelType>)[input LT:1];<\n>
+<endif>
+[self matchAny:input];
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+int _<label> = [input LA:1];<\n>
+<endif>
+[self matchAny];
+<checkRuleBacktrackFailure()><\n>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args) ::= <<
+[following addObject:FOLLOW_<rule>_in_<ruleName><elementIndex>];
+<if(label)>
+_<label> = [self <rule><if(args)>:<args; separator=" :"><endif>];<\n>
+<else>
+[self <rule><if(args)>:<args; separator=" :"><endif>];<\n>
+<endif>
+[following removeLastObject];
+<checkRuleBacktrackFailure()><\n>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex) ::= <<
+<if(label)>
+int _<label>Start<elementIndex> = [self charIndex];
+[self m<rule><if(args)>:<args; separator=" :"><endif>];
+<checkRuleBacktrackFailure()><\n>
+_<label> = [[ANTLRCommonToken alloc] initWithInput:input tokenType:ANTLRTokenTypeInvalid channel:ANTLRTokenChannelDefault start:_<label>Start<elementIndex> stop:[self charIndex]-1];
+[_<label> setLine:[self line]];
+<else>
+[self m<rule><if(args)>:<args; separator=" :"><endif>];
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int _<label>Start<elementIndex> = [self charIndex];
+[self matchChar:ANTLRCharStreamEOF];
+<checkRuleBacktrackFailure()><\n>
+<labelType> _<label> = [[ANTLRCommonToken alloc] initWithInput:input tokenType:ANTLRTokenTypeEOF channel:ANTLRTokenChannelDefault start:_<label>Start<elementIndex> stop:[self charIndex]-1];
+[_<label> setLine:[self line]];
+<else>
+[self matchChar:ANTLRCharStreamEOF];
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root,actionsAfterRoot,children,nullableChildList) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( [input LA:1] == ANTLRTokenTypeDOWN ) {
+    [self match:input tokenType:ANTLRTokenTypeDOWN follow:nil]; <checkRuleBacktrackFailure()>
+    <children:element()>
+    [self match:input tokenType:ANTLRTokenTypeUP follow:nil]; <checkRuleBacktrackFailure()>
+}
+<else>
+[self match:input tokenType:ANTLRTokenTypeDOWN follow:nil]; <checkRuleBacktrackFailure()>
+<children:element()>
+[self match:input tokenType:ANTLRTokenTypeUP follow:nil]; <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    @throw [ANTLRFailedPredicateException exceptionWithRuleName:@"<ruleName>" predicate:@"<description>" stream:input];
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+/** Fixed-lookahead DFA state as an if/else chain: cache LA(k) in a local,
+ *  test each edge in turn, and fall through to the trailing else when no
+ *  edge matches.  The else either selects the EOT-predicted alt or (after
+ *  flagging backtrack failure) throws ANTLRNoViableAltException.
+ */
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+	int LA<decisionNumber>_<stateNumber> = [input LA:<k>];
+	<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+	alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+	<ruleBacktrackFailure()>
+    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException exceptionWithDecision:<decisionNumber> state:<stateNumber> stream:input];
+	<@noViableAltException()>
+	@throw nvae;<\n>
+<endif>
+	}
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+	int LA<decisionNumber>_<stateNumber> = [input LA:<k>];
+	<edges; separator="\nelse ">
+}
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+	int LA<decisionNumber>_<stateNumber> = [input LA:<k>];
+	<edges; separator="\nelse "><\n>
+	<if(eotPredictsAlt)>
+	else {
+	    alt<decisionNumber> = <eotPredictsAlt>;
+	}<\n>
+	<endif>
+}
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( <labelExpr> <if(predicates)>&& <predicates><endif>) {
+	<targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ([input LA:<k>]) {
+	<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+	alt<decisionNumber> = <eotPredictsAlt>;
+<else> {
+	<ruleBacktrackFailure()>
+    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException exceptionWithDecision:<decisionNumber> state:<stateNumber> stream:input];
+	<@noViableAltException()>
+	@throw nvae;<\n>
+	}
+<endif>
+}
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ([input LA:<k>]) {
+	<edges; separator="\n">
+}<\n>
+>>
+
+/** Loopback DFA state rendered as a switch; when EOT predicts an alt,
+ *  the default clause selects it instead of raising an error.
+ *  Note: the alt assignment must end in ';' or the generated ObjC
+ *  ("altN = M" followed by "break;") fails to compile.
+ */
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ([input LA:<k>]) {
+	<edges; separator="\n">
+<if(eotPredictsAlt)>
+default:
+	alt<decisionNumber> = <eotPredictsAlt>;
+	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+	<targetState>
+	break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = [dfa<decisionNumber> predict];
+>>
+
+/** Used in headerFile */
+cyclicDFAInterface(dfa) ::= <<
+#pragma mark Cyclic DFA interface start <name>DFA<dfa.decisionNumber>
+ at interface <name>DFA<dfa.decisionNumber> : ANTLRDFA {} @end<\n>
+#pragma mark Cyclic DFA interface end <name>DFA<dfa.decisionNumber>
+>>
+
+/** Used in lexer/parser implementation files */
+cyclicDFA(dfa) ::= <<
+#pragma mark Cyclic DFA implementation start <name>DFA<dfa.decisionNumber>
+ at implementation <name>DFA<dfa.decisionNumber>
+const static int <name>dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
+    {<dfa.eot; wrap="\n     ", separator=",", null="-1">};
+const static int <name>dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
+    {<dfa.eof; wrap="\n     ", separator=",", null="-1">};
+const static unichar <name>dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
+    {<dfa.min; wrap="\n     ", separator=",", null="0">};
+const static unichar <name>dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
+    {<dfa.max; wrap="\n     ", separator=",", null="0">};
+const static int <name>dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
+    {<dfa.accept; wrap="\n     ", separator=",", null="-1">};
+const static int <name>dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
+    {<dfa.special; wrap="\n     ", separator=",", null="-1">};
+const static int <name>dfa<dfa.decisionNumber>_transition[] = {};
+<dfa.edgeTransitionClassMap.keys:{ table |
+const static int <name>dfa<dfa.decisionNumber>_transition<i0>[] = {<table; separator=", ", wrap="\n	", null="-1">};
+}; null="">
+
+- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
+{
+	if ((self = [super initWithRecognizer:theRecognizer]) != nil) {
+		eot = <name>dfa<dfa.decisionNumber>_eot;
+		eof = <name>dfa<dfa.decisionNumber>_eof;
+		min = <name>dfa<dfa.decisionNumber>_min;
+		max = <name>dfa<dfa.decisionNumber>_max;
+		accept = <name>dfa<dfa.decisionNumber>_accept;
+		special = <name>dfa<dfa.decisionNumber>_special;
+		if (!(transition = calloc(<dfa.numberOfStates>, sizeof(void*)))) {
+			[self release];
+			return nil;
+		}
+		<dfa.transitionEdgeTables:{whichTable|transition[<i0>] = <name>dfa<dfa.decisionNumber>_transition<whichTable>;}; separator="\n", null="">
+	}
+	return self;
+}
+
+<if(dfa.specialStateSTs)>
+- (int) specialStateTransition:(int) s
+{
+	int _s = s;
+	switch ( s ) {
+ 		<dfa.specialStateSTs:{state |
+		case <i0> : <! compressed special state numbers 0..n-1 !>
+		<state>}; separator="\n">
+	}
+<if(backtracking)>
+	if ([recognizer isBacktracking]) {
+		[recognizer setIsFailed:YES];
+		return -1;
+	}<\n>
+<endif>
+	ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException exceptionWithDecision:<dfa.decisionNumber> state:_s stream:[recognizer input]];
+	<! [self error:nvae]; !> <! for debugger - do later !>
+	@throw nvae;
+}<\n>
+<endif>
+
+- (void) dealloc
+{
+	free(transition);
+	[super dealloc];
+}
+
+- (NSString *) description
+{
+	return @"<dfa.description>";
+}
+
+<@errorMethod()>
+
+ at end
+#pragma mark Cyclic DFA implementation end <name>DFA<dfa.decisionNumber>
+<\n>
+>>
+
+/** One state inside a cyclic DFA's specialStateTransition switch.
+ *  On a semantic-predicate state the input stream is rewound first
+ *  (presumably undoing lookahead consumed before predicate evaluation —
+ *  confirm against the runtime DFA implementation); otherwise LA(1) is
+ *  cached in a local.  Edges assign the next state to s; s remains -1 when
+ *  no edge matches, and the caller falls through to the error path.
+ */
+cyclicDFAState(decisionNumber, stateNumber,edges,needErrorClause,semPredState) ::= <<
+<if(semPredState)>
+[[recognizer input] rewind];<\n>
+<else>
+int LA<decisionNumber>_<stateNumber> = [input LA:1];<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(first(operands)<rest(operands):{o | ||<o>}>)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "<pred>"
+
+/** synpreds are b0rken in cyclic DFA special states
+ *  Damn! For now, work around with using the selectors directly, and by providing a trampoline evalSynPred method in
+ *  ANTLRDFA
+ */
+/* evalSynPredicate(pred,description) ::= "[self evaluateSyntacticPredicate:<pred>SyntacticPredicate stream:input]" */
+evalSynPredicate(pred,description) ::= "[self evaluateSyntacticPredicate:@selector(<pred>)]"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "[input LA:<k>]==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "([input LA:<k>]\>=<lower> && [input LA:<k>]\<=<upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+globalAttributeScopeDecl(scope) ::= <<
+ at interface <name><scope.name>Scope : NSObject {
+	<scope.attributes:{<it.decl>;}; separator="\n">
+}
+// use KVC to access attributes!
+ at end
+>>
+
+globalAttributeScopeIVar(scope) ::= <<
+NSMutableArray *<name>_<scope.name>_stack;
+>>
+
+globalAttributeScopeImpl(scope) ::= <<
+ at implementation <name><scope.name>Scope
+ at end
+>>
+
+globalAttributeScopeInit(scope) ::= <<
+<name>_<scope.name>_stack = [[NSMutableArray alloc] init];
+>>
+
+globalAttributeScopeDealloc(scope) ::= <<
+[<name>_<scope.name>_stack release];
+>>
+
+ruleAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+ at interface <name><scope.name>Scope : NSObject {
+	<scope.attributes:{<it.decl>;}; separator="\n">
+}
+// use KVC to access attributes!
+ at end
+<endif>
+>>
+
+ruleAttributeScopeIVar(scope) ::= <<
+NSMutableArray *<name>_<scope.name>_stack;
+>>
+
+ruleAttributeScopeImpl(scope) ::= <<
+<if(scope.attributes)>
+ at implementation <name><scope.name>Scope
+ at end
+<endif>
+>>
+
+ruleAttributeScopeInit(scope) ::= <<
+<name>_<scope.name>_stack = [[NSMutableArray alloc] init];
+>>
+
+ruleAttributeScopeDealloc(scope) ::= <<
+[<name>_<scope.name>_stack release];
+>>
+
+
+returnTypeName() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<className()>_<ruleDescriptor.name>_return
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+<else>
+void<! for synpreds this is always void !>
+<endif>
+>>
+
+returnType() ::= <<
+<returnTypeName()><if(!ruleDescriptor.isSynPred)><if(ruleDescriptor.hasMultipleReturnValues)> *<endif><endif>
+>>
+
+/** Generate the Objective-C type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<className()>_<referencedRule.name>_return *
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<objcTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> _<label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScopeInterface(ruleDescriptor) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+ at interface <returnTypeName()> : ANTLR<if(TREE_PARSER)>Tree<endif>ParserRuleReturnScope {
+    <ruleDescriptor.returnScope.attributes:{<it.decl>;}; separator="\n">
+    <@ivars()>
+}
+<@methods()>
+ at end
+<endif>
+<endif>
+>>
+
+returnScope(ruleDescriptor) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+ at implementation <returnTypeName()>
+<@methods()>
+ at end
+<endif>
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{:(<it.type>)<it.name>}; separator=" ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+[<scopeAttributeRefStack()> valueForKey:@"<attr.name>"]
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+[<scopeAttributeRefStack()> setValue:<expr> forKey:@"<attr.name>"];
+>>
+
+scopeAttributeRefStack() ::= <<
+<if(negIndex)>
+[<name>_<scope>_stack objectAtIndex:[<name>_<scope>_stack count]-<negIndex>-1]
+<else>
+<if(index)>
+[<name>_<scope>_stack objectAtIndex:<index>]
+<else>
+[<name>_<scope>_stack lastObject]
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<name>_<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+[<scope> valueForKey:@"<attr.name>"]
+<else>
+_<scope>
+<endif>
+>>
+
+/** Read a return-value attribute ($attr).  With multiple return values the
+ *  attribute lives in the _retval scope object and is read via KVC
+ *  (mirrors ruleLabelRef); the original emitted a malformed setter
+ *  "[_retval setValue: forKey:...]" here, which is not compilable ObjC.
+ *  Writes are handled by returnSetAttributeRef.
+ */
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+[_retval valueForKey:@"<attr.name>"]
+<else>
+_<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+[_retval setValue:<expr> forKey:@"<attr.name>"];
+<else>
+_<attr.name> =<expr>;
+// double check this after beta release!
+[_<attr.name> retain];
+<endif>
+>>
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "_<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "_list_<label>"
+
+
+// not sure the next are the right approach; and they are evaluated early;
+// they cannot see TREE_PARSER or PARSER attributes for example. :(
+
+tokenLabelPropertyRef_text(scope,attr) ::= "[_<scope> text]"
+tokenLabelPropertyRef_type(scope,attr) ::= "[_<scope> type]"
+tokenLabelPropertyRef_line(scope,attr) ::= "[_<scope> line]"
+tokenLabelPropertyRef_pos(scope,attr) ::= "[_<scope> charPositionInLine]"
+tokenLabelPropertyRef_channel(scope,attr) ::= "[_<scope> channel]"
+tokenLabelPropertyRef_index(scope,attr) ::= "[_<scope> tokenIndex]"
+tokenLabelPropertyRef_tree(scope,attr) ::= "_<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "[_<scope> start]"
+ruleLabelPropertyRef_stop(scope,attr) ::= "[_<scope> stop]"
+ruleLabelPropertyRef_tree(scope,attr) ::= "[_<scope> tree]"
+ruleLabelPropertyRef_text(scope,attr) ::= "[input substringWithRange:NSMakeRange([_<scope> start], [_<scope> stop]-[_<scope> start])]"
+ruleLabelPropertyRef_st(scope,attr) ::= "[_<scope> st]"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "_<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "[_<scope> type]"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "[_<scope> line]"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "[_<scope> charPositionInLine]"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "[_<scope> channel]"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "[_<scope> tokenIndex]"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "[_<scope> text]"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "[_retval start]"
+rulePropertyRef_stop(scope,attr) ::= "[_retval stop]"
+rulePropertyRef_tree(scope,attr) ::= "[_retval tree]"
+rulePropertyRef_text(scope,attr) ::= "[input substringWithRange:NSMakeRange(_start, [input index]-_start)]"
+rulePropertyRef_st(scope,attr) ::= "[_retval st]"
+
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "[_retval setValue:<expr> forKey:@\"tree\"]"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "<\n>#error StringTemplates are unsupported<\n>"
+
+lexerRulePropertyRef_text(scope,attr) ::= "[self text]"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "self->_tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "self->_tokenCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "self->_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "([self charIndex]-1)"
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+if ( backtracking==0 ) {
+  <action>
+}
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+const static unsigned long long <name>_data[] = {<words64:{<it>LL};separator=",">};
+static ANTLRBitSet *<name>;
+<! ANTLRBitSet *<name> = [[ANTLRBitSet alloc] initWithBits:<name>_data count:<length(words64)>];<\n> !>
+>>
+
+bitsetInit(name, words64) ::= <<
+<name> = [[ANTLRBitSet alloc] initWithBits:<name>_data count:<length(words64)>];<\n>
+>>
+
+codeFileExtension() ::= ".m"
+headerFileExtension() ::= ".h"
+
+true() ::= "YES"
+false() ::= "NO"
diff --git a/src/org/antlr/codegen/templates/Python/AST.stg b/src/org/antlr/codegen/templates/Python/AST.stg
new file mode 100644
index 0000000..2125271
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Python/AST.stg
@@ -0,0 +1,478 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* in sync with Java/AST.stg revision 36 */
+
+group AST;
+
+/** Guard <block> so it only executes when the parser is not currently
+ *  backtracking (self.backtracking == 0); when the grammar uses no
+ *  backtracking at all, emit the block unguarded.
+ */
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if self.backtracking == 0:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+from antlr3.tree import *<\n>
+<endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+self.adaptor = CommonTreeAdaptor()<\n>
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+self.tree = None
+>>
+
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+root_0 = None<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{<it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{stream_<it> = RewriteRuleTokenStream(self.adaptor, "token <it>")}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "rule <it>")}; separator="\n">
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<finishedBacktracking({
+retval.tree = self.adaptor.rulePostProcessing(root_0)
+self.adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)})>
+<endif>
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+root_0 = self.adaptor.nil()<\n>
+<else>
+<endif>
+<endif>
+>>
+
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+
+<label>_tree = self.adaptor.createWithPayload(<label>)
+self.adaptor.addChild(root_0, <label>_tree)<\n>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = self.adaptor.createWithPayload(<label>)
+root_0 = self.adaptor.becomeRoot(<label>_tree, root_0)})>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefBang(...)>
+<listLabel(...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(...)>
+>>
+
+/** ID but track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefTrack(...)>
+<listLabel(...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<finishedBacktracking({self.adaptor.addChild(root_0, self.adaptor.createWithPayload(<label>))})>})>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={<finishedBacktracking({<!FIXME(40,untested)!>root_0 = self.adaptor.becomeRoot(self.adaptor.createWithPayload(<label>), root_0)})>})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({self.adaptor.addChild(root_0, <label>.tree)})>
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({root_0 = self.adaptor.becomeRoot(<label>.tree, root_0)})>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({stream_<rule>.add(<label>.tree)})>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefTrack(...)>
+<listLabel(...)>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefBang(...)>
+<listLabel(...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(...)>
+>>
+
+// WILDCARD AST
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self.adaptor.createWithPayload(<label>)
+self.adaptor.addChild(root_0, <label>_tree)})>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self.adaptor.createWithPayload(<label>)
+root_0 = self.adaptor.becomeRoot(<label>_tree, root_0)})>
+>>
+
+// TODO: ugh, am i really missing the combinations for Track and ListLabel?
+// there's got to be a better way
+
+// R e w r i t e
+
+// Generates the "-> ..." AST-rewrite section of a rule.  Note the "el"
+// prefix applied to each non-first alternative below: rewriteAltRest emits
+// text beginning with either "if <pred>:" or "se:", so the concatenation
+// produces Python's "elif <pred>:" / "else:" chain.
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+# AST Rewrite
+# elements: <referencedElementsDeep; separator=", ">
+# token labels: <referencedTokenLabels; separator=", ">
+# rule labels: <referencedRuleLabels; separator=", ">
+# token list labels: <referencedTokenListLabels; separator=", ">
+# rule list labels: <referencedRuleListLabels; separator=", ">
+<finishedBacktracking({
+<prevRuleRootRef()>.tree = root_0
+<rewriteCodeLabels()>
+root_0 = self.adaptor.nil()
+<first(alts):rewriteAltFirst(); anchor>
+
+<rest(alts):{a | el<rewriteAltRest(a)>}; anchor, separator="\n\n">})>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{stream_<it> = RewriteRuleTokenStream(self.adaptor, "token <it>", <it>)};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{stream_<it> = RewriteRuleTokenStream(self.adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{
+if <it> is not None:
+    stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "token <it>", <it>.tree)
+else:
+    stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "token <it>", None)
+};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow list used by other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+# <fileName>:<description>
+if <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+# <fileName>:<description>
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+# <fileName>:<description>
+if not (<referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">):
+    raise RewriteEarlyExitException()
+
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset()<\n>}>
+>>
+
+rewriteAltRest(a) ::= <<
+<if(a.pred)>
+if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>
+se: <! little hack to get if .. elif .. else block right !>
+    # <a.description>
+    <a.alt>
+<endif>
+>>
+
+rewriteAltFirst(a) ::= <<
+<if(a.pred)>
+if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>
+# <a.description>
+<a.alt>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = self.adaptor.nil()"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+# <fileName>:<description>
+root_<treeLevel> = self.adaptor.nil()
+<root:rewriteElement()>
+<children:rewriteElement()>
+self.adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args) ::= <<
+self.adaptor.addChild(root_<treeLevel>, <if(args)>self.adaptor.createFromToken(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>)<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+self.adaptor.addChild(root_<treeLevel>, stream_<label>.next())<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+self.adaptor.addChild(root_<treeLevel>, stream_<label>.next())<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = self.adaptor.becomeRoot(stream_<label>.next(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args) ::= <<
+root_<treeLevel> = self.adaptor.becomeRoot(<if(args)>self.adaptor.createFromToken(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>, root_<treeLevel>)<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
+<! need to call different adaptor.create*() methods depending of argument count !>
+self.adaptor.addChild(root_<treeLevel>, <if (!args)>self.adaptor.createFromType(<token>, "<token>")
+<else><if (!rest(args))>self.adaptor.createFromToken(<token>, <first(args)>)
+<else><if (!rest(rest(args)))>self.adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
+<endif>
+<endif>
+<endif>)<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
+<! need to call different adaptor.create*() methods depending of argument count !>
+root_<treeLevel> = self.adaptor.becomeRoot(<if (!args)>self.adaptor.createFromType(<token>, "<token>")
+<else><if (!rest(args))>self.adaptor.createFromToken(<token>, <first(args)>)
+<else><if (!rest(rest(args)))>self.adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
+<endif>
+<endif>
+<endif>, root_<treeLevel>)<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<!FIXME(96,untested)!>
+root_0 = <action><\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+self.adaptor.addChild(root_<treeLevel>, stream_<rule>.next())<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = self.adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+self.adaptor.addChild(root_<treeLevel>, <action>)<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = self.adaptor.becomeRoot(<action>, root_<treeLevel>)<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+self.adaptor.addChild(root_<treeLevel>, stream_<label>.next())<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+self.adaptor.addChild(root_<treeLevel>, stream_<label>.next().tree)<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = self.adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = self.adaptor.becomeRoot(stream_<label>.nextNode().tree, root_<treeLevel>)<\n>
+>>
diff --git a/src/org/antlr/codegen/templates/Python/Python.stg b/src/org/antlr/codegen/templates/Python/Python.stg
new file mode 100644
index 0000000..e89b2bb
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Python/Python.stg
@@ -0,0 +1,1281 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* in sync with Java/Java.stg revision 107 */
+
+group Python implements ANTLRCore;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+           bitsets, buildTemplate, buildAST, rewrite, profile,
+           backtracking, synpreds, memoize, numRules,
+           fileName, ANTLRVersion, generatedTimestamp, trace,
+           scopes, superClass, literals) ::=
+<<
+# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@imports>
+from antlr3 import *
+<if(TREE_PARSER)>
+from antlr3.tree import *<\n>
+<endif>
+from antlr3.compat import set, frozenset
+<@end>
+
+<actions.(actionScope).header>
+
+<! <docComment> !>
+
+# for convenience in actions
+HIDDEN = BaseRecognizer.HIDDEN
+
+# token types
+<tokens:{<it.name>=<it.type>}; separator="\n">
+
+<recognizer>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode) ::= <<
+class <name>(Lexer):
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+
+    grammarFileName = "<fileName>"
+
+    def __init__(self, input=None):
+        Lexer.__init__(self, input)
+<if(backtracking)>
+        self.ruleMemo = {}
+<endif>
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
+
+        <actions.lexer.init>
+
+
+    <actions.lexer.members>
+
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+
+>>
+
+/** A override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+def nextToken(self):
+    while True:
+        if self.input.LA(1) == EOF:
+            return EOF_TOKEN
+
+        self.token = None
+        self.channel = DEFAULT_CHANNEL
+        self.tokenStartCharIndex = self.input.index()
+        self.tokenStartCharPositionInLine = self.input.charPositionInLine
+        self.tokenStartLine = self.input.line
+        self._text = None
+        try:
+            m = self.input.mark()
+            self.backtracking = 1 <! means we won't throw slow exception !>
+            self.failed = False
+            self.mTokens()
+            self.backtracking = 0
+
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if self.failed:
+                self.input.rewind(m)
+                self.input.consume() <! advance one char and try again !>
+
+            else:
+                self.emit()
+                return self.token
+
+        except RecognitionException, re:
+            # shouldn't happen in backtracking mode, but...
+            self.reportError(re)
+            self.recover(re)
+
+
+def memoize(self, input, ruleIndex, ruleStartIndex):
+    if self.backtracking > 1:
+        # is Lexer always superclass?
+        Lexer.memoize(self, input, ruleIndex, ruleStartIndex)
+
+
+def alreadyParsedRule(self, input, ruleIndex):
+    if self.backtracking > 1:
+        return Lexer.alreadyParsedRule(self, input, ruleIndex)
+    return False
+
+
+>>
+
+filteringActionGate() ::= "self.backtracking == 1"
+
+/** How to generate a parser */
+
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              ASTLabelType="Object", labelType, members, init) ::= <<
+# token names
+tokenNames = [
+    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", 
+    <tokenNames; wrap, separator=", ">
+]
+
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeClass(scope=it)><endif>}>
+
+<rules:{<ruleAttributeScopeClass(scope=it.ruleDescriptor.ruleScope)>}>
+
+class <name>(<superClass>):
+    grammarFileName = "<fileName>"
+    tokenNames = tokenNames
+
+    def __init__(self, input):
+        <superClass>.__init__(self, input)
+<if(backtracking)>
+        self.ruleMemo = {}
+<endif>
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
+
+        <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeStack(scope=it)><endif>}>
+	<rules:{<ruleAttributeScopeStack(scope=it.ruleDescriptor.ruleScope)>}>
+
+        <init>
+
+        <@members>
+        <@end>
+
+
+    <members>
+
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <synpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:{FOLLOW_<it.name>_in_<it.inName><it.tokenIndex> = frozenset([<it.tokenTypes:{<it>};separator=", ">])<\n>}>
+
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", init={<actions.parser.init>}, ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", init={<actions.treeparser.init>}, ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+# $ANTLR start <ruleName>
+def <ruleName>_fragment(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
+<if(trace)>
+    self.traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
+    try:
+        <block>
+
+    finally:
+        self.traceOut("<ruleName>_fragment", <ruleDescriptor.index>)
+
+<else>
+    <block>
+<endif>
+# $ANTLR end <ruleName>
+
+
+>>
+
+synpred(name) ::= <<
+def <name>(self):
+    self.backtracking += 1
+    <@start()>
+    start = self.input.mark()
+    self.<name>_fragment()
+    success = not self.failed
+    self.input.rewind(start)
+    <@stop()>
+    self.backtracking -= 1
+    self.failed = False
+    return success
+
+
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if self.backtracking > 0 and self.alreadyParsedRule(self.input, <ruleDescriptor.index>):
+    return <ruleReturnValue()>
+
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>
+if self.failed:
+    return <ruleReturnValue()>
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if self.backtracking > 0:
+    self.failed = True
+    return <ruleReturnValue()><\n>
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+# $ANTLR start <ruleName>
+# <fileName>:<description>
+<ruleDescriptor.actions.decorate>
+def <ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
+<if(trace)>
+    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try:
+        try:
+            <ruleMemoization(name=ruleName)>
+            <block>
+            <ruleCleanUp()>
+            <(ruleDescriptor.actions.after):execAction()>
+
+<if(exceptions)>
+        <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+        <actions.(actionScope).rulecatch>
+<else>
+        except RecognitionException, re:
+            self.reportError(re)
+            self.recover(self.input, re)
+
+<endif>
+<else>
+        finally:
+            pass
+
+<endif>
+<endif>
+    finally:
+<if(trace)>
+        self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+        pass
+
+    <@postamble()>
+    return <ruleReturnValue()>
+
+# $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+except <decl>:
+    <action>
+
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval = self.<ruleDescriptor.name>_return()
+retval.start = self.input.LT(1)<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name> = <if(a.initValue)><a.initValue><else>None<endif>
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex = self.input.index()
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{self.<it>_stack.append(<it>_scope())}; separator="\n">
+<ruleDescriptor.ruleScope:{self.<it.name>_stack.append(<it.name>_scope())}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{self.<it>_stack.pop()}; separator="\n">
+<ruleDescriptor.ruleScope:{self.<it.name>_stack.pop()}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<it.label.text> = None}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text> = None}; separator="\n"
+>
+<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
+    :ruleLabelDef(label=it); separator="\n"
+>
+<ruleDescriptor.ruleListLabels:{<it.label.text> = None}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text> = None}; separator="\n"
+>
+<ruleDescriptor.charLabels:{<it.label.text> = None}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text> = None}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = self.input.LT(-1)<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if self.backtracking > 0:
+    self.memoize(self.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex)
+
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+# $ANTLR start <ruleName>
+def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
+<if(trace)>
+    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try:
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        self.type = <ruleName>
+
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+
+    finally:
+<if(trace)>
+        self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+	<ruleScopeCleanUp()>
+        <memoize()>
+        pass
+
+# $ANTLR end <ruleName>
+
+
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+def mTokens(self):
+    <block><\n>
+
+
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+<alts:altSwitchCase(); separator="\nel">
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<alts:altSwitchCase(); separator="\nel">
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+cnt<decisionNumber> = 0
+<decls>
+<@preloop()>
+while True: #loop<decisionNumber>
+    alt<decisionNumber> = <maxAlt>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    <alts:altSwitchCase(); separator="\nel">
+    else:
+        if cnt<decisionNumber> >= 1:
+            break #loop<decisionNumber>
+
+        <ruleBacktrackFailure()>
+        eee = EarlyExitException(<decisionNumber>, self.input)
+        <@earlyExitException()>
+        raise eee
+
+    cnt<decisionNumber> += 1
+
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@preloop()>
+while True: #loop<decisionNumber>
+    alt<decisionNumber> = <maxAlt>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    <alts:altSwitchCase(); separator="\nel">
+    else:
+        break #loop<decisionNumber>
+
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+if alt<decisionNumber> == <i>:
+    <@prealt()>
+    <it>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+# <fileName>:<description>
+<@declarations()>
+<elements:element()>
+<@cleanup()>
+>>
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+self.match(self.input, <token>, self.FOLLOW_<token>_in_<ruleName><elementIndex>)
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(...)>
+>>
+
+listLabel(label, elem) ::= <<
+if list_<label> is None:
+    list_<label> = []
+list_<label>.append(<label>)<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.match(<char>)
+<checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.matchRange(<a>, <b>)
+<checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+if <s>:
+    self.input.consume()
+    <postmatchCode>
+<if(!LEXER)>
+    self.errorRecovery = False<\n>
+<endif>
+<if(backtracking)>
+    self.failed = False<\n>
+<endif>
+
+else:
+    <ruleBacktrackFailure()>
+    mse = MismatchedSetException(None, self.input)
+    <@mismatchedSetException()>
+<if(LEXER)>
+    self.recover(mse)<\n>
+<else>
+    self.recoverFromMismatchedSet(
+        self.input, mse, self.FOLLOW_set_in_<ruleName><elementIndex>
+        )<\n>
+<endif>
+    raise mse
+<\n>
+>>
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+<label>Start = self.getCharIndex()
+self.match(<string>)
+<checkRuleBacktrackFailure()>
+<label> = CommonToken(input=self.input, type=INVALID_TOKEN_TYPE, channel=DEFAULT_CHANNEL, start=<label>Start, stop=self.getCharIndex()-1)
+<else>
+self.match(<string>)
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+self.matchAny(self.input)
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.matchAny()
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args) ::= <<
+self.following.append(self.FOLLOW_<rule>_in_<ruleName><elementIndex>)
+<if(label)>
+<label> = self.<rule>(<args; separator=", ">)<\n>
+<else>
+self.<rule>(<args; separator=", ">)<\n>
+<endif>
+self.following.pop()
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=rule */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex) ::= <<
+<if(label)>
+<label>Start<elementIndex> = self.getCharIndex()
+self.m<rule>(<args; separator=", ">)
+<checkRuleBacktrackFailure()>
+<label> = CommonToken(
+    input=self.input, 
+    type=INVALID_TOKEN_TYPE,
+    channel=DEFAULT_CHANNEL,
+    start=<label>Start<elementIndex>,
+    stop=self.getCharIndex()-1
+    )
+<else>
+self.m<rule>(<args; separator=", ">)
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+<label>Start<elementIndex> = self.getCharIndex()
+self.match(EOF)
+<checkRuleBacktrackFailure()>
+<label> = CommonToken(input=self.input, type=EOF, channel=DEFAULT_CHANNEL, start=<label>Start<elementIndex>, stop=self.getCharIndex()-1)
+<else>
+self.match(EOF)
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if self.input.LA(1) == DOWN:
+    self.match(self.input, DOWN, None)
+    <checkRuleBacktrackFailure()>
+    <children:element()>
+    self.match(self.input, UP, None)
+    <checkRuleBacktrackFailure()>
+
+<else>
+self.match(self.input, DOWN, None)
+<checkRuleBacktrackFailure()>
+<children:element()>
+self.match(self.input, UP, None)
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if not (<evalPredicate(...)>):
+    <ruleBacktrackFailure()>
+    raise FailedPredicateException(self.input, "<ruleName>", "<description>")
+
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel">
+else:
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
+    <@noViableAltException()>
+    raise nvae<\n>
+<endif>
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel"><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber> = <eotPredictsAlt> <! if no edges, don't gen ELSE !>
+<else>
+else:
+    alt<decisionNumber> = <eotPredictsAlt>
+<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if (<labelExpr>) <if(predicates)>and (<predicates>)<endif>:
+    <targetState>
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+<! 
+  FIXME: this is one of the few occasion, where I miss a switch statement
+  in Python. ATM this is implemented as a list of if .. elif ..
+  This may be replaced by faster a dictionary lookup, when I find a solution
+  for the cases when an edge is not a plain dfaAcceptState.
+!>
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+else:
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
+    <@noViableAltException()>
+    raise nvae<\n>
+<endif>
+
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+<if(eotPredictsAlt)>
+else:
+    alt<decisionNumber> = <eotPredictsAlt>
+<endif>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+if <labels:{LA<decisionNumber> == <it>}; separator=" or ">:
+    <targetState>
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = self.dfa<decisionNumber>.predict(self.input)
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+# lookup tables for DFA #<dfa.decisionNumber>
+
+DFA<dfa.decisionNumber>_eot = DFA.unpack(
+    u"<dfa.javaCompressedEOT; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_eof = DFA.unpack(
+    u"<dfa.javaCompressedEOF; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_min = DFA.unpack(
+    u"<dfa.javaCompressedMin; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_max = DFA.unpack(
+    u"<dfa.javaCompressedMax; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_accept = DFA.unpack(
+    u"<dfa.javaCompressedAccept; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_special = DFA.unpack(
+    u"<dfa.javaCompressedSpecial; wrap="\"\n    u\"">"
+    )
+
+        
+DFA<dfa.decisionNumber>_transition = [
+    <dfa.javaCompressedTransition:{s|DFA.unpack(u"<s; wrap="\"\nu\"">")}; separator=",\n">
+]
+
+# class definition for DFA #<dfa.decisionNumber>
+
+<if(dfa.specialStateSTs)>
+class DFA<dfa.decisionNumber>(DFA):
+    def specialStateTransition(self_, s, input):
+        # convince pylint that my self_ magic is ok ;)
+        # pylint: disable-msg=E0213
+
+        # pretend we are a member of the recognizer
+        # thus semantic predicates can be evaluated
+        self = self_.recognizer
+
+        _s = s
+
+        <dfa.specialStateSTs:{state |
+if s == <i0>: <! compressed special state numbers 0..n-1 !>
+    <state>}; separator="\nel">
+
+<if(backtracking)>
+        if self.backtracking >0:
+            self.failed = True
+            return -1<\n>
+<endif>
+        nvae = NoViableAltException(self_.getDescription(), <dfa.decisionNumber>, _s, input)
+        self_.error(nvae)
+        raise nvae<\n>
+<else>
+DFA<dfa.decisionNumber> = DFA<\n>
+<endif>
+
+>>
+
+cyclicDFAInit(dfa) ::= <<
+self.dfa<dfa.decisionNumber> = self.DFA<dfa.decisionNumber>(
+    self, <dfa.decisionNumber>,
+    eot = self.DFA<dfa.decisionNumber>_eot,
+    eof = self.DFA<dfa.decisionNumber>_eof,
+    min = self.DFA<dfa.decisionNumber>_min,
+    max = self.DFA<dfa.decisionNumber>_max,
+    accept = self.DFA<dfa.decisionNumber>_accept,
+    special = self.DFA<dfa.decisionNumber>_special,
+    transition = self.DFA<dfa.decisionNumber>_transition
+    )<\n>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = input.LA(1)<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+index<decisionNumber>_<stateNumber> = input.index()
+input.rewind()<\n>
+<endif>
+s = -1
+<edges; separator="\nel">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>)<\n>
+<endif>
+if s >= 0:
+    return s
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if (<labelExpr>)<if(predicates)> and (<predicates>)<endif>:
+    s = <targetStateNumber><\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+se:
+    s = <targetStateNumber><\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left> and <right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o |  or <o>}>)"
+
+notPredicate(pred) ::= "not (<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "<pred>"
+
+evalSynPredicate(pred,description) ::= "self.<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "self.input.LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(<lower> \<= LA<decisionNumber>_<stateNumber> \<= <upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(<lower> \<= self.input.LA(<k>) \<= <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\" or \">"
+
+// A T T R I B U T E S
+
+globalAttributeScopeClass(scope) ::= <<
+<if(scope.attributes)>
+class <scope.name>_scope(object):
+    def __init__(self):
+        <scope.attributes:{self.<it.decl> = None}; separator="\n">
+
+<endif>
+>>
+
+globalAttributeScopeStack(scope) ::= <<
+<if(scope.attributes)>
+self.<scope.name>_stack = []<\n>
+<endif>
+>>
+
+ruleAttributeScopeClass(scope) ::= <<
+<if(scope.attributes)>
+class <scope.name>_scope(object):
+    def __init__(self):
+        <scope.attributes:{self.<it.decl> = None}; separator="\n">
+
+<endif>
+>>
+
+ruleAttributeScopeStack(scope) ::= <<
+<if(scope.attributes)>
+self.<scope.name>_stack = []<\n>
+<endif>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<label.label.text> = None<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+class <ruleDescriptor.name>_return(object):
+    def __init__(self):
+        self.start = None
+        self.stop = None
+        <if(TREE_PARSER)>
+        self.tree = None
+        <endif>
+
+        <scope.attributes:{self.<it.decl> = None}; separator="\n">
+        <@ruleReturnMembers()>
+
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+self.<scope>_stack[-<negIndex>].<attr.name>
+<else>
+<if(index)>
+self.<scope>_stack[<index>].<attr.name>
+<else>
+self.<scope>_stack[-1].<attr.name>
+<endif>
+<endif>
+>>
+
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+<!FIXME: this seems not to be used by ActionTranslator...!>
+self.<scope>_stack[-<negIndex>].<attr.name> = <expr>
+<else>
+<if(index)>
+<!FIXME: this seems not to be used by ActionTranslator...!>
+self.<scope>_stack[<index>].<attr.name> = <expr>
+<else>
+self.<scope>_stack[-1].<attr.name> = <expr>
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "self.<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>.<attr.name>
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> = <expr>
+<else>
+<attr.name> = <expr>
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach; and they are evaluated early;
+// they cannot see TREE_PARSER or PARSER attributes for example. :(
+
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.text"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.type"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.line"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.charPositionInLine"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.channel"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.index"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
+ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
+ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+self.input.getTokenStream().toString(
+    self.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+    self.input.getTreeAdaptor().getTokenStopIndex(<scope>.start)
+    )
+<else>
+self.input.toString(<scope>.start,<scope>.stop)
+<endif>
+>>
+ruleLabelPropertyRef_st(scope,attr) ::= "<!FIXME(201:ST)!><scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.type"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.line"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.charPositionInLine"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.channel"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.index"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.text"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.start"
+rulePropertyRef_stop(scope,attr) ::= "retval.stop" //mmm... or input.LT(-1)??
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
+rulePropertyRef_text(scope,attr) ::= "self.input.toString(retval.start, self.input.LT(-1))"
+rulePropertyRef_st(scope,attr) ::= "<!FIXME(203:ST)!>retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "self.text"
+lexerRulePropertyRef_type(scope,attr) ::= "self.type"
+lexerRulePropertyRef_line(scope,attr) ::= "self.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "self.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "self.channel"
+lexerRulePropertyRef_start(scope,attr) ::= "self.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(self.getCharIndex()-1)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "<!FIXME(205:ST)!>retval.st =<expr>"
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if <actions.(actionScope).synpredgate>:
+    <action>
+
+<else>
+if self.backtracking == 0:
+    <action>
+
+<endif>
+<else>
+#action start
+<action>
+#action end
+<endif>
+>>
+
+// M I S C (properties, etc...)
+
+codeFileExtension() ::= ".py"
+
+true() ::= "True"
+false() ::= "False"
diff --git a/src/org/antlr/codegen/templates/Ruby/Ruby.stg b/src/org/antlr/codegen/templates/Ruby/Ruby.stg
new file mode 100644
index 0000000..b116b05
--- /dev/null
+++ b/src/org/antlr/codegen/templates/Ruby/Ruby.stg
@@ -0,0 +1,1352 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2006 Martin Traverso
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+group Ruby implements ANTLRCore;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+		   docComment, recognizer,
+		   name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+# <name> (<fileName>)
+# Generated by ANTLR <ANTLRVersion> on <generatedTimestamp>
+
+<docComment>
+<recognizer>
+>>
+
+/**
+ * Inherits parameters from outputFile(...)
+ *
+ * labelType is not used for Ruby (no explicit type declarations)
+ */
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType, filterMode) ::=
+<<
+class <name>
+    require 'stringio'
+
+	<tokens:{<it.name>=<it.type>}; separator="\n">
+	
+    def initialize(input)
+        input = StringIO.new(input) if input.respond_to?(:to_str)
+        @input = CharStream.new(input)
+        @backtracking = 0
+        @failed = false
+
+        <actions.lexer.init>
+    end
+
+    def next_token
+    	# TODO: catch exceptions
+		@token = nil
+		@channel = nil
+		@text = nil
+
+		@start = @input.index
+		@line = @input.line
+		@pos = @input.column
+
+		@type = nil
+		@type_int = nil
+
+		return :EOF if <LA(1)> == :EOF
+
+		match_Tokens()
+
+		if @token == nil
+			@text ||= @input.substring(@start, @input.index - 1)
+			@token = Token.new(@type, @type_int, @line, @pos, @text, @channel)
+		end
+
+		<if(trace)>
+			puts @token.inspect
+		<endif>
+        return @token
+    end
+
+    class Token
+        attr_reader :token_type
+        attr_reader :int_type
+        attr_reader :line
+        attr_reader :pos
+        attr_reader :text
+        attr_reader :channel
+
+        def initialize(token_type, int_type, line, pos, text, channel = nil)
+            @token_type = token_type
+            @int_type = int_type
+            @line = line
+            @pos = pos
+            @text = text
+            @channel = channel
+        end
+
+		alias :to_i :int_type
+    end
+
+    <actions.lexer.members>
+
+    private
+
+    class CharStream
+        attr_reader :line
+        attr_reader :column
+        attr_reader :index
+
+        def initialize(input)
+            @buffer = ""
+            @input = input
+            @line = 1
+            @column = 0
+
+            @index = 0;
+        end
+
+        # returns a Fixnum between 0 and 0xFFFF or :EOF
+        def look_ahead(pos)
+            offset = @index + pos - 1
+            if @buffer.length \< offset + 1
+                char = @input.read(offset + 1 - @buffer.length)
+                @buffer \<\< char if not char.nil?
+            end
+
+            if offset \< @buffer.length
+                @buffer[offset]
+            else
+                :EOF
+            end
+        end
+
+        def mark
+            @state = { :index => @index, :line => @line, :column => @column }
+            return 0
+        end
+
+        def rewind(marker)
+            @index = @state[:index]
+            @line = @state[:line]
+            @column = @state[:column]
+        end
+
+        def consume
+           look_ahead(1) # force a read from the input if necessary
+           @column = @column + 1
+           if @buffer[@index] == ?\n
+                @line = @line + 1
+                @column = 0
+           end
+           @index = @index + 1
+        end
+
+        def substring(start, stop)
+            @buffer.slice(start, stop - start + 1)
+        end
+    end
+
+
+    def match(value = nil)
+        @failed = false
+        case
+            when value.nil?
+                @input.consume()
+            when value.respond_to?(:to_str)
+                catch(:done) do
+                    value.each_byte do |c|
+                        @failed ||= !(<isolatedLookaheadTest(atom="c", k=1)>)
+                        @input.consume() if !@failed
+                        throw :done if @failed
+                    end
+                end
+            else
+                @failed = !(<isolatedLookaheadTest(atom="value", k=1)>)
+                @input.consume() if !@failed
+        end
+
+		if @failed && @backtracking \<= 0
+			raise "Expected #{value.respond_to?(:chr) ? value.chr : value}"
+		end
+    end
+
+    def match_range(from, to)
+        char = <LA(1)>
+
+        if char != :EOF && (char \>= from || char \<= to)
+			@failed = false
+			match()
+        elsif @backtracking > 0
+            @failed = true
+        else
+            raise "Expected [#{from.chr}..#{to.chr}]"
+        end
+    end
+
+    <rules; separator="\n\n">
+
+	<synpreds: synpred(); separator="\n\n">
+
+	<dfaClass()>
+    <cyclicDFAs: cyclicDFA()>
+end
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+       bitsets, ASTLabelType, superClass,
+       labelType, members) ::=
+<<
+require '<grammar.name>Lexer'
+
+class <name>
+	attr_reader :lexer
+	
+    TOKENS = [
+        <tokenNames: {[<it>, <i>]}; separator=",\n">
+    ].inject({}) { |hash, pair|
+        name = pair[0]
+        index = pair[1] + 3 # hardcoded for now... no way to get this value from ANTLR
+
+        if name[0] == ?'
+            hash[:"T#{index}"] = index
+        else
+            hash[:"#{name}"] = index
+        end
+
+        hash
+    }
+    
+    TOKENS[:EOF] = -1
+
+    def initialize(input)
+        if input.respond_to?(:to_str) || input.respond_to?(:read)
+            input = <grammar.name>Lexer.new(input)
+        end
+
+		@lexer = input
+        @input = TokenStream.new(input)
+        @backtracking = 0
+        @failed = false
+
+        <actions.parser.init>
+
+        <if(trace)>
+        @indent = 0
+        <endif>
+    end
+
+    <rules; separator="\n\n">
+
+    <actions.parser.members>
+
+    private
+
+    class TokenStream
+        attr_reader :index
+
+        def initialize(input)
+            @buffer = []
+            @input = input
+            @channel = nil
+
+            @index = 0;
+        end
+
+        # returns a Token
+        def look_ahead(pos)
+            offset = @index + pos - 1
+
+            while @buffer[-1] != :EOF && @buffer.length \< offset + 1
+                token = @input.next_token
+                if token == :EOF || token.channel == @channel
+                    @buffer \<\< token
+                end
+            end
+
+            offset = -1 if offset >= @buffer.length
+            if offset \< @buffer.length
+                @buffer[offset]
+            end
+        end
+
+        def mark
+            @state = { :index => @index }
+            return 0
+        end
+
+        def rewind(marker)
+            @index = @state[:index]
+        end
+
+        def consume
+           look_ahead(1) # force a read from the input if necessary
+           @index = @index + 1
+        end
+    end
+
+    def match(token = nil)
+        if token.nil? || <LA(1)> == token
+            @input.consume
+            @failed = false
+            return
+        elsif @backtracking > 0
+            @failed = true
+        else
+            raise "Expected #{token}"
+        end
+    end
+
+    def look_ahead(k)
+        token = @input.look_ahead(k)
+        if token != :EOF
+            token = token.token_type
+        end
+
+        token
+    end
+
+    <synpreds: synpred(); separator="\n\n">
+
+	<dfaClass()>
+    <cyclicDFAs: cyclicDFA()>
+end
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+          numRules, bitsets, labelType, ASTLabelType,
+          superClass, members) ::=
+<<
+	raise "treeParser not implemented"
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+# <description>
+def <ruleName>_fragment
+    <block>
+end
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::=
+<<
+# <description>
+def <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)
+	<ruleDescriptor.returnScope.attributes:
+		{ _retval_<it.name> = nil }; separator = "\n"
+	>
+	<ruleLabelDefs()>
+
+    <if(trace)>
+        puts " " * @indent + "+<ruleName>"
+        @indent += 1
+    <endif>
+
+	<ruleDescriptor.actions.init>
+
+    <block>
+
+    <if(trace)>
+        @indent -= 1
+        puts " " * @indent + "-<ruleName>"
+    <endif>
+
+
+    <if(!ruleDescriptor.isSynPred)>
+    <if(ruleDescriptor.hasReturnValue)>
+    <if(ruleDescriptor.hasMultipleReturnValues)>
+    return {<ruleDescriptor.returnScope.attributes:{ a | :<a.name> => _retval_<a.name> }; separator = ",">}
+   # TODO: need "Attribute.index" for this to work: return <ruleDescriptor.returnScope.attributes:{ a | _retval_<a.name> }; separator = ",">
+    <else>
+    return _retval_<ruleDescriptor.singleValueReturnName>
+    <endif>
+    <endif>
+    <endif>
+end
+>>
+
+ruleLabelDefs() ::= <<
+<ruleDescriptor.tokenLabels :{_<it.label.text> = nil}; separator="\n">
+<[ruleDescriptor.tokenListLabels, ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text> = nil}; separator="\n"
+>
+>>
+
+
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::=
+<<
+def match_<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)
+	<ruleDescriptor.actions.init>
+
+	<lexerRuleLabelDefs()>
+	<if(nakedBlock)>
+		<block><\n>
+	<else>
+		@type = :<ruleName>
+		@type_int = <ruleName>
+		<block>
+	<endif>
+end
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels, ruleDescriptor.ruleLabels]
+    :{<it.label.text> = nil}; separator="\n"
+>
+<ruleDescriptor.charLabels:{<it.label.text> = nil}; separator="\n">
+<[ruleDescriptor.tokenListLabels, ruleDescriptor.ruleListLabels]
+    :{list_<it.label.text> = nil}; separator="\n"
+>
+>>
+
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::=
+<<
+<lexerRule(...)>
+>>
+
+filteringNextToken() ::=
+<<
+    raise "filteringNextToken not implemented"
+>>
+
+filteringActionGate() ::=
+<<
+    raise "filteringActionGate not implemented"
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::=
+<<
+<switchBlock(...)>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::=
+<<
+<switchBlock(...)>
+>>                    
+
+
+/**
+ *  decision, decisionNumber don't seem to be relevant in this template
+ *  alts actually has a single element
+ */
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::=
+<<
+<plainBlock(...)>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::=
+<<
+<plainBlock(...)>
+>>
+
+/** A (..)+ block with 0 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::=
+<<
+# <description>
+matchedOnce<decisionNumber> = false
+<decls>
+while true
+    alt<decisionNumber> = <maxAlt>
+    <decision>
+    case alt<decisionNumber>
+        <alts:switchCase(); separator="\n">
+        else
+            break
+    end
+    matchedOnce<decisionNumber> = true
+end
+
+if !matchedOnce<decisionNumber>
+    raise "Expected at least one match: <description>"
+end
+>>
+
+positiveClosureBlockSingleAlt  ::= positiveClosureBlock
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::=
+<<
+# <description>
+<decls>
+while true
+    alt<decisionNumber> = <maxAlt>
+    <decision>
+    case alt<decisionNumber>
+        <alts:switchCase(); separator="\n">
+        else
+            break
+    end
+end
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt)::=
+<<
+# <description>
+<elements: element(); separator="\n">
+>>
+
+// E L E M E N T S
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex)::=
+<<
+<if(label)>
+_<label> = @input.look_ahead(1)<\n>
+<endif>
+match(:<token>)
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex)::=
+<<
+<tokenRef(...)>
+<listLabel(elem=label, ...)>
+>>
+
+
+listLabel(label,elem)::=
+<<
+list_<label> ||= []
+list_<label> \<\< _<elem>
+>>
+
+/** match a character */
+charRef(char,label)::=
+<<
+<if(label)>
+_<label> = <char><\n>
+<endif>
+match(<char>)
+>>
+
+/** match a character range */
+charRangeRef(a,b,label)::=
+<<
+<if(label)>
+_<label> = <LA(1)><\n>
+<endif>
+match_range(<a>, <b>)
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode)::=
+<<
+<if(label)>
+_<label> = <LA(1)><\n>
+<endif>
+if <s>
+    match()
+    <postmatchCode>
+else
+    raise "Expected set"
+end
+>>
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode)::=
+<<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label)::=
+<<
+<if(label)>
+_<label> = <string><\n>
+<endif>
+match(<string>)<\n>
+>>
+
+wildcard(label, elementIndex)::=
+<<
+<if(label)>
+_<label> = <LA(1)><\n>
+<endif>
+match()
+>>
+
+
+wildcardAndListLabel(label,elementIndex)::=
+<<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex)::=
+<<
+<if(label)>
+_<label> = <LA(1)><\n>
+<endif>
+match()
+>>
+
+wildcardCharListLabel(label, elementIndex)::=
+<<
+	raise "wildcardCharListLabel not implemented"
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args)::=
+<<
+<if(label)>
+_<label> = <rule>(<args; separator=", ">)<\n>
+<else>
+<rule>(<args; separator=", ">)<\n>
+<endif>
+>>
+
+/** ids+=ID */
+ruleRefAndListLabel(rule,label,elementIndex,args)::=
+<<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+
+/**
+
+A: b=B;
+B: .;
+
+TODO: Should we use a real token type instead of :invalid? How do we get it?
+ 
+*/
+lexerRuleRef(rule,label,args,elementIndex)::=
+<<
+<if(label)>
+_<label>_start_<elementIndex> = @input.index
+_<label>_line_<elementIndex> = @input.line
+_<label>_pos_<elementIndex> = @input.column
+match_<rule>(<args; separator=", ">)
+_<label> = Token.new(:invalid, 0,
+                     _<label>_line_<elementIndex>,
+                     _<label>_pos_<elementIndex>,
+                     @input.substring(_<label>_start_<elementIndex>, @input.index - 1), nil)
+<else>
+match_<rule>(<args; separator=", ">)
+<endif>
+>>
+
+
+lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::=
+<<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex)::=
+<<
+<if(label)>
+_<label> = :EOF<\n>
+<endif>
+match(:EOF)
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList) ::=
+<<
+	raise "tree not implemented"
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description)::=
+<<
+# <description>
+if !<evalPredicate(...)>
+    raise "Semantic predicate failed: #{<description>}"
+end
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)::=
+<<
+# <description>
+look_ahead<decisionNumber>_<stateNumber> = <LA(k)>
+<if(LEXER)>
+look_ahead<decisionNumber>_<stateNumber> = -1 if look_ahead<decisionNumber>_<stateNumber> == :EOF
+<endif>
+
+if <edges; separator="\nelsif ">
+else
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt><\n>
+<else>
+    raise "Expected: <description>"<\n>
+<endif>
+end
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ *
+ *  If a semPredState, don't force lookahead lookup; preds might not
+ *  need.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)::=
+<<
+# <description>
+look_ahead<decisionNumber>_<stateNumber> = <LA(k)>
+
+<if(LEXER)>
+look_ahead<decisionNumber>_<stateNumber> = -1 if look_ahead<decisionNumber>_<stateNumber> == :EOF
+<endif>
+
+if <edges; separator="\nelsif ">
+end
+>>
+
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a
+ *  rule anything other than 'a' predicts exiting.
+ *
+ *  If a semPredState, don't force lookahead lookup; preds might not
+ *  need.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)::=
+<<
+# <description>
+look_ahead<decisionNumber>_<stateNumber> = <LA(k)>
+<if(LEXER)>
+look_ahead<decisionNumber>_<stateNumber> = -1 if look_ahead<decisionNumber>_<stateNumber> == :EOF
+<endif>
+
+if <edges; separator="\nelsif ">
+<if(eotPredictsAlt)>
+else
+    alt<decisionNumber> = <eotPredictsAlt>
+<endif>
+end
+>>
+
+
+/** An accept state indicates a unique alternative has been predicted */
+/** It is not clear that decisionNumber is available here */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates)::=
+<<
+<labelExpr>  <if(predicates)>&& <predicates><endif>
+    <targetState>
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState)::=
+<<
+# <description>
+case <LA(k)>
+    <edges; separator="\n">
+    else
+        <if(eotPredictsAlt)>
+        alt<decisionNumber> = <eotPredictsAlt><\n>
+        <else>
+        raise "Expected: <description>"<\n>
+        <endif>
+end
+>>
+
+/**
+ * eotPredictsAlt is not relevant here
+ */
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState)::=
+<<
+# <description>
+case <LA(k)>
+    <edges; separator="\n">
+end
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState)::=
+<<
+# <description>
+case <LA(k)>
+    <edges; separator="\n">
+    <if(eotPredictsAlt)><\n>
+    else
+        alt<decisionNumber> = <eotPredictsAlt>
+    <endif>
+end
+>>
+
+dfaEdgeSwitch(labels, targetState)::=
+<<
+<if(PARSER)>
+when <labels:{:<it>}; separator=","><\n>
+<else>
+when <labels:{<it>}; separator=","><\n>
+<endif>
+    <targetState>
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description)::=
+<<
+alt<decisionNumber> = DFA<decisionNumber>.predict(self, @input)
+>>
+
+/** Generate the tables and support code needed for the DFAState object
+ *  argument.  Unless there is a semantic predicate (or syn pred, which
+ *  become sem preds), all states should be encoded in the state tables.
+ *  Consequently, cyclicDFAState/cyclicDFAEdge,eotDFAEdge templates are
+ *  not used except for special DFA states that cannot be encoded as
+ *  a transition table.
+ */
+cyclicDFA(dfa)::=
+<<
+
+DFA<dfa.decisionNumber> = DFA.new(
+    [<dfa.eot; wrap="\n     ", separator=",", null="-1">],
+    [<dfa.eof; wrap="\n     ", separator=",", null="-1">],
+    [<dfa.min; wrap="\n     ", separator=",", null="0">],
+    [<dfa.max; wrap="\n     ", separator=",", null="0">],
+    [<dfa.accept; wrap="\n     ", separator=",", null="-1">],
+    [<dfa.special; wrap="\n     ", separator=",", null="-1">],
+    [
+        <dfa.transition:{s | [<s; wrap="\n     ", separator=",", null="-1">]}; separator=",\n", null="">
+    ])
+
+def special_state_transition(s)
+	<if(dfa.specialStateSTs)>
+		case s
+			<dfa.specialStateSTs:{state |
+			when <i0>
+				<state>}; separator="\n">
+		end
+
+		raise "Expected: <dfa.description>"
+	<else>
+		-1
+	<endif>
+end
+
+public :special_state_transition
+>>
+
+/** A special state in a cyclic DFA; special means has a semantic predicate
+ *  or it's a huge set of symbols to check.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState)::=
+<<
+	<if(semPredState)>
+	@input.rewind(0)
+	<else>
+	look_ahead_<decisionNumber>_<stateNumber> = <LA(1)>
+	<endif>
+	s = -1
+	<edges>
+	return s if s >= 0 
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.  Again, this is for special
+ *  states.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates)::=
+<<
+return s = <targetStateNumber> if (<labelExpr>) <if(predicates)>&& (<predicates>)<endif><\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates)::=
+<<
+s = <targetStateNumber><\n>
+>>
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right)::= "(<left> && <right>)"
+
+orPredicates(operands)::=
+<<
+(<operands; separator=" || ">)
+>>
+
+notPredicate(pred)::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description)::= "(<pred>)"
+
+evalSynPredicate(pred,description)::= "<pred>()"
+
+/**
+ *  It's not really clear that decisionNumber and stateNumber are available here
+ */
+lookaheadTest(atom,k,atomAsInt)::=
+<<
+<if(LEXER)>
+look_ahead<decisionNumber>_<stateNumber> == <atom>
+<else>
+look_ahead<decisionNumber>_<stateNumber> == :<atom>
+<endif>
+>>
+
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::=
+<<
+<if(LEXER)>
+<LA(k)> == <atom>
+<else>
+<LA(k)> == :<atom>
+<endif>
+>>
+
+/**
+ *  It's not really clear that decisionNumber and stateNumber are available here
+ */
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt)::=
+<<
+<if(LEXER)>
+(look_ahead<decisionNumber>_<stateNumber> \>= <lower> && look_ahead<decisionNumber>_<stateNumber> \<= <upper>)
+<else>
+(TOKENS[look_ahead<decisionNumber>_<stateNumber>] \>= <lowerAsInt> && TOKENS[look_ahead<decisionNumber>_<stateNumber>] \<= <upperAsInt>)
+<endif>
+>>
+
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::=
+<<
+<if(LEXER)>
+(<LA(k)> \>= <lower> && <LA(k)> \<= <upper>)
+<else>
+(TOKENS[<LA(k)>] \>= <lowerAsInt> && TOKENS[<LA(k)>] \<= <upperAsInt>)
+<endif>
+>>
+
+setTest(ranges) ::=
+<<
+<ranges; separator=" || ">
+>>
+
+// A T T R I B U T E S
+
+parameterAttributeRef(attr)::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
+
+scopeAttributeRef(scope,attr,index,negIndex)::=
+<<
+	raise "scopeAttributeRef not implemented"
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::=
+<<
+	raise "scopeSetAttributeRef not implemented"
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope)::=
+<<
+	raise "isolatedDynamicScopeRef not implemented"
+>>
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr)::=
+<<
+<if(referencedRule.hasMultipleReturnValues)>
+_<scope>[:<attr.name>]
+<else>
+_<scope>
+<endif>
+>>
+
+/**
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>[<attr.index>] # TODO: need "Attribute.index" for this to work
+<else>
+<scope>
+<endif>
+>>
+**/
+
+returnAttributeRef(ruleDescriptor,attr)::=
+<<
+	_retval_<attr.name>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::=
+<<
+	_retval_<attr.name> = <expr>
+>>
+
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label)::= "_<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label)::= "list_<label>"
+
+
+// not sure the next are the right approach; and they are evaluated early;
+// they cannot see TREE_PARSER or PARSER attributes for example. :(
+
+tokenLabelPropertyRef_text(scope,attr)::= "_<scope>.text"
+tokenLabelPropertyRef_type(scope,attr)::= "_<scope>.token_type"
+tokenLabelPropertyRef_line(scope,attr)::= "_<scope>.line"
+tokenLabelPropertyRef_pos(scope,attr) ::= "_<scope>.pos"
+tokenLabelPropertyRef_channel(scope,attr)::= "_<scope>.channel"
+tokenLabelPropertyRef_index(scope,attr)::= "_<scope>.index"
+
+
+tokenLabelPropertyRef_tree(scope,attr)::= <<
+	raise "tokenLabelPropertyRef_tree not implemented"	
+>>
+
+ruleLabelPropertyRef_start(scope,attr)::=
+<<
+	raise "ruleLabelPropertyRef_start not implemented"
+>>
+
+ruleLabelPropertyRef_stop(scope,attr)::=
+<<
+	raise "ruleLabelPropertyRef_stop not implemented"
+>>
+
+ruleLabelPropertyRef_tree(scope,attr)::=
+<<
+	raise "ruleLabelPropertyRef_tree not implemented"
+>>
+
+ruleLabelPropertyRef_text(scope,attr)::=
+<<
+	raise "ruleLabelPropertyRef_text not implemented"
+>>
+
+ruleLabelPropertyRef_st(scope,attr)::=
+<<
+	raise "ruleLabelPropertyRef_st not implemented"
+>>
+
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label)::=
+<<
+	raise "lexerRuleLabel not implemented"
+>>
+
+lexerRuleLabelPropertyRef_type(scope,attr)::=
+<<
+	raise "lexerRuleLabelPropertyRef_type not implemented"
+>>
+
+lexerRuleLabelPropertyRef_line(scope,attr)::=
+<<
+	raise "lexerRuleLabelPropertyRef_line not implemented"
+>>
+
+lexerRuleLabelPropertyRef_pos(scope,attr)::=
+<<
+	raise "lexerRuleLabelPropertyRef_pos not implemented"
+>>
+
+lexerRuleLabelPropertyRef_channel(scope,attr)::=
+<<
+	raise "lexerRuleLabelPropertyRef_channel not implemented"
+>>
+
+lexerRuleLabelPropertyRef_index(scope,attr)::=
+<<
+	raise "lexerRuleLabelPropertyRef_index not implemented"
+>>
+
+lexerRuleLabelPropertyRef_text(scope,attr)::=
+<<
+	raise "lexerRuleLabelPropertyRef_text not implemented"
+>>
+
+lexerRulePropertyRef_text(scope,attr) ::= "@text"
+lexerRulePropertyRef_type(scope,attr) ::= <<
+	raise "lexerRulePropertyRef_type not implemented"
+>>
+
+lexerRulePropertyRef_line(scope,attr) ::= "@line"
+lexerRulePropertyRef_pos(scope,attr) ::= "@pos"
+
+lexerRulePropertyRef_index(scope,attr) ::= <<
+	raise "lexerRulePropertyRef_index not implemented"
+>>
+lexerRulePropertyRef_channel(scope,attr) ::= "@channel"
+
+lexerRulePropertyRef_start(scope,attr) ::= "@start"
+lexerRulePropertyRef_stop(scope,attr) ::= <<
+	raise "lexerRulePropertyRef_stop not implemented"
+>>
+
+
+ruleSetPropertyRef_tree(scope,attr,expr) ::= <<
+	raise "ruleSetPropertyRef_tree not implemented"
+>>
+ruleSetPropertyRef_st(scope,attr,expr) ::= <<
+	raise "ruleSetPropertyRef_st not implemented"
+>>
+
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr)::=
+<<
+	raise "rulePropertyRef_start not implemented"
+>>
+
+rulePropertyRef_stop(scope,attr)::=
+<<
+	raise "rulePropertyRef_stop not implemented"
+>>
+
+rulePropertyRef_tree(scope,attr)::=
+<<
+	raise "rulePropertyRef_tree not implemented"
+>>
+
+rulePropertyRef_text(scope,attr)::=
+<<
+	raise "rulePropertyRef_text not implemented"
+>>
+
+
+rulePropertyRef_st(scope,attr)::=
+<<
+	raise "rulePropertyRef_st not implemented"
+>>
+
+/** How to execute an action */
+/** TODO: add syntactic predicate & backtracking gates **/
+execAction(action)::=
+<<
+<action>
+>>
+
+// M I S C (properties, etc...)
+
+codeFileExtension()::=".rb"
+
+true()::= "true"
+false()::= "false"
+
+
+
+///////////// --------------------------- private templates --------------------------------
+
+
+bitset()::=
+<<
+	raise "bitset not implemented"
+>>
+
+
+element() ::= "<it.el>"
+
+plainBlock(decls, alts, description) ::=
+<<
+<decls>
+<alts>
+>>
+
+switchBlock(description, decisionNumber, maxAlt, alts, decls, decision) ::=
+<<
+# <description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<decision>
+case alt<decisionNumber>
+    <alts:switchCase(); separator="\n">
+end
+>>
+
+switchCase() ::=
+<<
+when <i>
+    <it>
+>>
+
+LA(k) ::=
+<<
+<if(LEXER)>
+@input.look_ahead(<k>)
+<else>
+look_ahead(<k>)
+<endif>
+>>
+
+
+synpred(name) ::= <<
+def <name>
+    start = @input.mark()
+    @backtracking += 1
+    <name>_fragment()
+    @backtracking -= 1
+
+    success = !@failed
+    @input.rewind(start)
+    @failed = false
+
+    return success
+end
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+
+dfaClass() ::= <<
+<if(cyclicDFAs)>
+    class DFA
+        def initialize(eot, eof, min, max, accept, special, transition)
+            @eot = eot
+            @eof = eof
+            @min = min
+            @max = max
+            @accept = accept
+            @special = special
+            @transition = transition
+        end
+
+        def predict(parser, input)
+            mark = input.mark()
+            s = 0 # we always start at s0
+            begin
+                loop do
+                    special_state = @special[s]
+                    if special_state >= 0
+                        s = parser.special_state_transition(special_state)
+                        input.consume()
+                        next
+                    end
+
+                    if @accept[s] >= 1
+                        return @accept[s]
+                    end
+
+                    # look for a normal char transition
+                    c = input.look_ahead(1).to_i
+                    if c != :EOF && c >= @min[s] && c \<= @max[s]
+                        next_state = @transition[s][c - @min[s]] # move to next state
+                        if next_state \< 0
+                            # was in range but not a normal transition
+                            # must check EOT, which is like the else clause.
+                            # eot[s]>=0 indicates that an EOT edge goes to another
+                            # state.
+                            if @eot[s] >= 0  # EOT Transition to accept state?
+                                s = @eot[s]
+                                input.consume()
+                                next
+                            end
+                            raise "No viable alt"
+                        end
+                        s = next_state
+                        input.consume()
+                        next
+                    end
+                    if @eot[s] >= 0   # EOT Transition?
+                        s = @eot[s]
+                        input.consume()
+                        next
+                    end
+                    if c == :EOF && @eof[s] >= 0   # EOF Transition to accept state?
+                        return @accept[@eof[s]]
+                    end
+
+                    # not in range and not EOF/EOT, must be invalid symbol
+                    raise "No viable alt"
+                end
+            ensure
+                input.rewind(mark)
+            end
+        end
+    end
+    <endif>
+>>
\ No newline at end of file
diff --git a/src/org/antlr/codegen/templates/cpp/CPP.stg b/src/org/antlr/codegen/templates/cpp/CPP.stg
new file mode 100644
index 0000000..86b5b70
--- /dev/null
+++ b/src/org/antlr/codegen/templates/cpp/CPP.stg
@@ -0,0 +1,1351 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group Cpp implements ANTLRCore;
+
+cppTypeInitMap ::= [
+	"int":"0",
+	"long":"0",
+	"float":"0.0",
+	"double":"0.0",
+	"bool":"false",
+	"byte":"0",
+	"short":"0",
+	"char":"0",
+	default:"0" // anything other than an atomic type
+]
+
+// What we generate lexer/parser/treeparser, used a suffix in a few places
+generatedType() ::= <<
+<if(LEXER)>Lexer<endif><if(PARSER)>Parser<endif><if(TREE_PARSER)>TreeParser<endif>
+>>
+
+leadIn(type) ::=
+<<
+/** \file
+ *
+ *  This <type> file was generated by ANTLR version <ANTLRVersion>
+ *
+ *     -  From the grammar source file : <fileName>
+ *     -                            On : <generatedTimestamp>
+<if(LEXER)>
+ *     -                 for the lexer : <name><\n>
+<endif>
+<if(PARSER)>
+ *     -                for the parser : <name><\n>
+<endif>
+<if(TREE_PARSER)>
+ *     -           for the tree parser : <name><\n>
+<endif>
+ *
+ *  Edit at your own peril.
+ */
+>>
+
+standardHeaders() ::=
+<<
+#include \<antlr3/<generatedType()>.h>
+
+<if(profile)>
+#warning "No profiling support.."
+<endif>
+<if(TREE_PARSER)>
+#warning "No tree parsing yet..."
+<endif>
+>>
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+			  docComment, recognizer,
+			  name, tokens, tokenNames, rules, cyclicDFAs,
+			  bitsets, buildTemplate, profile,
+			  backtracking, synpreds, memoize, numRules,
+			  fileName, ANTLRVersion, generatedTimestamp, trace,
+			  scopes, superClass) ::=
+<<
+<leadIn("C++ source")>
+<@includes>
+#include "<name><headerFileExtension()>"
+<@end>
+<if(actions.(actionScope).header)>
+// Header action start ========================================================
+<actions.(actionScope).header>
+// Header action end   ========================================================
+<endif>
+
+<headerAction>
+
+<standardHeaders()>
+
+<docComment>
+<recognizer>
+>>
+parserHeaderFile() ::= <<
+>>
+treeParserHeaderFile() ::= <<
+>>
+lexerHeaderFile() ::= <<
+template\<typename StreamType, typename TokenType, typename TokenBuilder>
+class <name> : public antlr3::Lexer\<StreamType,TokenType,TokenBuilder> {
+	// carry over general types
+	typedef typename StreamType::position_type position_type;
+	typedef typename StreamType::char_type char_type;
+
+	typedef antlr3::tokenid_type               tokenid_type;
+	typedef antlr3::channel_type               channel_type;
+	typedef antlr3::decision_type              decision_type;
+	// exception shorthands
+	typedef antlr3::MismatchException\<position_type,char_type>        MismatchException;
+	typedef antlr3::MismatchedRangeException\<position_type,char_type> MismatchedRangeException;
+	typedef antlr3::MismatchedSetException\<position_type,char_type>   MismatchedSetException;
+	typedef antlr3::EarlyExitException\<position_type>       EarlyExitException;
+	typedef antlr3::NoViableAltException\<position_type>     NoViableAltException;
+<if(backtracking)>
+	// @TODO backtracking ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+
+public:
+	<tokens:{static const tokenid_type <tokenPrefix()><it.name> = <it.type>;}; separator="\n">
+	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<actions.lexer.members>
+	
+	<name>(StreamType* input)
+	: antlr3::Lexer\<StreamType,TokenType,TokenBuilder>(input)
+	{
+	}
+
+<!if(filterMode)!>
+    <!filteringNextToken()!>
+<!endif!>
+	<rules; separator="\n\n">
+
+	// syn preds
+	<synpreds:{p | <lexerSynpred(p)>}>
+
+	// cyclic dfa's
+	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+	// dfa tables..
+}; // class <name><\n>
+>>
+
+headerFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope, 
+            actions,
+            docComment, 
+            recognizer,
+            name, 
+            tokens, 
+            tokenNames, 
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            profile,
+            backtracking, 
+            synpreds, 
+            memoize, 
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass
+        ) ::=
+<<
+#ifndef	_<name>_H
+#define _<name>_H
+<leadIn("C++ header")>
+<actions.(actionScope).headerfile>
+
+<@includes>
+<standardHeaders()>
+<@end>
+
+<if(LEXER)>
+<lexerHeaderFile()>
+<endif>
+<if(PARSER)>
+<parserHeaderFile()>
+<endif>
+<if(TREE_PARSER)>
+<treeParserHeaderFile()>
+<endif>
+
+
+#endif	// _<name>_H<\n>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode) ::= <<
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+>>
+
+filteringNextToken() ::= <<
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+public Token nextToken() {
+	while (true) {
+		if ( input.LA(1)==CharStream.EOF ) {
+			return Token.EOF_TOKEN;
+		}
+		this->token = 0;
+		tokenStartCharIndex = getCharIndex();
+		try {
+			int m = input.mark();
+			backtracking=1; <! means we won't throw slow exception !>
+			failed=false;
+			mTokens();
+			backtracking=0;
+			<! mTokens backtracks with synpred at backtracking==2
+				and we set the synpredgate to allow actions at level 1. !>
+			if ( failed ) {
+				input.rewind(m);
+				input.consume(); <! advance one char and try again !>
+			}
+			else {
+				return token;
+			}
+		}
+		catch (RecognitionException re) {
+			// shouldn't happen in backtracking mode, but...
+			reportError(re);
+			recover(re);
+		}
+	}
+}
+
+public void memoize(IntStream input, int ruleIndex, int ruleStartIndex)
+{
+	if ( backtracking > 1 ) 
+		super.memoize(input, ruleIndex, ruleStartIndex);
+}
+
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) 
+{
+	if ( backtracking > 1 ) 
+		return super.alreadyParsedRule(input, ruleIndex);
+	return false;
+}
+>>
+
+filteringActionGate() ::= "backtracking == 1"
+
+/** How to generate a parser */
+genericParser(
+	grammar, name, scopes, tokens, tokenNames, rules, numRules, cyclicDFAs, 
+	bitsets, inputStreamType, superClass, ASTLabelType="Object",
+	labelType, members
+	) ::= <<
+// genericParser	
+class <name> : public <@superClassName><superClass><@end> {
+public:
+	static const char* tokenNames[] = {
+		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+	};
+	<tokens:{static tokenid_type <tokenPrefix()><it.name>=<it.type>;}; separator="\n">
+	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<@members>
+
+	<name>(StreamType* input)
+	: <superClass>\<StreamType,TokenType>(input)
+	{
+<if(backtracking)>
+		ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+	}
+	<@end>
+
+	//@TODO public String[] getTokenNames() { return tokenNames; }
+	//@TODO public String getGrammarFileName() { return "<fileName>"; }
+	<members>
+
+	<rules; separator="\n\n">
+
+	<synpreds:{p | <synpred(p)>}>
+
+	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+	<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+	<bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+							words64=it.bits)>
+};
+>>
+
+parser(
+	grammar, name, scopes, tokens, tokenNames, 
+	rules, numRules, bitsets, ASTLabelType, 
+	superClass="Parser", labelType="Token", 
+	members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, 
+	rules, numRules, 
+	bitsets,
+	labelType={<ASTLabelType>}, ASTLabelType="Object", 
+	superClass="TreeParser", members={<actions.treeparser.members>}
+	) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {   
+	<if(trace)>System.out.println("enter <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);<endif>
+<if(trace)>
+	try {
+		<block>
+	}
+	finally {
+		System.out.println("exit <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);
+	}
+<else>
+	<block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(name) ::= <<
+public boolean <name>() {
+    this->backtracking++;
+    <@start()>
+    int start = input.mark();
+    try {
+        <name>_fragment(); // can never throw exception
+    } catch (RecognitionException re) {
+        System.err.println("impossible: "+re);
+    }
+    boolean success = ! this->failed;
+    input.rewind(start);
+    <@stop()>
+    this->backtracking--;
+    this->failed = false;
+    return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( backtracking > 0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) 
+	return <ruleReturnValue()>;
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>
+if (failed) 
+	return <ruleReturnValue()>;
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if (backtracking > 0)
+{
+	failed = true;
+	return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start <ruleName>
+// <fileName>:<description>
+public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throw(antlr3::BaseRecognitionException) 
+{   
+<if(trace)>
+	antlr3::Tracer trace(this,"<ruleName>");
+	System.out.println("enter <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);
+<endif>
+	<ruleDeclarations()>
+	<ruleLabelDefs()>
+	<ruleDescriptor.actions.init>
+	<@preamble()>
+	try {
+		<ruleMemoization(name=ruleName)>
+		<block>
+	}
+<if(exceptions)>
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+	<actions.(actionScope).rulecatch>
+<else>
+	catch (RecognitionException re) {
+		reportError(re);
+		recover(input,re);
+	}<\n>
+<endif>
+<endif>
+<endif>
+	finally {
+		<if(trace)>System.out.println("exit <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);<endif>
+		<ruleCleanUp()>
+		<(ruleDescriptor.actions.finally):execAction()>
+	}
+	<@postamble()>
+	return <ruleReturnValue()>;
+}
+// $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+catch (<decl>) {
+    <action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = new <returnType()>();
+retval.start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.index();
+<endif>
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{<labelType> <it.label.text>=null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
+    :ruleLabelDef(label=it); separator="\n"
+>
+<[ruleDescriptor.allRuleRefsInAltsWithRewrites,ruleDescriptor.allTokenRefsInAltsWithRewrites]
+    :{List list_<it>=new ArrayList();}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.stop = input.LT(-1);<\n>
+<endif>
+<if(memoize)>
+<if(backtracking)>
+if ( backtracking > 0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throw(antlr3::BaseRecognitionException)
+{
+<if(trace)>
+	antlr3::Tracer trace(this,"<ruleName>");
+<endif>
+	antlr3::CountScope nestingTracker(this->ruleNestingLevel);
+	StreamType& input(this->getInput());
+<if(nakedBlock)>
+	<ruleDescriptor.actions.init>
+	<ruleMemoization(name=ruleName)>
+	<block><\n>
+<else>
+	tokenid_type type = <tokenPrefix()><ruleName>;
+	channel_type channel = antlr3::Token::DEFAULT_CHANNEL;
+	position_type start(input.getPosition());
+	<ruleDescriptor.actions.init>
+	<ruleMemoization(name=ruleName)>
+	<block>
+	<! create token if none exists *and* we are an outermost token rule !>
+	<execAction({if ( this->token == 0 && this->ruleNestingLevel == 1 ) {
+	TokenType *tt = TokenBuilder::build(type,start,input,channel);
+	std::cout \<\< (*tt) \<\< std::endl;
+	this->emit(tt);
+	}<\n>
+})>
+<endif>
+}
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+void mTokens() throw(antlr3::BaseRecognitionException)
+{
+	StreamType& input(this->getInput());
+	<block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,
+	maxK,maxAlt,description) ::= <<
+// block <fileName>:<description>
+decision_type alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// ruleBlock <fileName>:<description>
+decision_type alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// ruleBlockSingleAlt <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// positiveClosureBlock <fileName>:<description>
+decision_type cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+do {
+	decision_type alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+	default :
+		if ( cnt<decisionNumber> >= 1 )
+			goto loop<decisionNumber>;
+            EarlyExitException eee( input.getPosition(), <decisionNumber> );
+				<@earlyExitException()>
+            throw eee;
+	}
+	cnt<decisionNumber>++;
+} while (true);
+loop<decisionNumber>: ;
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// closureBlock <fileName>:<description>
+<decls>
+<@preloop()>
+do {
+	decision_type alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>) {
+	<alts:altSwitchCase()>
+	default :
+		goto loop<decisionNumber>;
+	}
+} while (true);
+loop<decisionNumber>: ;
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+	<@prealt()>
+	<it>
+	break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+// alt <fileName>:<description>
+{
+	<@declarations()>
+	<elements:element()>
+	<@cleanup()>
+}
+>>
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+// element <fileName>:<description>
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex) ::= <<
+// tokenRef
+<if(label)>
+<label> = input.LT(1);<\n>
+<endif>
+this->match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID no AST building */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(...)>
+>>
+
+listLabel(label) ::= <<
+if (list_<label>==null) list_<label>=new ArrayList();
+list_<label>.add(<label>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+// charRef
+<if(label)>
+<tokenid_type()> <label> = input.LA(1);<\n>
+<endif>
+this->match(<char>); 
+<checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b) ::= "this->matchRange(<a>,<b>); <checkRuleBacktrackFailure()>"
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+// matchSet
+<if(label)>
+<label> = input.LT(1);<\n>
+<endif>
+if ( <s> )
+{
+	<postmatchCode>
+	input.consume();
+<if(!LEXER)>
+	errorRecovery=false;
+<endif>
+	<if(backtracking)>failed=false;<endif>
+}
+else
+{
+	<ruleBacktrackFailure()>
+	MismatchedSetException mse(input.getPosition(),input.LA(1));
+	<@mismatchedSetException()>
+<if(LEXER)>
+	this->recover(mse);
+<else>
+	this->recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+<endif>
+	throw mse;
+}<\n>
+>>
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+// lexerStringRef
+<if(label)>
+position_type <label>Start(input.getPosition());
+this->match( <string> ); 
+<checkRuleBacktrackFailure()>
+TokenType* <label> = TokenBuilder::build(Token.INVALID_TOKEN_TYPE,<label>Start,input,Token.DEFAULT_CHANNEL);
+<else>
+this->match( <string> ); 
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> = input.LT(1);<\n>
+<endif>
+this->matchAny( input );
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(...)>
+>>
+
+/** Match . wildcard */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<tokenid_type()> <label> = input.LA(1);<\n>
+<endif>
+this->matchAny();
+<checkRuleBacktrackFailure()>
+>>
+
+tokenid_type() ::= "<if(LEXER)>char_type<else>tokenid_type<endif>"
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args) ::= <<
+following.push(FOLLOW_<rule>_in_<ruleName><elementIndex>);
+<if(label)>
+<label>=<rule>(<args>);<\n>
+<else>
+<rule>(<args>);<\n>
+<endif>
+following.pop();
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+<ruleRef(...)>
+<listLabel(...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args) ::= <<
+<if(label)>
+position_type <label>Start(input.getPosition());
+m<rule>(<args>);
+<checkRuleBacktrackFailure()>
+TokenType* <label> = TokenBuilder::build(Token.INVALID_TOKEN_TYPE,<label>Start,input,Token.DEFAULT_CHANNEL);
+<else>
+m<rule>(<args>);
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label) ::= <<
+<if(label)>
+position_type <label>Start(input.getPosition());
+match(EOF); 
+<checkRuleBacktrackFailure()>
+TokenType* <label> = TokenBuilder::build(Token.EOF,<label>Start,input,Token.DEFAULT_CHANNEL);
+<else>
+match(EOF); 
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, children, nullableChildList) ::= <<
+<root:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==antlr3::Token::DOWN ) {
+    match(input, antlr3::Token::DOWN, null); 
+    <checkRuleBacktrackFailure()>
+    <children:element()>
+    match(input, antlr3::Token::UP, null); 
+    <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, antlr3::Token::DOWN, null); 
+<checkRuleBacktrackFailure()>
+<children:element()>
+match(input, antlr3::Token::UP, null); 
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+	<ruleBacktrackFailure()>
+	throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+<if(!semPredState)>
+<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<endif>
+<edges; separator="\nelse ">
+else 
+{
+<if(eotPredictsAlt)>
+	alt<decisionNumber> = <eotPredictsAlt>;<\n>
+<else>
+	<ruleBacktrackFailure()>
+	NoViableAltException nvae(input.getPosition(), "<description>", <decisionNumber>, <stateNumber>);<\n>
+	<@noViableAltException()>
+	throw nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+<if(!semPredState)>
+<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);
+<endif>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+<if(!semPredState)>
+<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);
+<endif>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+else 
+{
+	alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) 
+{
+	<targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+	alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+	NoViableAltException nvae( input.getPosition(), "<description>", <decisionNumber>, <stateNumber> );<\n>
+	<@noViableAltException()>
+	throw nvae;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+	<edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+	alt<decisionNumber> = <eotPredictsAlt>;
+	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n"> {
+	<targetState>
+} break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+// dfaDecision
+alt<decisionNumber> = predictDFA<decisionNumber>(input);
+>>
+
+/** The overall cyclic DFA chunk; contains all the DFA states */
+cyclicDFA(dfa) ::= <<
+/* cyclicDFA=<dfa>
+*/
+// cyclic    = <dfa.cyclic>
+// numstates = <dfa.numberOfStates>
+
+// startState = <dfa.startState>
+// startState.numberOfTransitions = <dfa.startState.NumberOfTransitions>
+// startState.lookaheadDepth = <dfa.startState.LookaheadDepth>
+
+const static short <name>dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] = {
+	<dfa.eot; wrap="\n     ", separator=",", null="-1">
+};
+const static short <name>dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] = {
+	<dfa.eof; wrap="\n     ", separator=",", null="-1">
+};
+const static unichar <name>dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] = {
+	<dfa.min; wrap="\n     ", separator=",", null="0">
+};
+const static unichar <name>dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] = {
+	<dfa.max; wrap="\n     ", separator=",", null="0">
+};
+const static short <name>dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] = {
+	<dfa.accept; wrap="\n     ", separator=",", null="-1">
+};
+const static short <name>dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] = {
+	<dfa.special; wrap="\n     ", separator=",", null="-1">
+};
+<dfa.edgeTransitionClassMap.keys:{ table |
+const static short <name>dfa<dfa.decisionNumber>_transition<i0>[] = {
+	<table; separator=", ", wrap="\n	", null="-1">
+};
+}; null="">
+const static short <name>dfa<dfa.decisionNumber>_transition[] = {
+	<dfa.transitionEdgeTables:{whichTable|<name>dfa<dfa.decisionNumber>_transition<whichTable>,}; separator="\n", null="0 /* fixme? */">
+};
+	<! add attribute for the DFA !>
+	DFA\<char_type> dfa<dfa.decisionNumber>;
+<! this should go in the initializer of the thing
+- (id) init
+{
+	if ((self = [super init]) != nil) {
+		eot = <name>dfa<dfa.decisionNumber>_eot;
+		eof = <name>dfa<dfa.decisionNumber>_eof;
+		min = <name>dfa<dfa.decisionNumber>_min;
+		max = <name>dfa<dfa.decisionNumber>_max;
+		accept = <name>dfa<dfa.decisionNumber>_accept;
+		special = <name>dfa<dfa.decisionNumber>_special;
+		if (!(transition = calloc(<dfa.numberOfStates>, sizeof(void*)))) {
+			[self release];
+			return nil;
+		}
+		<dfa.transitionEdgeTables:{whichTable|transition[<i0>] = <name>dfa<dfa.decisionNumber>_transition<whichTable>;}; separator="\n", null="">
+	}
+	return self;
+}
+!>
+
+<if(dfa.specialStateSTs)>
+int specialStateTransition( int state )
+{
+	int s = state;
+	switch ( s ) {
+ 		<dfa.specialStateSTs:{state |
+		case <i0> : <! compressed special state numbers 0..n-1 !>
+		<state>}; separator="\n">
+	}
+<if(backtracking)>
+	if ( recognizer.isBacktracking() ) {
+		recognizer.setFailed();
+		return -1;
+	}<\n>
+<endif>
+	noViableAlt(s, input);
+}<\n>
+<endif>
+
+
+<\n>
+
+// <dfa.description>
+decision_type predictDFA<dfa.decisionNumber>( StreamType& input )
+{
+	/* mark current location (rewind automatically when the rewinder goes
+	 * out of scope) */
+	antlr3::Rewinder\<position_type> markPoint(input.getPosition());
+	goto s0;	// goto start...
+	// ...
+	throw NoViableAltException( input.getPosition(), "<dfa.description>", <dfa.decisionNumber>, 0 /* fixme */ );<\n>
+}<\n>
+>>
+
+/** A state in a cyclic DFA */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+// cyclicDFAState
+s<stateNumber>: {
+	<if(semPredState)>
+	input.rewind();<\n>
+	<else>
+	<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(1);
+	<endif>
+	<edges>
+	<if(needErrorClause)>
+	throw NoViableAltException( input.getPosition(), "<description>", <decisionNumber>, <stateNumber> );<\n>
+	<endif><\n>
+}<\n>
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+// cyclicDFAEdge
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
+{
+	input.consume();
+	goto s<targetStateNumber>;
+}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= "goto s<targetStateNumber>;"
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left> && <right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
+
+notPredicate(pred) ::= "!(<pred>)"
+
+evalPredicate(pred,description) ::= "<pred>"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name> {
+    <scope.attributes:{<it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+    <scope.attributes:{<it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.name>_return
+<else>
+<if(ruleDescriptor.singleValueReturnType)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.name>_return
+<else>
+<if(referencedRule.singleValueReturnType)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<javaTypeInitMap.(typeName)>
+>>
+
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public static class <returnType()> {
+    <labelType> start, stop;
+<if(buildAST)>
+    <ASTLabelType> tree;
+<else>
+<if(buildTemplate)>
+    StringTemplate st;
+<endif>
+<endif>
+    <scope.attributes:{<it.decl>;}; separator="\n">
+};
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+/** Used in codegen.g to translate $x.y references.
+ *  I could have left actions as StringTemplates to be inserted in
+ *  the output (so they could use attributes inherited from surrounding
+ *  templates), but really wanted to pass in AttributeScope and Attribute
+ *  objects so this translation could query them.  So, translation of
+ *  $x.y to executable code occurs before recognizerST.toString() occurs.
+ *  I.e., actions are just text strings during final code generation.
+ */
+globalAttributeRef(scope,attr) ::= <<
+((<scope>)<scope>_stack.peek()).<attr.name>
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.singleValueReturnType)>
+<scope>
+<else>
+<scope>.<attr.name>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.singleValueReturnType)>
+<attr.name>
+<else>
+retval.<attr.name>
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+// not sure the next are the right approach; and they are evaluated early;
+// they cannot see TREE_PARSER or PARSER attributes for example. :(
+
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
+ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
+ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
+ruleLabelPropertyRef_text(scope,attr) ::= "input.toString(<scope>.start,<scope>.stop)"
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
+rulePropertyRef_text(scope,attr) ::= "input.toString(retval.start,input.LT(-1))"
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+// A C T I O N S
+
+emit(type) ::= "emit(<type>);"
+
+setType(type) ::= "setType(<type>);"
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> ) 
+{
+	<action>
+}
+<else>
+if ( backtracking == 0 ) 
+{
+	<action>
+}
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static final BitSet <name> = new BitSet(new long[]{<words64:{<it>L};separator=",">});<\n>
+>>
+
+tokenPrefix() ::= "TOK_"
+codeFileExtension() ::= ".cpp"
+// used in CPPTarget.java to generate the headerfile extension
+headerFileExtension() ::= ".h"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/src/org/antlr/misc/Barrier.java b/src/org/antlr/misc/Barrier.java
new file mode 100644
index 0000000..aa965e0
--- /dev/null
+++ b/src/org/antlr/misc/Barrier.java
@@ -0,0 +1,35 @@
+package org.antlr.misc;
+
+/**A very simple barrier wait.  Once a thread has requested a
+ * wait on the barrier with waitForRelease, it cannot fool the
+ * barrier into releasing by "hitting" the barrier multiple times--
+ * the thread is blocked on the wait().
+ */
+public class Barrier {
+    protected int threshold;
+    protected int count = 0;
+
+    public Barrier(int t) {
+        threshold = t;
+    }
+
+    public synchronized void waitForRelease()
+        throws InterruptedException
+    {
+        count++;
+        // The final thread to reach barrier resets barrier and
+        // releases all threads
+        if ( count==threshold ) {
+            // notify blocked threads that threshold has been reached
+            action(); // perform the requested operation
+            notifyAll();
+        }
+        else while ( count<threshold ) {
+            wait();
+        }
+    }
+
+    /** What to do when everyone reaches barrier */
+    public void action() {
+    }
+}
diff --git a/src/org/antlr/misc/BitSet.java b/src/org/antlr/misc/BitSet.java
new file mode 100644
index 0000000..2414d26
--- /dev/null
+++ b/src/org/antlr/misc/BitSet.java
@@ -0,0 +1,562 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+import org.antlr.analysis.Label;
+import org.antlr.tool.Grammar;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**A BitSet to replace java.util.BitSet.
+ *
+ * Primary differences are that most set operators return new sets
+ * as opposed to oring and anding "in place".  Further, a number of
+ * operations were added.  I cannot contain a BitSet because there
+ * is no way to access the internal bits (which I need for speed)
+ * and, because it is final, I cannot subclass to add functionality.
+ * Consider defining set degree.  Without access to the bits, I must
+ * call a method n times to test the ith bit...ack!
+ *
+ * Also seems like or() from util is wrong when size of incoming set is bigger
+ * than this.bits.length.
+ *
+ * @author Terence Parr
+ */
+public class BitSet implements IntSet, Cloneable {
+    protected final static int BITS = 64;    // number of bits / long
+    protected final static int LOG_BITS = 6; // 2^6 == 64
+
+    /* We will often need to do a mod operator (i mod nbits).  Its
+     * turns out that, for powers of two, this mod operation is
+     * same as (i & (nbits-1)).  Since mod is slow, we use a
+     * precomputed mod mask to do the mod instead.
+     */
+    protected final static int MOD_MASK = BITS - 1;
+
+    /** The actual data bits */
+    protected long bits[];
+
+    /** Construct a bitset of size one word (64 bits) */
+    public BitSet() {
+        this(BITS);
+    }
+
+    /** Construction from a static array of longs */
+    public BitSet(long[] bits_) {
+        bits = bits_;
+    }
+
+    /** Construct a bitset given the size
+     * @param nbits The size of the bitset in bits
+     */
+    public BitSet(int nbits) {
+        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
+    }
+
+    /** or this element into this set (grow as necessary to accommodate) */
+    public void add(int el) {
+        //System.out.println("add("+el+")");
+        int n = wordNumber(el);
+        //System.out.println("word number is "+n);
+        //System.out.println("bits.length "+bits.length);
+        if (n >= bits.length) {
+            growToInclude(el);
+        }
+        bits[n] |= bitMask(el);
+    }
+
+    public void addAll(IntSet set) {
+        if ( set instanceof BitSet ) {
+            this.orInPlace((BitSet)set);
+        }
+		else if ( set instanceof IntervalSet ) {
+			IntervalSet other = (IntervalSet)set;
+			// walk set and add each interval
+			for (Iterator iter = other.intervals.iterator(); iter.hasNext();) {
+				Interval I = (Interval) iter.next();
+				this.orInPlace(BitSet.range(I.a,I.b));
+			}
+		}
+		else {
+			throw new IllegalArgumentException("can't add "+
+											   set.getClass().getName()+
+											   " to BitSet");
+		}
+    }
+
+	public void addAll(int[] elements) {
+		if ( elements==null ) {
+			return;
+		}
+		for (int i = 0; i < elements.length; i++) {
+			int e = elements[i];
+			add(e);
+		}
+	}
+
+	public void addAll(List elements) {
+		if ( elements==null ) {
+			return;
+		}
+		for (int i = 0; i < elements.size(); i++) {
+			Object o = elements.get(i);
+			if ( !(o instanceof Integer) ) {
+				throw new IllegalArgumentException();
+			}
+			Integer eI = (Integer)o;
+			add(eI.intValue());
+		}
+	}
+
+    public IntSet and(IntSet a) {
+        BitSet s = (BitSet)this.clone();
+        s.andInPlace((BitSet)a);
+        return s;
+    }
+
+    public void andInPlace(BitSet a) {
+        int min = Math.min(bits.length, a.bits.length);
+        for (int i = min - 1; i >= 0; i--) {
+            bits[i] &= a.bits[i];
+        }
+        // clear all bits in this not present in a (if this bigger than a).
+        for (int i = min; i < bits.length; i++) {
+            bits[i] = 0;
+        }
+    }
+
+    private final static long bitMask(int bitNumber) {
+        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
+        return 1L << bitPosition;
+    }
+
+    public void clear() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            bits[i] = 0;
+        }
+    }
+
+    public void clear(int el) {
+        int n = wordNumber(el);
+        if (n >= bits.length) {	// grow as necessary to accommodate
+            growToInclude(el);
+        }
+        bits[n] &= ~bitMask(el);
+    }
+
+    public Object clone() {
+        BitSet s;
+        try {
+            s = (BitSet)super.clone();
+            s.bits = new long[bits.length];
+            System.arraycopy(bits, 0, s.bits, 0, bits.length);
+        }
+        catch (CloneNotSupportedException e) {
+            throw new InternalError();
+        }
+        return s;
+    }
+
+    public int size() {
+        int deg = 0;
+        for (int i = bits.length - 1; i >= 0; i--) {
+            long word = bits[i];
+            if (word != 0L) {
+                for (int bit = BITS - 1; bit >= 0; bit--) {
+                    if ((word & (1L << bit)) != 0) {
+                        deg++;
+                    }
+                }
+            }
+        }
+        return deg;
+    }
+
+    public boolean equals(Object other) {
+        if ( other == null || !(other instanceof BitSet) ) {
+            return false;
+        }
+
+        BitSet otherSet = (BitSet)other;
+
+        int n = Math.min(this.bits.length, otherSet.bits.length);
+
+        // words both sets have must match exactly
+        for (int i=0; i<n; i++) {
+            if (this.bits[i] != otherSet.bits[i]) {
+                return false;
+            }
+        }
+
+        // extra words in the longer set must all be zero; the scan must
+        // start at n (was n+1, an off-by-one that left word n unchecked)
+        if (this.bits.length > n) {
+            for (int i = n; i<this.bits.length; i++) {
+                if (this.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+        else if (otherSet.bits.length > n) {
+            for (int i = n; i<otherSet.bits.length; i++) {
+                if (otherSet.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Grows the set to a larger number of bits.
+     * @param bit element that must fit in set
+     */
+    public void growToInclude(int bit) {
+        int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
+        long newbits[] = new long[newSize];
+        System.arraycopy(bits, 0, newbits, 0, bits.length);
+        bits = newbits;
+    }
+
+    public boolean member(int el) {
+        int n = wordNumber(el);
+        if (n >= bits.length) return false;
+        return (bits[n] & bitMask(el)) != 0;
+    }
+
+    /** Get the first element you find and return it.  Return Label.INVALID
+     *  otherwise.
+     */
+    public int getSingleElement() {
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                return i;
+            }
+        }
+        return Label.INVALID;
+    }
+
+    public boolean isNil() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            if (bits[i] != 0) return false;
+        }
+        return true;
+    }
+
+    public IntSet complement() {
+        BitSet s = (BitSet)this.clone();
+        s.notInPlace();
+        return s;
+    }
+
+    public IntSet complement(IntSet set) {
+		if ( set==null ) {
+			return this.complement();
+		}
+        return set.subtract(this);
+    }
+
+    public void notInPlace() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            bits[i] = ~bits[i];
+        }
+    }
+
+    /** complement bits in the range 0..maxBit. */
+    public void notInPlace(int maxBit) {
+        notInPlace(0, maxBit);
+    }
+
+    /** Complement bits in the range minBit..maxBit (inclusive). */
+    public void notInPlace(int minBit, int maxBit) {
+        // grow only when maxBit doesn't fit; growToInclude() reallocates
+        // unconditionally, so guard it here the same way add()/clear() do
+        if ( wordNumber(maxBit) >= bits.length ) growToInclude(maxBit);
+        for (int i = minBit; i <= maxBit; i++) {
+            int n = wordNumber(i);
+            bits[n] ^= bitMask(i);
+        }
+    }
+
+    private final int numWordsToHold(int el) {
+        return (el >> LOG_BITS) + 1;
+    }
+
+    public static BitSet of(int el) {
+        BitSet s = new BitSet(el + 1);
+        s.add(el);
+        return s;
+    }
+
+    public static BitSet of(Collection elements) {
+        BitSet s = new BitSet();
+        Iterator iter = elements.iterator();
+        while (iter.hasNext()) {
+            Integer el = (Integer) iter.next();
+            s.add(el.intValue());
+        }
+        return s;
+    }
+
+	public static BitSet of(IntSet set) {
+		if ( set==null ) {
+			return null;
+		}
+
+		if ( set instanceof BitSet ) {
+			return (BitSet)set;
+		}
+		if ( set instanceof IntervalSet ) {
+			BitSet s = new BitSet();
+			s.addAll(set);
+			return s;
+		}
+		throw new IllegalArgumentException("can't create BitSet from "+set.getClass().getName());
+	}
+
+    public static BitSet of(Map elements) {
+        return BitSet.of(elements.keySet());
+    }
+
+	public static BitSet range(int a, int b) {
+		BitSet s = new BitSet(b + 1);
+		for (int i = a; i <= b; i++) {
+			int n = wordNumber(i);
+			s.bits[n] |= bitMask(i);
+		}
+		return s;
+	}
+
+    /** return this | a in a new set */
+    public IntSet or(IntSet a) {
+		if ( a==null ) {
+			return this;
+		}
+        BitSet s = (BitSet)this.clone();
+        s.orInPlace((BitSet)a);
+        return s;
+    }
+
+    public void orInPlace(BitSet a) {
+		if ( a==null ) {
+			return;
+		}
+        // If this is smaller than a, grow this first
+        if (a.bits.length > bits.length) {
+            setSize(a.bits.length);
+        }
+        int min = Math.min(bits.length, a.bits.length);
+        for (int i = min - 1; i >= 0; i--) {
+            bits[i] |= a.bits[i];
+        }
+    }
+
+    // remove this element from this set
+    public void remove(int el) {
+        int n = wordNumber(el);
+        if (n >= bits.length) {
+            growToInclude(el);
+        }
+        bits[n] &= ~bitMask(el);
+    }
+
+    /**
+     * Sets the size of a set.
+     * @param nwords how many words the new set should be
+     */
+    private void setSize(int nwords) {
+        long newbits[] = new long[nwords];
+        int n = Math.min(nwords, bits.length);
+        System.arraycopy(bits, 0, newbits, 0, n);
+        bits = newbits;
+    }
+
    /** Capacity in bits of the underlying storage, not the member count. */
    public int numBits() {
        return bits.length << LOG_BITS; // num words * bits per word
    }
+
    /** return how much space is being used by the bits array not
     *  how many actually have member bits on.
     *  @return the number of 64-bit words currently allocated
     */
    public int lengthInLongWords() {
        return bits.length;
    }
+
+    /**Is this contained within a? */
+    public boolean subset(BitSet a) {
+        if (a == null) return false;
+        return this.and(a).equals(this);
+    }
+
+    /**Subtract the elements of 'a' from 'this' in-place.
+     * Basically, just turn off all bits of 'this' that are in 'a'.
+     */
+    public void subtractInPlace(BitSet a) {
+        if (a == null) return;
+        // for all words of 'a', turn off corresponding bits of 'this'
+        for (int i = 0; i < bits.length && i < a.bits.length; i++) {
+            bits[i] &= ~a.bits[i];
+        }
+    }
+
    /** Return this - a as a new set.
     *  NOTE(review): returns null (not an empty set) when a is null or not
     *  a BitSet; callers must be prepared for the null.
     */
    public IntSet subtract(IntSet a) {
        if (a == null || !(a instanceof BitSet)) return null;

        BitSet s = (BitSet)this.clone();
        s.subtractInPlace((BitSet)a);
        return s;
    }
+
	/** Required by the IntSet interface but unsupported here; always throws. */
	public List toList() {
		throw new NoSuchMethodError("BitSet.toList() unimplemented");
	}
+
+    public int[] toArray() {
+        int[] elems = new int[size()];
+        int en = 0;
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                elems[en++] = i;
+            }
+        }
+        return elems;
+    }
+
    /** Return the underlying word array itself -- no defensive copy is made,
     *  so mutations by the caller mutate this set.
     */
    public long[] toPackedArray() {
        return bits;
    }
+
    /** Render as {e1,e2,...} using plain integer element values
     *  (no Grammar token names).
     */
    public String toString() {
        return toString(null);
    }
+
+    /** Transform a bit set into a string by formatting each element as an integer
+     * separator The string to put in between elements
+     * @return A commma-separated list of values
+     */
+    public String toString(Grammar g) {
+        StringBuffer buf = new StringBuffer();
+        String separator = ",";
+		boolean havePrintedAnElement = false;
+		buf.append('{');
+
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                if (i > 0 && havePrintedAnElement ) {
+                    buf.append(separator);
+                }
+                if ( g!=null ) {
+                    buf.append(g.getTokenDisplayName(i));
+                }
+                else {
+                    buf.append(i);
+                }
+				havePrintedAnElement = true;
+            }
+        }
+		buf.append('}');
+        return buf.toString();
+    }
+
+    /**Create a string representation where instead of integer elements, the
+     * ith element of vocabulary is displayed instead.  Vocabulary is a Vector
+     * of Strings.
+     * separator The string to put in between elements
+     * @return A commma-separated list of character constants.
+     */
+    public String toString(String separator, List vocabulary) {
+        if (vocabulary == null) {
+            return toString(null);
+        }
+        String str = "";
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                if (str.length() > 0) {
+                    str += separator;
+                }
+                if (i >= vocabulary.size()) {
+                    str += "'" + (char)i + "'";
+                }
+                else if (vocabulary.get(i) == null) {
+                    str += "'" + (char)i + "'";
+                }
+                else {
+                    str += (String)vocabulary.get(i);
+                }
+            }
+        }
+        return str;
+    }
+
+    /**
+     * Dump a comma-separated list of the words making up the bit set.
+     * Split each 64 bit number into two more manageable 32 bit numbers.
+     * This generates a comma-separated list of C++-like unsigned long constants.
+     */
+    public String toStringOfHalfWords() {
+        StringBuffer s = new StringBuffer();
+        for (int i = 0; i < bits.length; i++) {
+            if (i != 0) s.append(", ");
+            long tmp = bits[i];
+            tmp &= 0xFFFFFFFFL;
+            s.append(tmp);
+			s.append("UL");
+            s.append(", ");
+            tmp = bits[i] >>> 32;
+            tmp &= 0xFFFFFFFFL;
+			s.append(tmp);
+			s.append("UL");
+        }
+		return s.toString();
+    }
+
+    /**
+     * Dump a comma-separated list of the words making up the bit set.
+     * This generates a comma-separated list of Java-like long int constants.
+     */
+    public String toStringOfWords() {
+		StringBuffer s = new StringBuffer();
+        for (int i = 0; i < bits.length; i++) {
+            if (i != 0) s.append(", ");
+            s.append(bits[i]);
+			s.append("L");
+        }
+        return s.toString();
+    }
+
    /** No range compression is done for BitSet; identical to toString(). */
    public String toStringWithRanges() {
        return toString();
    }
+
    /** Index of the word in the bits array that holds the given bit. */
    private final static int wordNumber(int bit) {
        return bit >> LOG_BITS; // bit / BITS
    }
+}
diff --git a/src/org/antlr/misc/IntArrayList.java b/src/org/antlr/misc/IntArrayList.java
new file mode 100644
index 0000000..426497d
--- /dev/null
+++ b/src/org/antlr/misc/IntArrayList.java
@@ -0,0 +1,153 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+import java.util.AbstractList;
+
+/** An ArrayList based upon int members.  Not quite a real implementation of a
+ *  modifiable list as I don't do, for example, add(index,element).
+ *  TODO: unused?
+ */
+public class IntArrayList extends AbstractList implements Cloneable {
+	private static final int DEFAULT_CAPACITY = 10;
+	protected int n = 0;
+	protected int[] elements = null;
+
+	public IntArrayList() {
+		this(DEFAULT_CAPACITY);
+	}
+
+	public IntArrayList(int initialCapacity) {
+		elements = new int[initialCapacity];
+	}
+
+	/** Set the ith element.  Like ArrayList, this does NOT affect size. */
+	public int set(int i, int newValue) {
+		if ( i>=n ) {
+			setSize(i); // unlike definition of set in ArrayList, set size
+		}
+		int v = elements[i];
+		elements[i] = newValue;
+		return v;
+	}
+
+	public boolean add(int o) {
+		if ( n>=elements.length ) {
+			grow();
+		}
+		elements[n] = o;
+		n++;
+		return true;
+	}
+
+	public void setSize(int newSize) {
+		if ( newSize>=elements.length ) {
+            ensureCapacity(newSize);
+		}
+		n = newSize;
+	}
+
+	protected void grow() {
+		ensureCapacity((elements.length * 3)/2 + 1);
+	}
+
+	public boolean contains(int v) {
+		for (int i = 0; i < n; i++) {
+			int element = elements[i];
+			if ( element == v ) {
+				return true;
+			}
+		}
+		return false;
+	}
+
+	public void ensureCapacity(int newCapacity) {
+		int oldCapacity = elements.length;
+		if (n>=oldCapacity) {
+			int oldData[] = elements;
+			elements = new int[newCapacity];
+			System.arraycopy(oldData, 0, elements, 0, n);
+		}
+	}
+
+	public Object get(int i) {
+		return Utils.integer(element(i));
+	}
+
+	public int element(int i) {
+		return elements[i];
+	}
+
+	public int[] elements() {
+		int[] a = new int[n];
+		System.arraycopy(elements, 0, a, 0, n);
+		return a;
+	}
+
+	public int size() {
+		return n;
+	}
+
+    public int capacity() {
+        return elements.length;
+    }
+
+	public boolean equals(Object o) {
+        if ( o==null ) {
+            return false;
+        }
+        IntArrayList other = (IntArrayList)o;
+        if ( this.size()!=other.size() ) {
+            return false;
+        }
+		for (int i = 0; i < n; i++) {
+			if ( elements[i] != other.elements[i] ) {
+				return false;
+			}
+		}
+		return true;
+	}
+
+    public Object clone() throws CloneNotSupportedException {
+		IntArrayList a = (IntArrayList)super.clone();
+        a.n = this.n;
+        System.arraycopy(this.elements, 0, a.elements, 0, this.elements.length);
+        return a;
+    }
+
+	public String toString() {
+		StringBuffer buf = new StringBuffer();
+		for (int i = 0; i < n; i++) {
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			buf.append(elements[i]);
+		}
+		return buf.toString();
+	}
+}
diff --git a/src/org/antlr/misc/IntSet.java b/src/org/antlr/misc/IntSet.java
new file mode 100644
index 0000000..9b9c289
--- /dev/null
+++ b/src/org/antlr/misc/IntSet.java
@@ -0,0 +1,84 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+import org.antlr.tool.Grammar;
+
+import java.util.List;
+
/** A generic set of ints that has an efficient implementation, BitSet,
 *  which is a compressed bitset and is useful for ints that
 *  are small, for example less than 500 or so, and w/o many ranges.  For
 *  ranges with large values like unicode char sets, this is not very efficient.
 *  Consider using IntervalSet.  Not all methods in IntervalSet are implemented.
 *
 *  @see org.antlr.misc.BitSet
 *  @see org.antlr.misc.IntervalSet
 */
public interface IntSet {
    /** Add an element to the set */
    void add(int el);

    /** Add all elements from incoming set to this set.  Can limit
     *  to set of its own type.
     */
    void addAll(IntSet set);

    /** Return the intersection of this set with the argument, creating
     *  a new set.
     */
    IntSet and(IntSet a);

    /** Return the elements of the argument set not in this set, as a new set. */
    IntSet complement(IntSet elements);

    /** Return the union of this set and a, as a new set. */
    IntSet or(IntSet a);

    /** Return this minus a, as a new set. */
    IntSet subtract(IntSet a);

    /** Return the size of this set (not the underlying implementation's
     *  allocated memory size, for example).
     */
    int size();

    /** Is this set empty? */
    boolean isNil();

    boolean equals(Object obj);

    /** If this set has exactly one element, return it; implementation-defined
     *  otherwise.
     */
    int getSingleElement();

    /** Is el a member of this set? */
    boolean member(int el);

    /** remove this element from this set */
    void remove(int el);

    /** Return the members as a List; optional operation. */
    List toList();

    String toString();

    /** Render using Grammar token display names where available. */
    String toString(Grammar g);
}
diff --git a/src/org/antlr/misc/Interval.java b/src/org/antlr/misc/Interval.java
new file mode 100644
index 0000000..76589ed
--- /dev/null
+++ b/src/org/antlr/misc/Interval.java
@@ -0,0 +1,137 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
/** An immutable inclusive interval a..b */
public class Interval {
	public static final int INTERVAL_POOL_MAX_VALUE = 1000;
	static Interval[] intervals = new Interval[INTERVAL_POOL_MAX_VALUE+1];

    public int a; // low end, inclusive
    public int b; // high end, inclusive

    public Interval(int a, int b) { this.a=a; this.b=b; }

	/** Interval objects are used readonly so share all with the
	 *  same single value a==b up to some max size.  Use an array as a perfect hash.
	 *  Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new
	 *  Interval object with a..a in it.  On Java.g, 218623 IntervalSets
	 *  have a..a (set with 1 element).
	public static Interval create(int a, int b) {
		if ( a!=b || a<0 || a>INTERVAL_POOL_MAX_VALUE ) {
			return new Interval(a,b);
		}
		if ( intervals[a]==null ) {
			intervals[a] = new Interval(a,a);
		}
		return intervals[a];
	}
	 ACK!  Found out that add() actually modifies intervals. :(
	 */

	public static Interval create(int a, int b) { return new Interval(a,b); }

	/** Value equality on both endpoints.  BUGFIX: uses instanceof so null
	 *  or non-Interval arguments yield false instead of ClassCastException.
	 */
	public boolean equals(Object o) {
		if ( !(o instanceof Interval) ) {
			return false;
		}
        Interval other = (Interval)o;
        return this.a==other.a && this.b==other.b;
    }

	/** Hash consistent with equals.  BUGFIX: the original overrode equals
	 *  without hashCode, breaking the equals/hashCode contract for any
	 *  hash-based collection of Intervals.
	 */
	public int hashCode() {
		return 31*a + b;
	}

    /** Does this start completely before other? Disjoint */
    public boolean startsBeforeDisjoint(Interval other) {
        return this.a<other.a && this.b<other.a;
    }

    /** Does this start at or before other? Nondisjoint */
    public boolean startsBeforeNonDisjoint(Interval other) {
        return this.a<=other.a && this.b>=other.a;
    }

    /** Does this.a start after other.a? May or may not be disjoint
     *  (the original comment incorrectly said "after other.b").
     */
    public boolean startsAfter(Interval other) { return this.a>other.a; }

    /** Does this start completely after other? Disjoint */
    public boolean startsAfterDisjoint(Interval other) {
        return this.a>other.b;
    }

    /** Does this start after other? NonDisjoint */
    public boolean startsAfterNonDisjoint(Interval other) {
        return this.a>other.a && this.a<=other.b; // this.b>=other.b implied
    }

    /** Are both ranges disjoint? I.e., no overlap? */
    public boolean disjoint(Interval other) {
        return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
    }

    /** Are two intervals adjacent such as 0..41 and 42..42? */
    public boolean adjacent(Interval other) {
        return this.a == other.b+1 || this.b == other.a-1;
    }

    /** Does this fully enclose other? */
    public boolean properlyContains(Interval other) {
        return other.a >= this.a && other.b <= this.b;
    }

    /** Return the interval computed from combining this and other */
    public Interval union(Interval other) {
        return new Interval(Math.min(a,other.a), Math.max(b,other.b));
    }

    /** Return the interval in common between this and o */
    public Interval intersection(Interval other) {
        return new Interval(Math.max(a,other.a), Math.min(b,other.b));
    }

    /** Return the interval with elements from this not in other;
     *  other must not be totally enclosed (properly contained)
     *  within this, which would result in two disjoint intervals
     *  instead of the single one returned by this method.
     */
    public Interval differenceNotProperlyContained(Interval other) {
        Interval diff = null;
        // other.a to left of this.a (or same)
        if ( other.startsBeforeNonDisjoint(this) ) {
            diff = new Interval(Math.max(this.a,other.b+1),
                                this.b);
        }

        // other.a to right of this.a
        else if ( other.startsAfterNonDisjoint(this) ) {
            diff = new Interval(this.a, other.a-1);
        }
        return diff;
    }

    public String toString() {
        return a+".."+b;
    }
}
diff --git a/src/org/antlr/misc/IntervalSet.java b/src/org/antlr/misc/IntervalSet.java
new file mode 100644
index 0000000..27d1770
--- /dev/null
+++ b/src/org/antlr/misc/IntervalSet.java
@@ -0,0 +1,640 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+import org.antlr.analysis.Label;
+import org.antlr.tool.Grammar;
+
+import java.util.*;
+
+/** A set of integers that relies on ranges being common to do
+ *  "run-length-encoded" like compression (if you view an IntSet like
+ *  a BitSet with runs of 0s and 1s).  Only ranges are recorded so that
+ *  a few ints up near value 1000 don't cause massive bitsets, just two
+ *  integer intervals.
+ *
+ *  element values may be negative.  Useful for sets of EPSILON and EOF.
+ *
+ *  0..9 char range is index pair ['\u0030','\u0039'].
+ *  Multiple ranges are encoded with multiple index pairs.  Isolated
+ *  elements are encoded with an index pair where both intervals are the same.
+ *
+ *  The ranges are ordered and disjoint so that 2..6 appears before 101..103.
+ */
+public class IntervalSet implements IntSet {
	/** The list of sorted, disjoint Interval objects, ordered by left edge. */
    protected List intervals;
+
    /** Create a set with no elements; the backing list starts tiny. */
    public IntervalSet() {
        intervals = new ArrayList(2); // most sets are 1 or 2 elements
    }
+
+    /** Create a set with a single element, el. */
+    public static IntervalSet of(int a) {
+        IntervalSet s = new IntervalSet();
+        s.add(a);
+        return s;
+    }
+
+    /** Create a set with all ints within range [a..b] (inclusive) */
+    public static IntervalSet of(int a, int b) {
+        IntervalSet s = new IntervalSet();
+        s.add(a,b);
+        return s;
+    }
+
    /** Add a single element to the set.  An isolated element is stored
     *  as a range el..el.
     */
    public void add(int el) {
        // degenerate interval el..el
        add(el,el);
    }
+
    /** Add interval; i.e., add all integers from a to b to set.
     *  If b&lt;a, do nothing.
     *  Keep list in sorted order (by left range value).
     *  If overlap, combine ranges.  For example,
     *  If this is {1..5, 10..20}, adding 6..7 yields
     *  {1..5, 6..7, 10..20}.  Adding 4..8 yields {1..8, 10..20}.
     *  @param a low end, inclusive
     *  @param b high end, inclusive
     */
    public void add(int a, int b) {
        add(Interval.create(a,b));
    }
+
	/** Insert addition into the sorted, disjoint interval list, merging it
	 *  with any existing interval it touches or overlaps.  Empty additions
	 *  (b &lt; a) are ignored.  The list is modified in place through the
	 *  ListIterator.
	 *  NOTE(review): only a single merge with the following interval is
	 *  performed per call, so an addition spanning 3+ existing intervals
	 *  may not fully coalesce -- TODO confirm callers never do that.
	 */
	protected void add(Interval addition) {
		//System.out.println("add "+addition+" to "+intervals.toString());
		if ( addition.b<addition.a ) {
			return;
		}
		// find position in list
		// Use iterators as we modify list in place
		for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
			Interval r = (Interval) iter.next();
			if ( addition.equals(r) ) {
				return;
			}
			if ( addition.adjacent(r) || !addition.disjoint(r) ) {
				// next to each other, make a single larger interval
				Interval bigger = addition.union(r);
				iter.set(bigger);
				// make sure we didn't just create an interval that
				// should be merged with next interval in list
				if ( iter.hasNext() ) {
					Interval next = (Interval) iter.next();
					if ( bigger.adjacent(next)||!bigger.disjoint(next) ) {
						// if we bump up against or overlap next, merge
						iter.remove();   // remove this one
						iter.previous(); // move backwards to what we just set
						iter.set(bigger.union(next)); // set to 3 merged ones
					}
				}
				return;
			}
			if ( addition.startsBeforeDisjoint(r) ) {
				// insert before r
				iter.previous();
				iter.add(addition);
				return;
			}
			// if disjoint and after r, a future iteration will handle it
		}
		// ok, must be after last interval (and disjoint from last interval)
		// just add it
		intervals.add(addition);
	}
+
+	/*
+	protected void add(Interval addition) {
+        //System.out.println("add "+addition+" to "+intervals.toString());
+        if ( addition.b<addition.a ) {
+            return;
+        }
+        // find position in list
+        //for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
+		int n = intervals.size();
+		for (int i=0; i<n; i++) {
+			Interval r = (Interval)intervals.get(i);
+            if ( addition.equals(r) ) {
+                return;
+            }
+            if ( addition.adjacent(r) || !addition.disjoint(r) ) {
+                // next to each other, make a single larger interval
+                Interval bigger = addition.union(r);
+				intervals.set(i, bigger);
+                // make sure we didn't just create an interval that
+                // should be merged with next interval in list
+				if ( (i+1)<n ) {
+					i++;
+					Interval next = (Interval)intervals.get(i);
+                    if ( bigger.adjacent(next)||!bigger.disjoint(next) ) {
+                        // if we bump up against or overlap next, merge
+						intervals.remove(i); // remove next one
+						i--;
+						intervals.set(i, bigger.union(next)); // set to 3 merged ones
+                    }
+                }
+                return;
+            }
+            if ( addition.startsBeforeDisjoint(r) ) {
+                // insert before r
+				intervals.add(i, addition);
+                return;
+            }
+            // if disjoint and after r, a future iteration will handle it
+        }
+        // ok, must be after last interval (and disjoint from last interval)
+        // just add it
+        intervals.add(addition);
+    }
+*/
+
+	public void addAll(IntSet set) {
+		if ( set==null ) {
+			return;
+		}
+        if ( !(set instanceof IntervalSet) ) {
+            throw new IllegalArgumentException("can't add non IntSet ("+
+											   set.getClass().getName()+
+											   ") to IntervalSet");
+        }
+        IntervalSet other = (IntervalSet)set;
+        // walk set and add each interval
+        for (Iterator iter = other.intervals.iterator(); iter.hasNext();) {
+            Interval I = (Interval) iter.next();
+            this.add(I.a,I.b);
+        }
+    }
+
    /** Complement this set against the vocabulary [minElement..maxElement]. */
    public IntSet complement(int minElement, int maxElement) {
        return this.complement(IntervalSet.of(minElement,maxElement));
    }
+
    /** Given the set of possible values (rather than, say UNICODE or MAXINT),
     *  return a new set containing all elements in vocabulary, but not in
     *  this.  The computation is (vocabulary - this).
     *
     *  'this' is assumed to be either a subset or equal to vocabulary.
     *  NOTE(review): the gap scan starts at bit 0, so vocabulary elements
     *  below 0 (the class doc allows negative elements) are never added to
     *  the complement -- TODO confirm intent.
     */
    public IntSet complement(IntSet vocabulary) {
        if ( vocabulary==null ) {
            return null; // nothing in common with null set
        }
		if ( !(vocabulary instanceof IntervalSet ) ) {
			throw new IllegalArgumentException("can't complement with non IntervalSet ("+
											   vocabulary.getClass().getName()+")");
		}
		IntervalSet vocabularyIS = ((IntervalSet)vocabulary);
		int maxElement = vocabularyIS.getMaxElement();

		IntervalSet compl = new IntervalSet();
		if ( intervals.size()==0 ) {
			// this is empty; complement is empty too (NOT the vocabulary)
			return compl;
		}
		Interval first = (Interval)intervals.get(0);
		// add a range from 0 to first.a constrained to vocab
		if ( first.a > 0 ) {
			IntervalSet s = IntervalSet.of(0, first.a-1);
			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
			compl.addAll(a);
		}
		for (int i=1; i<intervals.size(); i++) { // from 2nd interval .. nth
			// each gap between consecutive intervals belongs to the complement
			Interval previous = (Interval)intervals.get(i-1);
			Interval current = (Interval)intervals.get(i);
			IntervalSet s = IntervalSet.of(previous.b+1, current.a-1);
			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
			compl.addAll(a);
		}
		Interval last = (Interval)intervals.get(intervals.size()-1);
		// add a range from last.b to maxElement constrained to vocab
		if ( last.b < maxElement ) {
			IntervalSet s = IntervalSet.of(last.b+1, maxElement);
			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
			compl.addAll(a);
		}
		return compl;
    }
+
	/** Compute this-other via this&amp;~other.
	 *  Return a new set containing all elements in this but not in other.
	 *  other is assumed to be a subset of this;
     *  anything that is in other but not in this will be ignored.
	 *  NOTE(review): other is cast to IntervalSet unchecked; a non-IntervalSet
	 *  argument throws ClassCastException here.
	 */
	public IntSet subtract(IntSet other) {
		// assume the whole unicode range here for the complement
		// because it doesn't matter.  Anything beyond the max of this' set
		// will be ignored since we are doing this & ~other.  The intersection
		// will be empty.  The only problem would be when this' set max value
		// goes beyond MAX_CHAR_VALUE, but hopefully the constant MAX_CHAR_VALUE
		// will prevent this.
		return this.and(((IntervalSet)other).complement(0,Label.MAX_CHAR_VALUE));
	}
+
+    /** return a new set containing all elements in this but not in other.
+     *  Intervals may have to be broken up when ranges in this overlap
+     *  with ranges in other.  other is assumed to be a subset of this;
+     *  anything that is in other but not in this will be ignored.
+	 *
+	 *  Keep around, but 10-20-2005, I decided to make complement work w/o
+	 *  subtract and so then subtract can simply be a&~b
+	 *
+    public IntSet subtract(IntSet other) {
+        if ( other==null || !(other instanceof IntervalSet) ) {
+            return null; // nothing in common with null set
+        }
+
+        IntervalSet diff = new IntervalSet();
+
+        // iterate down both interval lists
+        ListIterator thisIter = this.intervals.listIterator();
+        ListIterator otherIter = ((IntervalSet)other).intervals.listIterator();
+        Interval mine=null;
+        Interval theirs=null;
+        if ( thisIter.hasNext() ) {
+            mine = (Interval)thisIter.next();
+        }
+        if ( otherIter.hasNext() ) {
+            theirs = (Interval)otherIter.next();
+        }
+        while ( mine!=null ) {
+            //System.out.println("mine="+mine+", theirs="+theirs);
+            // CASE 1: nothing in theirs removes a chunk from mine
+            if ( theirs==null || mine.disjoint(theirs) ) {
+                // SUBCASE 1a: finished traversing theirs; keep adding mine now
+                if ( theirs==null ) {
+                    // add everything in mine to difference since theirs done
+                    diff.add(mine);
+                    mine = null;
+                    if ( thisIter.hasNext() ) {
+                        mine = (Interval)thisIter.next();
+                    }
+                }
+                else {
+                    // SUBCASE 1b: mine is completely to the left of theirs
+                    // so we can add to difference; move mine, but not theirs
+                    if ( mine.startsBeforeDisjoint(theirs) ) {
+                        diff.add(mine);
+                        mine = null;
+                        if ( thisIter.hasNext() ) {
+                            mine = (Interval)thisIter.next();
+                        }
+                    }
+                    // SUBCASE 1c: theirs is completely to the left of mine
+                    else {
+                        // keep looking in theirs
+                        theirs = null;
+                        if ( otherIter.hasNext() ) {
+                            theirs = (Interval)otherIter.next();
+                        }
+                    }
+                }
+            }
+            else {
+                // CASE 2: theirs breaks mine into two chunks
+                if ( mine.properlyContains(theirs) ) {
+                    // must add two intervals: stuff to left and stuff to right
+                    diff.add(mine.a, theirs.a-1);
+                    // don't actually add stuff to right yet as next 'theirs'
+                    // might overlap with it
+                    // The stuff to the right might overlap with next "theirs".
+                    // so it is considered next
+                    Interval right = new Interval(theirs.b+1, mine.b);
+                    mine = right;
+                    // move theirs forward
+                    theirs = null;
+                    if ( otherIter.hasNext() ) {
+                        theirs = (Interval)otherIter.next();
+                    }
+                }
+
+                // CASE 3: theirs covers mine; nothing to add to diff
+                else if ( theirs.properlyContains(mine) ) {
+                    // nothing to add, theirs forces removal totally of mine
+                    // just move mine looking for an overlapping interval
+                    mine = null;
+                    if ( thisIter.hasNext() ) {
+                        mine = (Interval)thisIter.next();
+                    }
+                }
+
+                // CASE 4: non proper overlap
+                else {
+                    // overlap, but not properly contained
+                    diff.add(mine.differenceNotProperlyContained(theirs));
+                    // update iterators
+                    boolean moveTheirs = true;
+                    if ( mine.startsBeforeNonDisjoint(theirs) ||
+                         theirs.b > mine.b )
+                    {
+                        // uh oh, right of theirs extends past right of mine
+                        // therefore could overlap with next of mine so don't
+                        // move theirs iterator yet
+                        moveTheirs = false;
+                    }
+                    // always move mine
+                    mine = null;
+                    if ( thisIter.hasNext() ) {
+                        mine = (Interval)thisIter.next();
+                    }
+                    if ( moveTheirs ) {
+                        theirs = null;
+                        if ( otherIter.hasNext() ) {
+                            theirs = (Interval)otherIter.next();
+                        }
+                    }
+                }
+            }
+        }
+        return diff;
+    }
+	 */
+
+    /** Union of this set with a.  TODO: implement this!
+     *  Carries a message (like remove() below) so a stack trace
+     *  identifies which unimplemented operation was hit.
+     */
+	public IntSet or(IntSet a) {
+		throw new NoSuchMethodError("IntervalSet.or() unimplemented");
+    }
+
+    /** Return a new set with the intersection of this set with other.  Because
+     *  the intervals are sorted, we can use an iterator for each list and
+     *  just walk them together.  This is roughly O(min(n,m)) for interval
+     *  list lengths n and m.
+     */
+	public IntSet and(IntSet other) {
+		if ( other==null ) { //|| !(other instanceof IntervalSet) ) {
+			return null; // nothing in common with null set
+		}
+
+		ArrayList myIntervals = (ArrayList)this.intervals;
+		ArrayList theirIntervals = (ArrayList)((IntervalSet)other).intervals;
+		IntervalSet intersection = null; // lazily created on first overlap
+		int mySize = myIntervals.size();
+		int theirSize = theirIntervals.size();
+		int i = 0; // index into my sorted interval list
+		int j = 0; // index into their sorted interval list
+		// iterate down both interval lists looking for nondisjoint intervals
+		while ( i<mySize && j<theirSize ) {
+			Interval mine = (Interval)myIntervals.get(i);
+			Interval theirs = (Interval)theirIntervals.get(j);
+			//System.out.println("mine="+mine+" and theirs="+theirs);
+			if ( mine.startsBeforeDisjoint(theirs) ) {
+				// move this iterator looking for interval that might overlap
+				i++;
+			}
+			else if ( theirs.startsBeforeDisjoint(mine) ) {
+				// move other iterator looking for interval that might overlap
+				j++;
+			}
+			else if ( mine.properlyContains(theirs) ) {
+				// overlap, add intersection, get next theirs
+				if ( intersection==null ) {
+					intersection = new IntervalSet();
+				}
+				intersection.add(mine.intersection(theirs));
+				j++;
+			}
+			else if ( theirs.properlyContains(mine) ) {
+				// overlap, add intersection, get next mine
+				if ( intersection==null ) {
+					intersection = new IntervalSet();
+				}
+				intersection.add(mine.intersection(theirs));
+				i++;
+			}
+			else if ( !mine.disjoint(theirs) ) {
+				// overlap, add intersection
+				if ( intersection==null ) {
+					intersection = new IntervalSet();
+				}
+				intersection.add(mine.intersection(theirs));
+				// Move the iterator of lower range [a..b], but not
+				// the upper range as it may contain elements that will collide
+				// with the next iterator. So, if mine=[0..115] and
+				// theirs=[115..200], then intersection is 115 and move mine
+				// but not theirs as theirs may collide with the next range
+				// in thisIter.
+				// move both iterators to next ranges
+				// NOTE(review): if both intervals had identical bounds, the
+				// properlyContains branches above must have caught them
+				// (assumes non-strict containment); otherwise neither index
+				// advances here and the loop would spin -- confirm the
+				// Interval.properlyContains semantics.
+				if ( mine.startsAfterNonDisjoint(theirs) ) {
+					j++;
+				}
+				else if ( theirs.startsAfterNonDisjoint(mine) ) {
+					i++;
+				}
+			}
+		}
+		// no overlap anywhere: return an empty set rather than null
+		if ( intersection==null ) {
+			return new IntervalSet();
+		}
+		return intersection;
+	}
+
+    /** Is el in any range of this set? */
+    public boolean member(int el) {
+        for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
+            Interval I = (Interval) iter.next();
+            if ( el<I.a ) {
+                break; // list is sorted and el is before this interval; not here
+            }
+            if ( el>=I.a && el<=I.b ) {
+                return true; // found in this interval
+            }
+        }
+        return false;
+    }
+
+    /** True when the set contains no elements at all. */
+    public boolean isNil() {
+        if ( intervals==null ) {
+            return true;
+        }
+        return intervals.isEmpty();
+    }
+
+    /** If this set degenerates to exactly one integer, return it;
+     *  otherwise return Label.INVALID.
+     */
+    public int getSingleElement() {
+        if ( intervals==null || intervals.size()!=1 ) {
+            return Label.INVALID;
+        }
+        Interval only = (Interval)intervals.get(0);
+        return only.a==only.b ? only.a : Label.INVALID;
+    }
+
+	/** Largest element in the set, or Label.INVALID when empty.  The
+	 *  interval list is sorted, so the answer is the right edge of the
+	 *  final interval.
+	 */
+	public int getMaxElement() {
+		if ( isNil() ) {
+			return Label.INVALID;
+		}
+		return ((Interval)intervals.get(intervals.size()-1)).b;
+	}
+
+	/** Return minimum element >= 0, or Label.INVALID if none.
+	 *  Intervals are sorted, so the first interval whose right edge
+	 *  reaches 0 supplies the answer directly: its left edge if that
+	 *  is nonnegative, else 0 (which must lie inside [a..b]).  This
+	 *  replaces the old element-by-element scan, which was O(range)
+	 *  for large negative intervals.
+	 */
+	public int getMinElement() {
+		if ( isNil() ) {
+			return Label.INVALID;
+		}
+		Iterator iter = this.intervals.iterator();
+		while (iter.hasNext()) {
+			Interval I = (Interval) iter.next();
+			if ( I.b >= 0 ) {
+				// first value >= 0 inside [I.a..I.b]
+				return I.a >= 0 ? I.a : 0;
+			}
+		}
+		return Label.INVALID;
+	}
+
+    /** Return the underlying list of Interval objects.  NOTE: this is
+     *  the live internal list, not a copy -- callers must not modify it.
+     */
+    public List getIntervals() {
+        return intervals;
+    }
+
+    /** Are two IntervalSets equal?  Because all intervals are sorted
+     *  and disjoint, equality is a straight comparison of the interval
+     *  lists (List.equals delegates to Interval.equals per range).
+     *  instanceof is false for null, so no separate null check needed.
+     *  NOTE(review): no matching hashCode override is visible in this
+     *  chunk; confirm one exists before using these sets as hash keys.
+     */
+    public boolean equals(Object obj) {
+        if ( !(obj instanceof IntervalSet) ) {
+            return false;
+        }
+        IntervalSet other = (IntervalSet)obj;
+        return this.intervals.equals(other.intervals);
+    }
+
+    /** Render without token names (no Grammar available). */
+    public String toString() {
+        return this.toString((Grammar)null);
+    }
+
+    /** Render the set, using g to display token names when provided.
+     *  Multi-interval sets are wrapped in {...}; a single interval is
+     *  printed bare.  Single-value intervals print one value, ranges
+     *  print "a..b".
+     */
+    public String toString(Grammar g) {
+        if ( this.intervals==null || this.intervals.size()==0 ) {
+            return "{}";
+        }
+        StringBuffer buf = new StringBuffer();
+        int n = this.intervals.size();
+        boolean brace = n>1;
+        if ( brace ) {
+            buf.append("{");
+        }
+        for (int k=0; k<n; k++) {
+            if ( k>0 ) {
+                buf.append(", "); // separator before every element but the first
+            }
+            Interval I = (Interval) this.intervals.get(k);
+            int a = I.a;
+            int b = I.b;
+            if ( a==b ) {
+                if ( g!=null ) {
+                    buf.append(g.getTokenDisplayName(a));
+                }
+                else {
+                    buf.append(a);
+                }
+            }
+            else if ( g!=null ) {
+                buf.append(g.getTokenDisplayName(a)+".."+g.getTokenDisplayName(b));
+            }
+            else {
+                buf.append(a+".."+b);
+            }
+        }
+        if ( brace ) {
+            buf.append("}");
+        }
+        return buf.toString();
+    }
+
+    /** Number of individual integers in the set (sum of interval widths). */
+    public int size() {
+		int n = 0;
+		for (Iterator it = this.intervals.iterator(); it.hasNext();) {
+			Interval I = (Interval) it.next();
+			n += I.b - I.a + 1; // inclusive width of [a..b]
+		}
+		return n;
+    }
+
+    /** Expand the intervals into an explicit List of Integer values,
+     *  using the shared-Integer pool in Utils for small values.
+     */
+    public List toList() {
+		List values = new ArrayList();
+		for (Iterator it = this.intervals.iterator(); it.hasNext();) {
+			Interval I = (Interval) it.next();
+			for (int v=I.a; v<=I.b; v++) {
+				values.add(Utils.integer(v));
+			}
+		}
+		return values;
+    }
+
+	/** Expand the intervals into a primitive int[] of all member values,
+	 *  in ascending order (intervals are sorted and disjoint).
+	 */
+	public int[] toArray() {
+		int[] values = new int[size()];
+		int next = 0;
+		for (Iterator it = this.intervals.iterator(); it.hasNext();) {
+			Interval I = (Interval) it.next();
+			for (int v=I.a; v<=I.b; v++) {
+				values[next++] = v;
+			}
+		}
+		return values;
+	}
+
+	/** Copy this set into a runtime BitSet sized to hold the maximum
+	 *  element.  (Removed a loop counter the original incremented on
+	 *  every add but never read.)
+	 */
+	public org.antlr.runtime.BitSet toRuntimeBitSet() {
+		org.antlr.runtime.BitSet s =
+			new org.antlr.runtime.BitSet(getMaxElement()+1);
+		Iterator iter = this.intervals.iterator();
+		while (iter.hasNext()) {
+			Interval I = (Interval) iter.next();
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				s.add(v);
+			}
+		}
+		return s;
+	}
+
+	/** Removal of a single element is not supported; fails loudly with a
+	 *  descriptive message rather than silently ignoring the request.
+	 */
+	public void remove(int el) {
+        throw new NoSuchMethodError("IntervalSet.remove() unimplemented");
+    }
+
+	/*
+	protected void finalize() throws Throwable {
+		super.finalize();
+		System.out.println("size "+intervals.size()+" "+size());
+	}
+	*/
+}
diff --git a/src/org/antlr/misc/MutableInteger.java b/src/org/antlr/misc/MutableInteger.java
new file mode 100644
index 0000000..ae80407
--- /dev/null
+++ b/src/org/antlr/misc/MutableInteger.java
@@ -0,0 +1,15 @@
+package org.antlr.misc;
+
+/** Java won't let you modify an Integer; not sure how that's more
+ *  efficient, but...here's one that lets you modify it.
+ *  Frightening I have to implement this myself. Blech.
+ */
+public class MutableInteger {
+	public int value; // mutable by design, unlike java.lang.Integer
+	public MutableInteger() {
+		this(0); // default to zero
+	}
+	public MutableInteger(int value) {
+		this.value = value;
+	}
+}
diff --git a/src/org/antlr/misc/OrderedHashSet.java b/src/org/antlr/misc/OrderedHashSet.java
new file mode 100644
index 0000000..408a70f
--- /dev/null
+++ b/src/org/antlr/misc/OrderedHashSet.java
@@ -0,0 +1,101 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+import org.antlr.tool.ErrorManager;
+
+import java.util.*;
+
+/** A HashSet that remembers the order that the elements were added.
+ *  You can alter the ith element with set(i,value) too :)  Unique list.
+ *  I need the replace/set-element-i functionality so I'm subclassing
+ *  HashSet.
+ */
+public class OrderedHashSet extends HashSet {
+    /** Track the elements in insertion order as they are added to the set */
+    protected List elements = new ArrayList();
+
+    /** Return the ith element in insertion order. */
+    public Object get(int i) {
+        return elements.get(i);
+    }
+
+    /** Replace an existing value with a new value; updates the element
+     *  list and the hash table, but not the key as that has not changed.
+     */
+    public Object set(int i, Object value) {
+        Object oldElement = elements.get(i);
+        elements.set(i,value); // update list
+        super.remove(oldElement); // now update the set: remove/add
+        super.add(value);
+        return oldElement;
+    }
+
+    /** Add a value to list; keep in hashtable for consistency also;
+     *  Key is object itself.  Good for say asking if a certain string is in
+     *  a list of strings.
+     */
+    public boolean add(Object value) {
+        boolean result = super.add(value);
+		if ( result ) {  // only track if new element not in set
+			elements.add(value);
+		}
+		return result;
+    }
+
+    /** Deliberately unsupported: removal would have to keep the order
+     *  list and the hash set in sync (commented-out sketch below).
+     */
+    public boolean remove(Object o) {
+		throw new UnsupportedOperationException();
+		/*
+		elements.remove(o);
+        return super.remove(o);
+        */
+    }
+
+    public void clear() {
+        elements.clear();
+        super.clear();
+    }
+
+    /** Return the List holding list of table elements.  Note that you are
+     *  NOT getting a copy so don't write to the list.
+     */
+    public List elements() {
+        return elements;
+    }
+
+    /** Size of both views; reports an internal error if the order list
+     *  and the underlying hash set have drifted out of sync.
+     */
+    public int size() {
+        if ( elements.size()!=super.size() ) {
+			ErrorManager.internalError("OrderedHashSet: elements and set size differs; "+
+									   elements.size()+"!="+super.size());
+        }
+        return elements.size();
+    }
+
+    public String toString() {
+        return elements.toString();
+    }
+}
diff --git a/src/org/antlr/misc/Utils.java b/src/org/antlr/misc/Utils.java
new file mode 100644
index 0000000..0d201dd
--- /dev/null
+++ b/src/org/antlr/misc/Utils.java
@@ -0,0 +1,73 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+/** Small static helpers shared across the tool. */
+public class Utils {
+	public static final int INTEGER_POOL_MAX_VALUE = 1000;
+	// perfect-hash cache of shared Integer objects for 0..INTEGER_POOL_MAX_VALUE
+	static Integer[] ints = new Integer[INTEGER_POOL_MAX_VALUE+1];
+
+	/** Integer objects are immutable so share all Integers with the
+	 *  same value up to some max size.  Use an array as a perfect hash.
+	 *  Return shared object for 0..INTEGER_POOL_MAX_VALUE or a new
+	 *  Integer object with x in it.
+	 */
+	public static Integer integer(int x) {
+		boolean poolable = x>=0 && x<=INTEGER_POOL_MAX_VALUE;
+		if ( !poolable ) {
+			return new Integer(x);
+		}
+		Integer cached = ints[x];
+		if ( cached==null ) {
+			cached = new Integer(x);
+			ints[x] = cached;
+		}
+		return cached;
+	}
+
+	/** Replace every occurrence of replacee inside src with replacer;
+	 *  replacer==null simply deletes the occurrences.  Plain string
+	 *  scanning, no regex (faster than String.replaceAll for this
+	 *  purpose, and avoids regex metacharacter surprises).
+	 */
+	public static String replace(String src, String replacee, String replacer) {
+		StringBuffer result = new StringBuffer(src.length() + 50);
+		int scan = 0;
+		for (int hit = src.indexOf(replacee); hit != -1; hit = src.indexOf(replacee, scan)) {
+			result.append(src.substring(scan, hit));
+			if ( replacer!=null ) {
+				result.append(replacer);
+			}
+			scan = hit + replacee.length();
+		}
+		result.append(src.substring(scan));
+		return result.toString();
+	}
+}
diff --git a/src/org/antlr/test/BaseTest.java b/src/org/antlr/test/BaseTest.java
new file mode 100644
index 0000000..f6903ee
--- /dev/null
+++ b/src/org/antlr/test/BaseTest.java
@@ -0,0 +1,542 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import junit.framework.TestCase;
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Message;
+
+import java.io.*;
+import java.util.ArrayList;
+import java.util.List;
+
+public abstract class BaseTest extends TestCase {
+
+	// set to a jikes binary path to compile with jikes instead of javac
+	public static final String jikes = null;//"/usr/bin/jikes";
+	public static final String pathSep = System.getProperty("path.separator");
+	public static final String CLASSPATH = System.getProperty("java.class.path");
+	// scratch dir where grammars are generated, compiled, and executed
+	public static final String tmpdir = new File(System.getProperty("java.io.tmpdir"), "antlr3").getAbsolutePath();
+
+	/** If error during execution, store stderr here */
+	protected String stderr;
+
+	/** Build a Tool configured to emit generated code into tmpdir. */
+	protected Tool newTool() {
+		Tool tool = new Tool();
+		tool.setOutputDirectory(tmpdir);
+		return tool;
+	}
+
+	/** Compile fileName (relative to tmpdir) with javac, or jikes when
+	 *  configured.  Returns true when the compiler exits 0.  Compiler
+	 *  stdout/stderr are drained on background threads and echoed to
+	 *  System.err when nonempty.
+	 *  Fixes: join the drain threads after waitFor() (their toString()
+	 *  could otherwise miss late output -- rawExecRecognizer already
+	 *  does this), and label the stdout branch "stdout" not "stderr".
+	 */
+	protected boolean compile(String fileName) {
+		String compiler = "javac";
+		String classpathOption = "-classpath";
+
+		if (jikes!=null) {
+			compiler = jikes;
+			classpathOption = "-bootclasspath";
+		}
+
+		String[] args = new String[] {
+					compiler, "-d", tmpdir,
+					classpathOption, tmpdir+pathSep+CLASSPATH,
+					tmpdir+"/"+fileName
+		};
+		String cmdLine = compiler+" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+fileName;
+		//System.out.println("compile: "+cmdLine);
+		File outputDir = new File(tmpdir);
+		try {
+			Process process =
+				Runtime.getRuntime().exec(args, null, outputDir);
+			StreamVacuum stdout = new StreamVacuum(process.getInputStream());
+			StreamVacuum stderr = new StreamVacuum(process.getErrorStream());
+			stdout.start();
+			stderr.start();
+			process.waitFor();
+			// wait for the drain threads, else toString() can miss output
+			stdout.join();
+			stderr.join();
+			if ( stdout.toString().length()>0 ) {
+				System.err.println("compile stdout from: "+cmdLine);
+				System.err.println(stdout);
+			}
+			if ( stderr.toString().length()>0 ) {
+				System.err.println("compile stderr from: "+cmdLine);
+				System.err.println(stderr);
+			}
+			int ret = process.exitValue();
+			return ret==0;
+		}
+		catch (Exception e) {
+			System.err.println("can't exec compilation");
+			e.printStackTrace(System.err);
+			return false;
+		}
+	}
+
+	/** Return true if all is ok, no errors.  Writes grammarStr into
+	 *  tmpdir/fileName, runs the ANTLR Tool over grammarFileName with
+	 *  -o and -lib both pointed at tmpdir, and echoes any collected
+	 *  errors to System.err.
+	 */
+	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) {
+		boolean allIsWell = true;
+		mkdir(tmpdir);
+		writeFile(tmpdir, fileName, grammarStr);
+		try {
+			final List options = new ArrayList();
+			if ( debug ) {
+				options.add("-debug");
+			}
+			options.add("-o");
+			options.add(tmpdir);
+			options.add("-lib");
+			options.add(tmpdir);
+			options.add(new File(tmpdir,grammarFileName).toString());
+			final String[] optionsA = new String[options.size()];
+			options.toArray(optionsA);
+			// capture tool errors in a queue instead of default reporting
+			final ErrorQueue equeue = new ErrorQueue();
+			ErrorManager.setErrorListener(equeue);
+			Tool antlr = new Tool(optionsA);
+			antlr.process();
+			if ( equeue.errors.size()>0 ) {
+				allIsWell = false;
+				System.err.println("antlr reports errors from "+options);
+				for (int i = 0; i < equeue.errors.size(); i++) {
+					Message msg = (Message) equeue.errors.get(i);
+					System.err.println(msg);
+				}
+			}
+		}
+		catch (Exception e) {
+			allIsWell = false;
+			System.err.println("problems building grammar: "+e);
+			e.printStackTrace(System.err);
+		}
+		return allIsWell;
+	}
+
+	/** Generate, compile, and run a parser+lexer over input; return the
+	 *  run's stdout (stderr, if any, is stored in this.stderr by
+	 *  rawExecRecognizer).  Whether the generated Test harness builds
+	 *  trees or templates is sniffed from the grammar's output option
+	 *  text (both "output=X" and "output = X" spellings).
+	 */
+	protected String execParser(String grammarFileName,
+									String grammarStr,
+									String parserName,
+									String lexerName,
+									String startRuleName,
+									String input, boolean debug)
+	{
+		// start from a clean slate so stale artifacts can't mask failures
+		eraseFiles(".class");
+		eraseFiles(".java");
+
+		rawGenerateAndBuildRecognizer(grammarFileName,
+									  grammarStr,
+									  parserName,
+									  lexerName,
+									  debug);
+		writeFile(tmpdir, "input", input);
+		boolean parserBuildsTrees =
+			grammarStr.indexOf("output=AST")>=0 ||
+			grammarStr.indexOf("output = AST")>=0;
+		boolean parserBuildsTemplate =
+			grammarStr.indexOf("output=template")>=0 ||
+			grammarStr.indexOf("output = template")>=0;
+		return rawExecRecognizer(parserName,
+								 null,
+								 lexerName,
+								 startRuleName,
+								 null,
+								 parserBuildsTrees,
+								 parserBuildsTemplate,
+								 debug);
+	}
+
+	/** Convenience form of the 11-arg execTreeParser with debug off. */
+	protected String execTreeParser(String parserGrammarFileName,
+										String parserGrammarStr,
+										String parserName,
+										String treeParserGrammarFileName,
+										String treeParserGrammarStr,
+										String treeParserName,
+										String lexerName,
+										String parserStartRuleName,
+										String treeParserStartRuleName,
+										String input)
+	{
+		return execTreeParser(parserGrammarFileName,
+							  parserGrammarStr,
+							  parserName,
+							  treeParserGrammarFileName,
+							  treeParserGrammarStr,
+							  treeParserName,
+							  lexerName,
+							  parserStartRuleName,
+							  treeParserStartRuleName,
+							  input,
+							  false);
+	}
+
+	/** Generate and build a parser+lexer and a tree parser, run the
+	 *  parser over input, then walk the resulting tree; return the
+	 *  run's stdout.  Consistency fix: recognize both "output=X" and
+	 *  "output = X" spellings of the output option, exactly as
+	 *  execParser does.
+	 */
+	protected String execTreeParser(String parserGrammarFileName,
+										String parserGrammarStr,
+										String parserName,
+										String treeParserGrammarFileName,
+										String treeParserGrammarStr,
+										String treeParserName,
+										String lexerName,
+										String parserStartRuleName,
+										String treeParserStartRuleName,
+										String input,
+										boolean debug)
+	{
+		eraseFiles(".class");
+		eraseFiles(".java");
+
+		// build the parser
+		rawGenerateAndBuildRecognizer(parserGrammarFileName,
+									  parserGrammarStr,
+									  parserName,
+									  lexerName,
+									  debug);
+
+		// build the tree parser
+		rawGenerateAndBuildRecognizer(treeParserGrammarFileName,
+									  treeParserGrammarStr,
+									  treeParserName,
+									  lexerName,
+									  debug);
+
+		writeFile(tmpdir, "input", input);
+
+		boolean parserBuildsTrees =
+			parserGrammarStr.indexOf("output=AST")>=0 ||
+			parserGrammarStr.indexOf("output = AST")>=0;
+		boolean parserBuildsTemplate =
+			parserGrammarStr.indexOf("output=template")>=0 ||
+			parserGrammarStr.indexOf("output = template")>=0;
+
+		return rawExecRecognizer(parserName,
+								 treeParserName,
+								 lexerName,
+								 parserStartRuleName,
+								 treeParserStartRuleName,
+								 parserBuildsTrees,
+								 parserBuildsTemplate,
+								 debug);
+	}
+
+	/** Run ANTLR over the grammar, then compile the generated parser
+	 *  and/or lexer sources.  Every compile step is attempted even after
+	 *  an earlier failure; returns true only if all steps succeeded.
+	 */
+	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
+													String grammarStr,
+													String parserName,
+													String lexerName,
+													boolean debug)
+	{
+		boolean allIsWell =
+			antlr(grammarFileName, grammarFileName, grammarStr, debug);
+		if ( lexerName==null ) {
+			// combined or parser-only grammar: just the parser to compile
+			allIsWell &= compile(parserName+".java");
+		}
+		else {
+			if ( parserName!=null ) {
+				allIsWell &= compile(parserName+".java");
+			}
+			allIsWell &= compile(lexerName+".java");
+		}
+		return allIsWell;
+	}
+
+	/** Emit the appropriate Test.java harness (tree / template / plain),
+	 *  compile it, run it in a child JVM on tmpdir/input, and return the
+	 *  child's stdout.  Child stderr, if any, is saved in this.stderr
+	 *  and echoed to System.err.  Returns null if the exec itself fails.
+	 */
+	protected String rawExecRecognizer(String parserName,
+											  String treeParserName,
+											  String lexerName,
+											  String parserStartRuleName,
+											  String treeParserStartRuleName,
+											  boolean parserBuildsTrees,
+											  boolean parserBuildsTemplate,
+											  boolean debug)
+	{
+		if ( parserBuildsTrees ) {
+			writeTreeTestFile(parserName,
+							  treeParserName,
+							  lexerName,
+							  parserStartRuleName,
+							  treeParserStartRuleName,
+							  debug);
+		}
+		else if ( parserBuildsTemplate ) {
+			writeTemplateTestFile(parserName,
+								  lexerName,
+								  parserStartRuleName,
+								  debug);
+		}
+		else {
+			writeTestFile(parserName,
+						  lexerName,
+						  parserStartRuleName,
+						  debug);
+		}
+
+		compile("Test.java");
+		try {
+			String[] args = new String[] {
+				"java", "-classpath", CLASSPATH+pathSep+tmpdir,
+				"Test", new File(tmpdir, "input").getAbsolutePath()
+			};
+			String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath();
+			//System.out.println("execParser: "+cmdLine);
+			this.stderr = null;
+			Process process =
+				Runtime.getRuntime().exec(args, null, new File(tmpdir));
+			StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream());
+			StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream());
+			stdoutVacuum.start();
+			stderrVacuum.start();
+			process.waitFor();
+			// join the vacuums so their buffers are complete before reading
+			stdoutVacuum.join();
+			stderrVacuum.join();
+			String output = null;
+			output = stdoutVacuum.toString();
+			if ( stderrVacuum.toString().length()>0 ) {
+				this.stderr = stderrVacuum.toString();
+				System.err.println("exec parser stderrVacuum: "+ stderrVacuum);
+			}
+			return output;
+		}
+		catch (Exception e) {
+			System.err.println("can't exec parser");
+			e.printStackTrace(System.err);
+		}
+		return null;
+	}
+
+	/** Drains an InputStream on a background thread so a child process
+	 *  cannot block on a full stdout/stderr pipe; collects the text for
+	 *  retrieval via toString() after join().
+	 */
+	public static class StreamVacuum implements Runnable {
+		StringBuffer buf = new StringBuffer();
+		BufferedReader in;
+		Thread sucker; // drain thread; created in start()
+		public StreamVacuum(InputStream in) {
+			this.in = new BufferedReader( new InputStreamReader(in) );
+		}
+		/** Begin draining on a new thread. */
+		public void start() {
+			sucker = new Thread(this);
+			sucker.start();
+		}
+		public void run() {
+			try {
+				String line = in.readLine();
+				while (line!=null) {
+					buf.append(line);
+					buf.append('\n'); // normalize line endings to \n
+					line = in.readLine();
+				}
+			}
+			catch (IOException ioe) {
+				System.err.println("can't read output from process");
+			}
+		}
+		/** wait for the thread to finish */
+		public void join() throws InterruptedException {
+			sucker.join();
+		}
+		/** Captured output; only complete after join() returns. */
+		public String toString() {
+			return buf.toString();
+		}
+	}
+
+	/** Write content to dir/fileName, creating or overwriting the file.
+	 *  IOExceptions are reported to System.err rather than propagated
+	 *  (a later test step fails visibly if the file is missing).
+	 *  Fix: close the writer in a finally block so the file descriptor
+	 *  is not leaked when write() throws; BufferedWriter.close() also
+	 *  closes the underlying FileWriter, so no separate w.close().
+	 */
+	protected void writeFile(String dir, String fileName, String content) {
+		try {
+			File f = new File(dir, fileName);
+			FileWriter w = new FileWriter(f);
+			BufferedWriter bw = new BufferedWriter(w);
+			try {
+				bw.write(content);
+			}
+			finally {
+				bw.close(); // flushes, then closes the FileWriter too
+			}
+		}
+		catch (IOException ioe) {
+			System.err.println("can't write file");
+			ioe.printStackTrace(System.err);
+		}
+	}
+
+	/** Create dir and any missing parents; no-op if it already exists. */
+	protected void mkdir(String dir) {
+		new File(dir).mkdirs();
+	}
+
+	/** Emit tmpdir/Test.java: a main() that lexes/parses args[0] with
+	 *  the generated recognizers.  When debug, the parser is wired to a
+	 *  Profiler subclass whose terminate() is a no-op so the JVM exits
+	 *  cleanly.  The template strings must stay byte-exact; they are
+	 *  the source of the compiled harness.
+	 */
+	protected void writeTestFile(String parserName,
+									 String lexerName,
+									 String parserStartRuleName,
+									 boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        $createParser$\n"+
+			"        parser.$parserStartRuleName$();\n" +
+			"    }\n" +
+			"}"
+			);
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			// non-debug run: plain parser construction, no profiler
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	/** Emit tmpdir/Test.java for tree grammars: parse args[0], then
+	 *  either print the AST (no tree parser given) or walk it with the
+	 *  generated tree parser.  The $if$ on treeParserStartRuleName
+	 *  selects between those two bodies at template-render time.
+	 *  Template strings must stay byte-exact.
+	 */
+	protected void writeTreeTestFile(String parserName,
+										 String treeParserName,
+										 String lexerName,
+										 String parserStartRuleName,
+										 String treeParserStartRuleName,
+										 boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
+			"        $createParser$\n"+
+			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
+			"        $if(!treeParserStartRuleName)$\n" +
+			"        if ( r.tree!=null )\n" +
+			"            System.out.println(((Tree)r.tree).toStringTree());\n" +
+			"        $else$\n" +
+			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
+			"        nodes.setTokenStream(tokens);\n" +
+			"        $treeParserName$ walker = new $treeParserName$(nodes);\n" +
+			"        walker.$treeParserStartRuleName$();\n" +
+			"        $endif$\n" +
+			"    }\n" +
+			"}"
+			);
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			// non-debug run: plain parser construction, no profiler
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("treeParserName", treeParserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		outputFileST.setAttribute("treeParserStartRuleName", treeParserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	/**
+	 * Generates tmpdir/Test.java for template-output grammars: the harness
+	 * parses args[0] with a hard-wired StringTemplateGroup ("group test;
+	 * foo(x,y) ::= ...") installed via parser.setTemplateLib(), then prints
+	 * r.st (or the empty string when the start rule produced no template).
+	 * When debug is true the parser is wired to a Profiler2.
+	 */
+	protected void writeTemplateTestFile(String parserName,
+											 String lexerName,
+											 String parserStartRuleName,
+											 boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.stringtemplate.*;\n" +
+			"import org.antlr.stringtemplate.language.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"import java.io.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    static String templates =\n" +
+			"    		\"group test;\"+" +
+			"    		\"foo(x,y) ::= \\\"<x> <y>\\\"\";\n"+
+			"    static StringTemplateGroup group ="+
+			"    		new StringTemplateGroup(new StringReader(templates)," +
+			"					AngleBracketTemplateLexer.class);"+
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        $createParser$\n"+
+			"		 parser.setTemplateLib(group);\n"+
+			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
+			"        if ( r.st!=null )\n" +
+			"            System.out.print(r.st.toString());\n" +
+			"	 	 else\n" +
+			"            System.out.print(\"\");\n" +
+			"    }\n" +
+			"}"
+			);
+		// debug runs wrap the parser with a profiler; plain runs use a bare parser
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	/** Deletes every file in tmpdir whose name ends with the given suffix. */
+	protected void eraseFiles(final String filesEndingWith) {
+		File tmpdirF = new File(tmpdir);
+		String[] files = tmpdirF.list();
+		// list() returns null when tmpdir does not exist; the loop guard covers that
+		for(int i = 0; files!=null && i < files.length; i++) {
+			if ( files[i].endsWith(filesEndingWith) ) {
+        		new File(tmpdir+"/"+files[i]).delete();
+			}
+		}
+	}
+
+	/**
+	 * Returns the first line of the captured stderr with the leading
+	 * 'Exception in thread "main" ' prefix stripped; null when nothing was
+	 * captured on stderr.
+	 * NOTE(review): assumes lines[0] actually starts with that prefix -- if
+	 * other stderr text came first, substring(prefix.length()) returns the
+	 * wrong text or throws; confirm callers only invoke this after an
+	 * uncaught exception.  substring(begin, lines[0].length()) is equivalent
+	 * to substring(begin).
+	 */
+	public String getFirstLineOfException() {
+		if ( this.stderr==null ) {
+			return null;
+		}
+		String[] lines = this.stderr.split("\n");
+		String prefix="Exception in thread \"main\" ";
+		return lines[0].substring(prefix.length(),lines[0].length());
+	}
+}
diff --git a/src/org/antlr/test/DebugTestAutoAST.java b/src/org/antlr/test/DebugTestAutoAST.java
new file mode 100644
index 0000000..223aa43
--- /dev/null
+++ b/src/org/antlr/test/DebugTestAutoAST.java
@@ -0,0 +1,32 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+/** Re-runs the TestAutoAST suite with debug=true so generated harnesses use the profiler-wired parser. */
+public class DebugTestAutoAST extends TestAutoAST {
+	public DebugTestAutoAST() {debug=true;}
+}
diff --git a/src/org/antlr/test/DebugTestRewriteAST.java b/src/org/antlr/test/DebugTestRewriteAST.java
new file mode 100644
index 0000000..156edeb
--- /dev/null
+++ b/src/org/antlr/test/DebugTestRewriteAST.java
@@ -0,0 +1,6 @@
+package org.antlr.test;
+
+/** Re-runs the TestRewriteAST suite with debug=true so generated harnesses use the profiler-wired parser. */
+public class DebugTestRewriteAST extends TestRewriteAST {
+	public DebugTestRewriteAST() {debug=true;}
+}
+
diff --git a/src/org/antlr/test/ErrorQueue.java b/src/org/antlr/test/ErrorQueue.java
new file mode 100644
index 0000000..c75e900
--- /dev/null
+++ b/src/org/antlr/test/ErrorQueue.java
@@ -0,0 +1,41 @@
+package org.antlr.test;
+
+import org.antlr.tool.ANTLRErrorListener;
+import org.antlr.tool.Message;
+import org.antlr.tool.ToolMessage;
+
+import java.util.List;
+import java.util.LinkedList;
+
+/** Buffers ANTLR tool messages (infos/warnings/errors) so tests can assert on what the tool reported. */
+public class ErrorQueue implements ANTLRErrorListener {
+	List infos = new LinkedList();
+	List errors = new LinkedList();
+	List warnings = new LinkedList();
+
+	public void info(String msg) {
+		infos.add(msg);
+	}
+
+	public void error(Message msg) {
+		errors.add(msg);
+	}
+
+	public void warning(Message msg) {
+		warnings.add(msg);
+	}
+
+	// ToolMessage errors land in the same errors list as grammar Messages
+	public void error(ToolMessage msg) {
+		errors.add(msg);
+	}
+
+	// total number of messages of all kinds received so far
+	public int size() {
+		return infos.size() + errors.size() + warnings.size();
+	}
+
+	public String toString() {
+		return "infos: "+infos+
+			"errors: "+errors+
+			"warnings: "+warnings;
+	}
+}
+
diff --git a/src/org/antlr/test/TestASTConstruction.java b/src/org/antlr/test/TestASTConstruction.java
new file mode 100644
index 0000000..a321fed
--- /dev/null
+++ b/src/org/antlr/test/TestASTConstruction.java
@@ -0,0 +1,361 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.tool.Grammar;
+
+/**
+ * Verifies the grammar AST that ANTLR builds for each rule construct
+ * (EBNF suffixes, labels, sets, synthetic synpred rules) by comparing
+ * g.getRule(...).tree.toStringTree() against an expected LISP-form string.
+ */
+public class TestASTConstruction extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestASTConstruction() {
+    }
+
+	public void testA() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : A;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT A <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	// NOTE(review): "Nake" looks like a typo for "Naked", but the name is
+	// upstream ANTLR 3.0.1 source imported via this patch, so leave it as-is.
+	public void testNakeRulePlusInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"A : B+;\n" +
+				"B : 'a';");
+		String expecting =
+			" ( rule A ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT B <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("A").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testRulePlus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (b)+;\n" +
+				"b : B;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT b <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	// a "naked" subrule (no parens) must build the same tree as (b)+
+	public void testNakedRulePlus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : b+;\n" +
+				"b : B;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT b <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testRuleOptional() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (b)?;\n" +
+				"b : B;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( ? ( BLOCK ( ALT b <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testNakedRuleOptional() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : b?;\n" +
+				"b : B;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( ? ( BLOCK ( ALT b <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testRuleStar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (b)*;\n" +
+				"b : B;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT b <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testNakedRuleStar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : b*;\n" +
+				"b : B;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT b <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testCharStar() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a'*;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT 'a' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testCharStarInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'b'*;");
+		String expecting =
+			" ( rule B ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT 'b' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testStringStar() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'while'*;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT 'while' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testStringStarInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'while'*;");
+		String expecting =
+			" ( rule B ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT 'while' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testCharPlus() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a'+;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT 'a' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testCharPlusInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'b'+;");
+		String expecting =
+			" ( rule B ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT 'b' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testCharOptional() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a'?;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( ? ( BLOCK ( ALT 'a' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testCharOptionalInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'b'?;");
+		String expecting =
+			" ( rule B ARG RET scope ( BLOCK ( ALT ( ? ( BLOCK ( ALT 'b' <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	// range subrule: ( .. 'a' 'z' ) nested inside the (+ ...) loop
+	public void testCharRangePlus() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"ID : 'a'..'z'+;");
+		String expecting =
+			" ( rule ID ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT ( .. 'a' 'z' ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("ID").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=ID;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( = x ID ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testLabelOfOptional() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=ID?;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( ? ( BLOCK ( ALT ( = x ID ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testLabelOfClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=ID*;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT ( = x ID ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testRuleLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=b;\n" +
+				"b : ID;\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( = x b ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testSetLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=(A|B);\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( = x ( BLOCK ( ALT A <end-of-alt> ) ( ALT B <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testNotSetLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=~(A|B);\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( = x ( ~ ( BLOCK ( ALT A <end-of-alt> ) ( ALT B <end-of-alt> ) <end-of-block> ) ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	// += is the list-label operator; tree shows ( += x ... )
+	public void testNotSetListLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+=~(A|B);\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( += x ( ~ ( BLOCK ( ALT A <end-of-alt> ) ( ALT B <end-of-alt> ) <end-of-block> ) ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testNotSetListLabelInLoop() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+=~(A|B)+;\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT ( += x ( ~ ( BLOCK ( ALT A <end-of-alt> ) ( ALT B <end-of-alt> ) <end-of-block> ) ) ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testRuleLabelOfPositiveClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=b+;\n" +
+				"b : ID;\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT ( = x b ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testListLabelOfClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+=ID*;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT ( += x ID ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testListLabelOfClosure2() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+='int'*;");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( * ( BLOCK ( ALT ( += x 'int' ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testRuleListLabelOfPositiveClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n" +
+				"options {output=AST;}\n"+
+				"a : x+=b+;\n" +
+				"b : ID;\n");
+		String expecting =
+			" ( rule a ARG RET scope ( BLOCK ( ALT ( + ( BLOCK ( ALT ( += x b ) <end-of-alt> ) <end-of-block> ) ) <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	// backtrack=true makes ANTLR synthesize synpred1; assert it is non-empty
+	public void testRootTokenInStarLoop() throws Exception {
+		Grammar g = new Grammar(
+				"grammar Expr;\n" +
+				"options { backtrack=true; }\n" +
+				"a : ('*'^)* ;\n");  // bug: the synpred had nothing in it
+		String expecting =
+			" ( rule synpred1 ARG RET scope ( BLOCK ( ALT '*' <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("synpred1").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	public void testActionInStarLoop() throws Exception {
+		Grammar g = new Grammar(
+				"grammar Expr;\n" +
+				"options { backtrack=true; }\n" +
+				"a : ({blort} 'x')* ;\n");  // bug: the synpred had nothing in it
+		String expecting =
+			" ( rule synpred1 ARG RET scope ( BLOCK ( ALT blort 'x' <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("synpred1").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestAttributes.java b/src/org/antlr/test/TestAttributes.java
new file mode 100644
index 0000000..8f47c07
--- /dev/null
+++ b/src/org/antlr/test/TestAttributes.java
@@ -0,0 +1,3140 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.codegen.ActionTranslatorLexer;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+import org.antlr.tool.*;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.ArrayList;
+
+/** Check the $x, $x.y attributes.  For checking the actual
+ *  translation, assume the Java target.  This is still a great test
+ *  for the semantics of the $x.y stuff regardless of the target.
+ */
+public class TestAttributes extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestAttributes() {
+	}
+
+	/** '<' and angle-bracket text inside an action must pass through
+	 *  untranslated (expecting == action). */
+	public void testEscapedLessThanInAction() throws Exception {
+		Grammar g = new Grammar();
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		String action = "i<3; '<xmltag>'";
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
+		String expecting = action;
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, "<action>");
+		actionST.setAttribute("action", rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+	}
+
+	/** An escaped \$ in an action must come out as a literal '$'. */
+	public void testEscaped$InAction() throws Exception {
+		String action = "int \\$n; \"\\$in string\\$\"";
+		String expecting = "int $n; \"$in string$\"";
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"@members {"+action+"}\n"+
+				"a[User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),0);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+	}
+
+	/** $arg and $arg.field references to rule parameters translate to the
+	 *  bare parameter names; the tool must report no errors. */
+	public void testArguments() throws Exception {
+		String action = "$i; $i.x; $u; $u.x";
+		String expecting = "i; i.x; u; u.x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $x.start refs are checked during translation not before so ANTLR misses
+	 the fact that rule r has refs to predefined attributes if the ref is after
+	 the def of the method or self-referential.  Actually would be ok if I didn't
+	 convert actions to strings; keep as templates.
+	 June 9, 2006: made action translation leave templates not strings
+	 */
+	public void testRefToReturnValueBeforeRefToPredefinedAttr() throws Exception {
+		String action = "$x.foo";
+		String expecting = "x.foo";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		// note rule b also refs $b.start in its own action, after a's $x.foo ref
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : x=b {"+action+"} ;\n" +
+			"b returns [int foo] : B {$b.start} ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $x.text on a rule label must expand to input.toString(x.start,x.stop). */
+	public void testRuleLabelBeforeRefToPredefinedAttr() throws Exception {
+		// As of Mar 2007, I'm removing unused labels.  Unfortunately,
+		// the action is not seen until code gen.  Can't see $x.text
+		// before stripping unused labels.  We really need to translate
+		// actions first so code gen logic can use info.
+		String action = "$x.text";
+		String expecting = "input.toString(x.start,x.stop)";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : x=b {"+action+"} ;\n" +
+			"b : B ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $x names no known attribute: expect MSG_UNKNOWN_SIMPLE_ATTRIBUTE and
+	 *  the action text passed through unchanged. */
+	public void testInvalidArguments() throws Exception {
+		String action = "$x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** $x.i where x labels a call to rule a (which returns i).
+	 *  NOTE(review): expected rendering here is just "x" -- presumably the
+	 *  translation emits a template whose string form is the label; confirm
+	 *  against ActionTranslatorLexer before relying on this. */
+	public void testReturnValue() throws Exception {
+		String action = "$x.i";
+		String expecting = "x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i]\n" +
+				"        : 'a'\n" +
+				"        ;\n" +
+				"b : x=a {"+action+"} ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "b",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Same as testReturnValue but the return-value name contains a digit
+	 *  (i1), which the attribute scanner must accept. */
+	public void testReturnValueWithNumber() throws Exception {
+		String action = "$x.i1";
+		String expecting = "x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i1]\n" +
+				"        : 'a'\n" +
+				"        ;\n" +
+				"b : x=a {"+action+"} ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "b",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Refs to the current rule's own return values ($i, $u) and members
+	 *  of them ($i.x, $u.x) must translate to retval.<name> accesses. */
+	public void testReturnValues() throws Exception {
+		String action = "$i; $i.x; $u; $u.x";
+		String expecting = "retval.i; retval.i.x; retval.u; retval.u.x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/* regression test for ANTLR-46 */
+	/** Each alternative of rule1 assigns from a different rule ref; the
+	 *  translator appends a unique ref number per occurrence, so the two
+	 *  expected translations are "rule21" and "rule32" respectively.
+	 *  The do/while body runs exactly twice (i=0 then i=1), translating
+	 *  action1 in outer alt 1 and action2 in outer alt 2. */
+	public void testReturnWithMultipleRuleRefs() throws Exception {
+		String action1 = "$obj = $rule2.obj;";
+		String action2 = "$obj = $rule3.obj;";
+		String expecting1 = "obj = rule21;";
+		String expecting2 = "obj = rule32;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"rule1 returns [ Object obj ]\n" +
+			":	rule2 { "+action1+" }\n" +
+			"|	rule3 { "+action2+" }\n" +
+			";\n"+
+			"rule2 returns [ Object obj ]\n"+
+			":	foo='foo' { $obj = $foo.text; }\n"+
+			";\n"+
+			"rule3 returns [ Object obj ]\n"+
+			":	bar='bar' { $obj = $bar.text; }\n"+
+			";");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		int i = 0;
+		String action = action1;
+		String expecting = expecting1;
+		do {
+			// outerAltNum is i+1: alt numbering in the translator is 1-based
+			ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"rule1",
+																		 new antlr.CommonToken(ANTLRParser.ACTION,action),i+1);
+			String rawTranslation =
+					translator.translate();
+			StringTemplateGroup templates =
+					new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+			StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+			String found = actionST.toString();
+			assertEquals(expecting, found);
+			action = action2;
+			expecting = expecting2;
+		} while (i++ < 1);
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $x names no parameter, return value, or label of rule a, so the
+	 *  action must be left untranslated and MSG_UNKNOWN_SIMPLE_ATTRIBUTE
+	 *  reported. Unlike most tests here, genRecognizer() is not invoked
+	 *  first; the translator is exercised without a codegen pass. */
+	public void testInvalidReturnValues() throws Exception {
+		String action = "$x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** Predefined token-label attributes (text, type, line, pos, channel,
+	 *  index) translate to the corresponding Token getter calls; a bare
+	 *  label ($id, $f) stays as the label name, and an unknown attribute
+	 *  ($id.dork) passes through untouched. */
+	public void testTokenLabels() throws Exception {
+		String action = "$id; $f; $id.text; $id.getText(); $id.dork " +
+						"$id.type; $id.line; $id.pos; " +
+						"$id.channel; $id.index;";
+		String expecting = "id; f; id.getText(); id.getText(); id.dork " +
+						   "id.getType(); id.getLine(); id.getCharPositionInLine(); " +
+						   "id.getChannel(); id.getTokenIndex();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : id=ID f=FLOAT {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Rule-label refs: predefined start/stop/tree get Token/Object casts
+	 *  while a user return value ($r.x) is accessed directly; unlabeled
+	 *  rule refs ($a.x, $a.stop) behave the same. Verified by extracting
+	 *  the generated recognizer text between the ### and !!! markers
+	 *  rather than translating the action in isolation. */
+	public void testRuleLabels() throws Exception {
+		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.stop;";
+		String expecting = "r.x; ((Token)r.start); ((Token)r.stop); ((Object)r.tree); r.x; ((Token)r.stop);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Like testRuleLabels but with TokenLabelType=MYTOKEN set: the casts
+	 *  on $r.start/$r.stop must use MYTOKEN instead of Token. */
+	public void testRuleLabelsWithSpecialToken() throws Exception {
+		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.stop;";
+		String expecting = "r.x; ((MYTOKEN)r.start); ((MYTOKEN)r.stop); ((Object)r.tree); r.x; ((MYTOKEN)r.stop);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"options {TokenLabelType=MYTOKEN;}\n"+
+				"a returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Rule b labels a ref to rule a that is defined only later in the
+	 *  grammar; label attribute refs must still resolve identically to
+	 *  the backward-reference case. */
+	public void testForwardRefRuleLabels() throws Exception {
+		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.tree;";
+		String expecting = "r.x; ((Token)r.start); ((Token)r.stop); ((Object)r.tree); r.x; ((Object)r.tree);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;\n" +
+				"a returns [int x]\n" +
+				"  : ;\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A rule label may not reach into the rule's parameters: $r.z (z is
+	 *  a parameter of a) must stay untranslated and raise
+	 *  MSG_INVALID_RULE_PARAMETER_REF with args (rule, parameter). */
+	public void testInvalidRuleLabelAccessesParameter() throws Exception {
+		String action = "$r.z";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[int z] returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_RULE_PARAMETER_REF;
+		Object expectedArg = "a";
+		Object expectedArg2 = "z";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** A rule label may not reach into the rule's dynamic scope: $r.n
+	 *  (n lives in a's scope {...}) must stay untranslated and raise
+	 *  MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF with args (rule, attribute). */
+	public void testInvalidRuleLabelAccessesScopeAttribute() throws Exception {
+		String action = "$r.n";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a\n" +
+				"scope { int n; }\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF;
+		Object expectedArg = "a";
+		Object expectedArg2 = "n";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** $r.blort names an attribute rule a does not define at all: the
+	 *  action must stay untranslated and MSG_UNKNOWN_RULE_ATTRIBUTE be
+	 *  reported with args (rule, attribute). */
+	public void testInvalidRuleAttribute() throws Exception {
+		String action = "$r.blort";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[int z] returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE;
+		Object expectedArg = "a";
+		Object expectedArg2 = "blort";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** An isolated $r (rule label with no attribute) must raise
+	 *  MSG_ISOLATED_RULE_SCOPE. Only the error is checked here: the
+	 *  'expecting' and 'rawTranslation' values are intentionally unused. */
+	public void testMissingRuleAttribute() throws Exception {
+		String action = "$r";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[int z] returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "r";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** An isolated $a referencing an unlabeled rule invocation must also
+	 *  raise MSG_ISOLATED_RULE_SCOPE; translation output is not checked. */
+	public void testMissingUnlabeledRuleAttribute() throws Exception {
+		String action = "$a";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [int x]:\n" +
+				"  ;\n"+
+				"b : a {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "a";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** A plain (non-dynamic-scope) attribute ref $x inside @members —
+	 *  i.e. outside any rule — must stay untranslated and raise
+	 *  MSG_ATTRIBUTE_REF_NOT_IN_RULE. The translator is created with a
+	 *  null rule name and outerAltNum 0 to model the outside-rule case.
+	 *  NOTE(review): "{'+action+'}" below is literal text inside the
+	 *  grammar string (single quotes), so 'action' is NOT embedded in the
+	 *  grammar; the translator receives it via the token instead —
+	 *  looks like a typo for {"+action+"}, harmless here but confirm. */
+	public void testNonDynamicAttributeOutsideRule() throws Exception {
+		String action = "public void foo() { $x; }";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"@members {'+action+'}\n" +
+				"a : ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 null,
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** Same as testNonDynamicAttributeOutsideRule but for a qualified ref
+	 *  $x.y: MSG_ATTRIBUTE_REF_NOT_IN_RULE is expected with both the
+	 *  scope name ("x") and attribute name ("y") as message args.
+	 *  NOTE(review): same literal "{'+action+'}" quirk as the previous
+	 *  test — the action is not actually embedded in the grammar text. */
+	public void testNonDynamicAttributeOutsideRule2() throws Exception {
+		String action = "public void foo() { $x.y; }";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"@members {'+action+'}\n" +
+				"a : ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 null,
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE;
+		Object expectedArg = "x";
+		Object expectedArg2 = "y";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// D Y N A M I C A L L Y  S C O P E D  A T T R I B U T E S
+
+	/** A global dynamic-scope ref $Symbols::names translates to a peek of
+	 *  the generated Symbols_stack with a cast to Symbols_scope; the
+	 *  nested $id.text becomes id.getText(). */
+	public void testBasicGlobalScope() throws Exception {
+		String action = "$Symbols::names.add($id.text);";
+		String expecting = "((Symbols_scope)Symbols_stack.peek()).names.add(id.getText());";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Rule a says "scope Symbols;" but no global scope Symbols is
+	 *  declared: expect exactly two queued errors, and verify one of them
+	 *  is MSG_UNKNOWN_DYNAMIC_SCOPE for "Symbols". No translation result
+	 *  is checked. */
+	public void testUnknownGlobalScope() throws Exception {
+		String action = "$Symbols::names.add($id.text);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+			"  ;\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+
+		assertEquals("unexpected errors: "+equeue, 2, equeue.errors.size());
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE;
+		Object expectedArg = "Symbols";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** A negatively indexed scope ref $Symbols[-1]:: addresses the stack
+	 *  relative to its top: it becomes elementAt(size()-1-1), i.e. one
+	 *  below the current top of Symbols_stack. */
+	public void testIndexedGlobalScope() throws Exception {
+		String action = "$Symbols[-1]::names.add($id.text);";
+		String expecting =
+			"((Symbols_scope)Symbols_stack.elementAt(Symbols_stack.size()-1-1)).names.add(id.getText());";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A non-negative scope index is absolute: $Symbols[0]:: becomes
+	 *  elementAt(0) — the bottom of the stack, not an offset from top. */
+	public void test0IndexedGlobalScope() throws Exception {
+		String action = "$Symbols[0]::names.add($id.text);";
+		String expecting =
+			"((Symbols_scope)Symbols_stack.elementAt(0)).names.add(id.getText());";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Any positive scope index is likewise absolute: $Symbols[3]::
+	 *  becomes elementAt(3) on Symbols_stack. */
+	public void testAbsoluteIndexedGlobalScope() throws Exception {
+		String action = "$Symbols[3]::names.add($id.text);";
+		String expecting =
+			"((Symbols_scope)Symbols_stack.elementAt(3)).names.add(id.getText());";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Underscores in both scope and attribute names ($foo_bar::a_b) must
+	 *  be tokenized as part of the identifiers, not as separators. */
+	public void testScopeAndAttributeWithUnderscore() throws Exception {
+		String action = "$foo_bar::a_b;";
+		String expecting = "((foo_bar_scope)foo_bar_stack.peek()).a_b;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope foo_bar {\n" +
+				"  int a_b;\n" +
+				"}\n" +
+				"a scope foo_bar; : (ID {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A global scope shared by several rules (a pushes Symbols, b writes
+	 *  to it): a $Symbols::x ref in a must still translate to the usual
+	 *  peek/cast even though a also declares its own anonymous scope. */
+	public void testSharedGlobalScope() throws Exception {
+		String action = "$Symbols::x;";
+		String expecting = "((Symbols_scope)Symbols_stack.peek()).x;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  String x;\n" +
+				"}\n" +
+				"a\n"+
+				"scope { int y; }\n"+
+				"scope Symbols;\n" +
+				" : b {"+action+"}\n" +
+				" ;\n" +
+				"b : ID {$Symbols::x=$ID.text} ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Global-scope refs ARE legal outside rules: $Symbols::names in a
+	 *  @members action translates to the peek/cast form with no errors.
+	 *  NOTE(review): "{'+action+'}" in the grammar string is literal text
+	 *  (single quotes), so the action is not embedded in the grammar; the
+	 *  translator receives it via the token below — confirm intent. */
+	public void testGlobalScopeOutsideRule() throws Exception {
+		String action = "public void foo() {$Symbols::names.add('foo');}";
+		String expecting = "public void foo() {((Symbols_scope)Symbols_stack.peek()).names.add('foo');}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"@members {'+action+'}\n" +
+				"a : \n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A rule's dynamic scope can also be referenced from @members:
+	 *  $a::name translates to ((a_scope)a_stack.peek()).name. Here the
+	 *  action really is concatenated into the grammar, and the translator
+	 *  is built with a null rule name / outerAltNum 0 (outside a rule). */
+	public void testRuleScopeOutsideRule() throws Exception {
+		String action = "public void foo() {$a::name;}";
+		String expecting = "public void foo() {((a_scope)a_stack.peek()).name;}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"@members {"+action+"}\n" +
+				"a\n" +
+				"scope { int name; }\n" +
+				"  : {foo();}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 null,
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A rule referencing its own anonymous dynamic scope: $a::n inside
+	 *  rule a translates to ((a_scope)a_stack.peek()).n. */
+	public void testBasicRuleScope() throws Exception {
+		String action = "$a::n;";
+		String expecting = "((a_scope)a_stack.peek()).n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A dynamic-scope attribute must be qualified: a bare $n (for n in
+	 *  a's scope) is rejected with MSG_ISOLATED_RULE_ATTRIBUTE. No
+	 *  translator is constructed here — the error surfaces while
+	 *  genRecognizer() processes the action embedded in the grammar, so
+	 *  'expecting' is unused and only the queued error is verified. */
+	public void testUnqualifiedRuleScopeAccessInsideRule() throws Exception {
+		String action = "$n;";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE;
+		Object expectedArg = "n";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg,
+										expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** From rule b, an isolated $a where rule a has a dynamic scope
+	 *  refers to the whole scope stack, not the top entry: it translates
+	 *  to "a_stack" and is not an error. */
+	public void testIsolatedDynamicRuleScopeRef() throws Exception {
+		String action = "$a;"; // refers to stack not top of stack
+		String expecting = "a_stack;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : b ;\n" +
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Rule b, invoked from a, can reach a's dynamic scope with the
+	 *  qualified form: $a::n translates to ((a_scope)a_stack.peek()).n. */
+	public void testDynamicRuleScopeRefInSubrule() throws Exception {
+		String action = "$a::n;";
+		String expecting = "((a_scope)a_stack.peek()).n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : b ;\n" +
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** An isolated $Symbols ref to a global scope must translate to Symbols_stack. */
+	public void testIsolatedGlobalScopeRef() throws Exception {
+		String action = "$Symbols;";
+		String expecting = "Symbols_stack;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  String x;\n" +
+				"}\n" +
+				"a\n"+
+				"scope { int y; }\n"+
+				"scope Symbols;\n" +
+				" : b {"+action+"}\n" +
+				" ;\n" +
+				"b : ID {$Symbols::x=$ID.text} ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Rule b may reach rule a's dynamic scope via the fully-qualified $a::n form. */
+	public void testRuleScopeFromAnotherRule() throws Exception {
+		String action = "$a::n;"; // must be qualified
+		String expecting = "((a_scope)a_stack.peek()).n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : b\n" +
+				"  ;\n" +
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $a.i inside rule a itself, where i is a's parameter, collapses to the bare local i. */
+	public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception {
+		String action = "$a.i;";
+		String expecting = "i;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a[int i]: {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $a.i inside rule a, where i is a return value, reads through the retval struct. */
+	public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
+		String action = "$a.i;";
+		String expecting = "retval.i;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i, int j]: {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Assigning $a.i inside rule a writes through the retval struct (retval.i = ...). */
+	public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
+		String action = "$a.i = 1;";
+		String expecting = "retval.i = 1;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"a returns [int i, int j]: {"+action+"}\n" +
+			"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A bare $a inside rule a itself (no attribute, no dynamic scope) is an error:
+	 *  MSG_ISOLATED_RULE_SCOPE. */
+	public void testIsolatedRefToCurrentRule() throws Exception {
+		String action = "$a;";
+		String expecting = "";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : 'a' {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "a";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg,
+										expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** A bare $x where x labels a rule ref (no attribute access) is likewise
+	 *  flagged as MSG_ISOLATED_RULE_SCOPE. */
+	public void testIsolatedRefToRule() throws Exception {
+		String action = "$x;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x=b {"+action+"}\n" +
+				"  ;\n" +
+				"b : 'b' ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/*  I think these have to be errors; $a.x makes no sense.
+	public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception {
+			String action = "$a.x;";
+			String expecting = "x;";
+
+			ErrorQueue equeue = new ErrorQueue();
+			ErrorManager.setErrorListener(equeue);
+			Grammar g = new Grammar(
+				"grammar t;\n"+
+					"a : x='a' {"+action+"}\n" +
+					"  ;\n");
+			Tool antlr = newTool();
+			CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+			g.setCodeGenerator(generator);
+			generator.genRecognizer(); // forces load of templates
+			ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+															   new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+			String rawTranslation =
+				translator.translate();
+			StringTemplateGroup templates =
+				new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+			StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+			String found = actionST.toString();
+			assertEquals(expecting, found);
+
+			assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		}
+
+	public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception {
+		String action = "$a.x;"; // must be qualified
+		String expecting = "list_x;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x+='a' {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+														   new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+*/
+	/** With output=template, $a.st inside rule a reads the template via retval.st. */
+	public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception {
+		String action = "$a.st;"; // can be qualified
+		String expecting = "retval.st;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"options {output=template;}\n"+
+				"a : (A->{$A.text}) {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $b.start must still resolve through the implicit rule label (b1) even when
+	 *  b also declares a dynamic scope; checked in the full generated recognizer
+	 *  by extracting the text between the ### and !!! sentinels. */
+	public void testRuleRefWhenRuleHasScope() throws Exception {
+		String action = "$b.start;";
+		String expecting = "((Token)b1.start);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"a : b {###"+action+"!!!} ;\n" +
+				"b\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : 'b' \n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $b::n inside recursive rule b resolves to the dynamic-scope stack peek,
+	 *  not to the nested rule reference to b. */
+	public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception {
+		String action = "$b::n;";
+		String expecting = "((b_scope)b_stack.peek()).n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"s : b ;\n"+
+				"b\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** With output=template, the unqualified $st assignment maps to retval.st. */
+	public void testRefToTemplateAttributeForCurrentRule() throws Exception {
+		String action = "$st=null;";
+		String expecting = "retval.st =null;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"options {output=template;}\n"+
+				"a : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Unqualified $text for the current rule expands to the matched input span
+	 *  input.toString(retval.start, input.LT(-1)). */
+	public void testRefToTextAttributeForCurrentRule() throws Exception {
+		String action = "$text";
+		String expecting = "input.toString(retval.start,input.LT(-1))";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"options {output=template;}\n"+
+				"a : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Unqualified $start expands to ((Token)retval.start); verified in the full
+	 *  generated recognizer between the ### and !!! sentinels. */
+	public void testRefToStartAttributeForCurrentRule() throws Exception {
+		String action = "$start;";
+		String expecting = "((Token)retval.start);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"a : {###"+action+"!!!}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		// NOTE(review): translator is created but never used here; the assertion
+		// reads the generated recognizer text instead.
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Implicit token labels are numbered per alternative: $ID in alt 1 becomes ID1,
+	 *  $INT in alt 2 becomes INT2. */
+	public void testTokenLabelFromMultipleAlts() throws Exception {
+		String action = "$ID.text;"; // must be qualified
+		String action2 = "$INT.text;"; // must be qualified
+		String expecting = "ID1.getText();";
+		String expecting2 = "INT2.getText();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID {"+action+"}\n" +
+				"  | INT {"+action2+"}\n" +
+				"  ;\n" +
+				"ID : 'a';\n" +
+				"INT : '0';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		// second alternative (outerAltNum 2)
+		translator = new ActionTranslatorLexer(generator,
+											   "a",
+											   new antlr.CommonToken(ANTLRParser.ACTION,action2),2);
+		rawTranslation =
+			translator.translate();
+		templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		actionST = new StringTemplate(templates, rawTranslation);
+		found = actionST.toString();
+
+		assertEquals(expecting2, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** Implicit rule labels are numbered per alternative: $b in alt 1 becomes b1,
+	 *  $c in alt 2 becomes c2; .text expands to an input.toString(start,stop) span. */
+	public void testRuleLabelFromMultipleAlts() throws Exception {
+		String action = "$b.text;"; // must be qualified
+		String action2 = "$c.text;"; // must be qualified
+		String expecting = "input.toString(b1.start,b1.stop);";
+		String expecting2 = "input.toString(c2.start,c2.stop);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : b {"+action+"}\n" +
+				"  | c {"+action2+"}\n" +
+				"  ;\n" +
+				"b : 'a';\n" +
+				"c : '0';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		// second alternative (outerAltNum 2)
+		translator = new ActionTranslatorLexer(generator,
+											   "a",
+											   new antlr.CommonToken(ANTLRParser.ACTION,action2),2);
+		rawTranslation =
+			translator.translate();
+		templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		actionST = new StringTemplate(templates, rawTranslation);
+		found = actionST.toString();
+
+		assertEquals(expecting2, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $a::x where x is not declared in a's dynamic scope: text is left untranslated
+	 *  and MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE is reported. */
+	public void testUnknownDynamicAttribute() throws Exception {
+		String action = "$a::x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE;
+		Object expectedArg = "a";
+		Object expectedArg2 = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testUnknownGlobalDynamicAttribute() throws Exception {
+		String action = "$Symbols::x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"}\n" +
+				"a : {'+action+'}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE;
+		Object expectedArg = "Symbols";
+		Object expectedArg2 = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** An unqualified $n that only exists in another rule's dynamic scope is
+	 *  left untranslated and reported as MSG_UNKNOWN_SIMPLE_ATTRIBUTE. */
+	public void testUnqualifiedRuleScopeAttribute() throws Exception {
+		String action = "$n;"; // must be qualified
+		String expecting = "$n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : b\n" +
+				"  ;\n" +
+				// FIX: was "{'+action+'}" — single-quote typo kept the action out
+				// of the grammar text; embed it like every sibling test does.
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		// FIX: wire generator and load templates as all sibling tests do before
+		// translating; this step was missing here.
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "b",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
+		Object expectedArg = "n";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** Reusing label id for both a token ('foo') and a rule (b) must raise
+	 *  MSG_LABEL_TYPE_CONFLICT ("rule!=token"). */
+	public void testRuleAndTokenLabelTypeMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : id='foo' id=b\n" +
+				"  ;\n" +
+				"b : ;\n");
+		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
+		Object expectedArg = "id";
+		Object expectedArg2 = "rule!=token";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** Reusing label ids as both a list label (+=) and a plain token label must raise
+	 *  MSG_LABEL_TYPE_CONFLICT ("token!=token-list"). */
+	public void testListAndTokenLabelTypeMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+='a' ids='b'\n" +
+				"  ;\n" +
+				"b : ;\n");
+		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
+		Object expectedArg = "ids";
+		Object expectedArg2 = "token!=token-list";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** Reusing label bs as both a rule list label (+=) and a plain rule label must
+	 *  raise MSG_LABEL_TYPE_CONFLICT ("rule!=rule-list"). */
+	public void testListAndRuleLabelTypeMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"options {output=AST;}\n"+
+				"a : bs+=b bs=b\n" +
+				"  ;\n" +
+				"b : 'b';\n");
+		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
+		Object expectedArg = "bs";
+		Object expectedArg2 = "rule!=rule-list";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** Declaring i both as a parameter and as a return value of rule a must raise
+	 *  MSG_ARG_RETVAL_CONFLICT. */
+	public void testArgReturnValueMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a[int i] returns [int x, int i]\n" +
+				"  : \n" +
+				"  ;\n" +
+				"b : ;\n");
+		int expectedMsgID = ErrorManager.MSG_ARG_RETVAL_CONFLICT;
+		Object expectedArg = "i";
+		Object expectedArg2 = "a";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	/** A += token list label on a token ref: $ids translates to the list variable list_ids. */
+	public void testSimplePlusEqualLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : ids+=ID ( COMMA ids+=ID {"+action+"})* ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A += list label whose first element is a string literal still yields list_ids. */
+	public void testPlusEqualStringLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+='if' ( ',' ids+=ID {"+action+"})* ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A += list label whose first element is a set ('a'|'b') still yields list_ids. */
+	public void testPlusEqualSetLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+=('a'|'b') ( ',' ids+=ID {"+action+"})* ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** A += list label whose first element is the wildcard (.) still yields list_ids.
+	 *  NOTE(review): translator is built before genRecognizer() here, unlike sibling
+	 *  tests — verify this ordering is intentional. */
+	public void testPlusEqualWildcardLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+=. ( ',' ids+=ID {"+action+"})* ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** An unlabeled token ref gets an implicit numbered label: $ID becomes ID1,
+	 *  and $ID.text / $ID.getText() both map to ID1.getText(). */
+	public void testImplicitTokenLabel() throws Exception {
+		String action = "$ID; $ID.text; $ID.getText()";
+		String expecting = "ID1; ID1.getText(); ID1.getText()";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** An unlabeled rule ref gets an implicit numbered label: $r.start becomes
+	 *  ((Token)r1.start); checked via ###/!!! sentinels in the generated code. */
+	public void testImplicitRuleLabel() throws Exception {
+		String action = "$r.start;";
+		String expecting = "((Token)r1.start);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r {###"+action+"!!!} ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** When the rule ref already has an explicit label x, $r.start reuses it
+	 *  (((Token)x.start)) instead of creating an implicit label. */
+	public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception {
+		String action = "$r.start;";
+		String expecting = "((Token)x.start);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x=r {###"+action+"!!!} ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Same as above but with a list label (x+=r) under output=AST: $r.start
+	// must still resolve to label x.
+	public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception {
+		String action = "$r.start;";
+		String expecting = "((Token)x.start);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"options {output=AST;}\n" +
+				"a : x+=r {###"+action+"!!!} ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		// slice the translated action out from between the ###/!!! sentinels
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Token ref with explicit label (x=ID): $ID.text must translate through
+	// the existing label, i.e. x.getText().
+	public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception {
+		String action = "$ID.text;";
+		String expecting = "x.getText();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x=ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		// translate the action directly instead of scraping generated code
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Token ref with list label (x+=ID): $ID.text still resolves through
+	// label x.
+	public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception {
+		String action = "$ID.text;";
+		String expecting = "x.getText();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x+=ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// A rule list label (x+=a) without an output option must raise
+	// MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT.
+	public void testRuleLabelWithoutOutputOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar T;\n"+
+			"s : x+=a ;" +
+			"a : 'a';\n"+
+			"b : 'b';\n"+
+			// NOTE(review): '\n' embeds a literal newline inside the WS char
+			// literal in the grammar text — presumably intentional (newline
+			// char); confirm ANTLR accepts a raw newline here.
+			"WS : ' '|'\n';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT;
+		Object expectedArg = "x";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// End-to-end (execParser) check: the same list label x on two different
+	// rules accumulates both matched subtrees under output=AST.
+	public void testRuleLabelOnTwoDifferentRulesAST() throws Exception {
+		String grammar =
+			"grammar T;\n"+
+			"options {output=AST;}\n"+
+			"s : x+=a x+=b {System.out.println($x);} ;" +
+			"a : 'a';\n"+
+			"b : 'b';\n"+
+			"WS : (' '|'\n') {skip();};\n";
+		// first line: printed label list; second line: the tree's text
+		String expecting = "[a, b]\na b\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "a b", false);
+		assertEquals(expecting, found);
+	}
+
+	// Same list-label scenario but with output=template; the label collects
+	// both rules' template results.
+	public void testRuleLabelOnTwoDifferentRulesTemplate() throws Exception {
+		String grammar =
+			"grammar T;\n"+
+			"options {output=template;}\n"+
+			"s : x+=a x+=b {System.out.println($x);} ;" +
+			"a : 'a' -> {%{\"hi\"}} ;\n"+
+			"b : 'b' -> {%{\"mom\"}} ;\n"+
+			"WS : (' '|'\n') {skip();};\n";
+		String expecting = "[hi, mom]\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "a b", false);
+		assertEquals(expecting, found);
+	}
+
+	// Calling a parameterized rule r[int i] without arguments must raise
+	// MSG_MISSING_RULE_ARGS.
+	public void testMissingArgs() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r ;" +
+				"r[int i] : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
+		Object expectedArg = "r";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Passing arguments (r[32,34]) to a rule that declares none must raise
+	// MSG_RULE_HAS_NO_ARGS.
+	public void testArgsWhenNoneDefined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r[32,34] ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
+		Object expectedArg = "r";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// A return attribute with an initializer ("returns [int x=0]") must be
+	// parsed into the rule's return scope verbatim.
+	public void testReturnInitValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"a : r ;\n" +
+			"r returns [int x=0] : 'a' {$x = 4;} ;\n");
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		Rule r = g.getRule("r");
+		AttributeScope retScope = r.returnScope;
+		List parameters = retScope.getAttributes();
+		assertNotNull("missing return action", parameters);
+		assertEquals(1, parameters.size());
+		// Attribute.toString() renders "type name=init"
+		String found = parameters.get(0).toString();
+		String expecting = "int x=0";
+		assertEquals(expecting, found);
+	}
+
+	// Multiple return attributes, mixing initialized and uninitialized, must
+	// all land in the return scope in declaration order.
+	public void testMultipleReturnInitValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"a : r ;\n" +
+			"r returns [int x=0, int y, String s=new String(\"foo\")] : 'a' {$x = 4;} ;\n");
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		Rule r = g.getRule("r");
+		AttributeScope retScope = r.returnScope;
+		List parameters = retScope.getAttributes();
+		assertNotNull("missing return action", parameters);
+		assertEquals(3, parameters.size());
+		assertEquals("int x=0", parameters.get(0).toString());
+		assertEquals("int y", parameters.get(1).toString());
+		assertEquals("String s=new String(\"foo\")", parameters.get(2).toString());
+	}
+
+	// C-style function-pointer return attribute "int (*x)()=NULL": the parser
+	// must separate type "int (*)()" from name "x" and initializer "NULL".
+	public void testCStyleReturnInitValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"a : r ;\n" +
+			"r returns [int (*x)()=NULL] : 'a' ;\n");
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		Rule r = g.getRule("r");
+		AttributeScope retScope = r.returnScope;
+		List parameters = retScope.getAttributes();
+		assertNotNull("missing return action", parameters);
+		assertEquals(1, parameters.size());
+		String found = parameters.get(0).toString();
+		String expecting = "int (*)() x=NULL";
+		assertEquals(expecting, found);
+	}
+
+	// Rule parameters may not have init values (unlike return attrs):
+	// r[int x, int y=3] must raise MSG_ARG_INIT_VALUES_ILLEGAL for "y".
+	public void testArgsWithInitValues() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r[32,34] ;" +
+				"r[int x, int y=3] : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL;
+		Object expectedArg = "y";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Arguments on a token reference in a parser rule (ID[32,34]) are
+	// illegal: expect MSG_ARGS_ON_TOKEN_REF.
+	public void testArgsOnToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID[32,34] ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_ARGS_ON_TOKEN_REF;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// In a lexer grammar a token ref is a rule call, so args on an arg-less
+	// rule (ID[32,34]) report MSG_RULE_HAS_NO_ARGS, not ARGS_ON_TOKEN_REF.
+	public void testArgsOnTokenInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' ID[32,34] ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// A label on a lexer rule ref (i=ID): $i.text must translate to
+	// i.getText().
+	public void testLabelOnRuleRefInLexer() throws Exception {
+		String action = "$i.text";
+		String expecting = "i.getText()";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' i=ID {"+action+"};" +
+				"fragment ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "R",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Unlabeled lexer rule ref: $ID.text synthesizes label ID1 and
+	// translates to ID1.getText().
+	public void testRefToRuleRefInLexer() throws Exception {
+		String action = "$ID.text";
+		String expecting = "ID1.getText()";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "R",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Bare $ID (no attribute) on an unlabeled lexer rule ref still gets the
+	// synthesized label ID1.
+	public void testRefToRuleRefInLexerNoAttribute() throws Exception {
+		String action = "$ID";
+		String expecting = "ID1";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "R",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// A label on a char literal in a lexer rule (x='z') must generate
+	// without errors.
+	public void testCharLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x='z' ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// A list label on a char literal (x+='z') must generate without errors.
+	public void testCharListLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x+='z' ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// A label on the wildcard char (x=.) must generate without errors.
+	public void testWildcardCharLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x=. ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// A list label on the wildcard char (x+=.) must generate without errors.
+	public void testWildcardCharListLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x+=. ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Lexer rule R[int i] called without args from rule A must raise
+	// MSG_MISSING_RULE_ARGS.
+	public void testMissingArgsInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"A : R ;" +
+				"R[int i] : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
+		Object expectedArg = "R";
+		Object expectedArg2 = null;
+		// getting a second error @1:12, probably from nextToken
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// All read-only lexer rule properties at once: each $prop must map to
+	// its Java-target expression (e.g. $text -> getText(), $line ->
+	// tokenStartLine, $index -> -1, $stop -> (getCharIndex()-1)).
+	public void testLexerRulePropertyRefs() throws Exception {
+		String action = "$text $type $line $pos $channel $index $start $stop";
+		String expecting = "getText() _type tokenStartLine tokenStartCharPositionInLine channel -1 tokenStartCharIndex (getCharIndex()-1)";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'r' {"+action+"};\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "R",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Labels over char literal, string literal, wildcard, and rule ref in a
+	// lexer rule; only the rule-ref/string labels support .text here.
+	public void testLexerLabelRefs() throws Exception {
+		String action = "$a $b.text $c $d.text";
+		String expecting = "a b.getText() c d.getText()";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : a='c' b='hi' c=. d=DUH {"+action+"};\n" +
+				"DUH : 'd' ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "R",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Assignment forms of the writable lexer rule properties ($type=1 etc.)
+	// must translate to assignments on the target fields.
+	public void testSettingLexerRulePropertyRefs() throws Exception {
+		String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index";
+		String expecting = "getText() _type=1 tokenStartLine=1 tokenStartCharPositionInLine=1 channel=1 -1";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'r' {"+action+"};\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "R",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Combined grammar: extract the derived lexer grammar, generate it, and
+	// verify ID[32] in lexer rule R reports MSG_RULE_HAS_NO_ARGS against the
+	// derived lexer grammar (not the combined one).
+	public void testArgsOnTokenInLexerRuleOfCombined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : R;\n" +
+				"R : 'z' ID[32] ;\n" +
+				"ID : 'a';\n");
+
+		// build the implicit lexer grammar the combined grammar would emit
+		String lexerGrammarStr = g.getLexerGrammar();
+		StringReader sr = new StringReader(lexerGrammarStr);
+		Grammar lexerGrammar = new Grammar();
+		lexerGrammar.setFileName("<internally-generated-lexer>");
+		lexerGrammar.importTokenVocabulary(g);
+		lexerGrammar.setGrammarContent(sr);
+		sr.close();
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, lexerGrammar, "Java");
+		lexerGrammar.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, lexerGrammar, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Mirror of the previous test: ID[int i] declared with args but called
+	// bare from R in the derived lexer grammar -> MSG_MISSING_RULE_ARGS.
+	public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : R;\n" +
+				"R : 'z' ID ;\n" +
+				"ID[int i] : 'a';\n");
+
+		// build the implicit lexer grammar the combined grammar would emit
+		String lexerGrammarStr = g.getLexerGrammar();
+		StringReader sr = new StringReader(lexerGrammarStr);
+		Grammar lexerGrammar = new Grammar();
+		lexerGrammar.setFileName("<internally-generated-lexer>");
+		lexerGrammar.importTokenVocabulary(g);
+		lexerGrammar.setGrammarContent(sr);
+		sr.close();
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, lexerGrammar, "Java");
+		lexerGrammar.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, lexerGrammar, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// T R E E S
+
+	// $label.tree on a labeled token (id=ID) must translate to the
+	// generated tree variable id_tree.
+	public void testTokenLabelTreeProperty() throws Exception {
+		String action = "$id.tree;";
+		String expecting = "id_tree;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : id=ID {"+action+"} ;\n" +
+				"ID : 'a';\n");
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+									  "a",
+									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// $ID.tree on an unlabeled token ref must use the synthesized label's
+	// tree variable, ID1_tree.
+	// NOTE(review): unlike the sibling tests, this one never asserts on
+	// equeue — presumably an oversight; verify against upstream.
+	public void testTokenRefTreeProperty() throws Exception {
+		String action = "$ID.tree;";
+		String expecting = "ID1_tree;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+	}
+
+	// $ID is ambiguous when the alt has two ID refs: expect
+	// MSG_NONUNIQUE_REF.
+	public void testAmbiguousTokenRef() throws Exception {
+		String action = "$ID;";
+		String expecting = "";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_NONUNIQUE_REF;
+		Object expectedArg = "ID";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Same ambiguity as above but with a property access ($ID.text): still
+	// MSG_NONUNIQUE_REF.
+	public void testAmbiguousTokenRefWithProp() throws Exception {
+		String action = "$ID.text;";
+		String expecting = "";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_NONUNIQUE_REF;
+		Object expectedArg = "ID";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Dynamic-scope write from within the rule itself: $field::x becomes a
+	// peek on field_stack; $field.st (own rule) becomes retval.st.
+	public void testRuleRefWithDynamicScope() throws Exception {
+		String action = "$field::x = $field.st;";
+		String expecting = "((field_scope)field_stack.peek()).x = retval.st;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"field\n" +
+				"scope { StringTemplate x; }\n" +
+				"    :   'y' {"+action+"}\n" +
+				"    ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "field",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Assigning to one's own rule attribute by rule name ($rule.tree = ...)
+	// must translate to the retval struct (retval.tree = ...).
+	public void testAssignToOwnRulenameAttr() throws Exception {
+		String action = "$rule.tree = null;";
+		String expecting = "retval.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"rule\n" +
+			"    : 'y' {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// Assigning to one's own parameter, via $rule.i or bare $i, must both
+	// translate to the plain parameter name i.
+	public void testAssignToOwnParamAttr() throws Exception {
+		String action = "$rule.i = 42; $i = 23;";
+		String expecting = "i = 42; i = 23;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"rule[int i]\n" +
+			"    : 'y' {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// $rule.stop is read-only; assigning to it must raise
+	// MSG_WRITE_TO_READONLY_ATTR with args ("rule", "stop").
+	public void testIllegalAssignToOwnRulenameAttr() throws Exception {
+		String action = "$rule.stop = 0;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"rule\n" +
+			"    : 'y' {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "rule";
+		Object expectedArg2 = "stop";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Of the bare local attrs, $tree and $st are writable (-> retval.*);
+	// $start, $stop, $text are read-only and each must raise
+	// MSG_WRITE_TO_READONLY_ATTR while being dropped from the translation
+	// (hence the trailing spaces in `expecting`).
+	public void testIllegalAssignToLocalAttr() throws Exception {
+		String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;";
+		String expecting = "retval.tree = null; retval.st = null;   ";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"rule\n" +
+			"    : 'y' {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		ArrayList expectedErrors = new ArrayList(3);
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, "start", "");
+		expectedErrors.add(expectedMessage);
+		GrammarSemanticsMessage expectedMessage2 =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, "stop", "");
+		expectedErrors.add(expectedMessage2);
+				GrammarSemanticsMessage expectedMessage3 =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, "text", "");
+		expectedErrors.add(expectedMessage3);
+		checkErrors(equeue, expectedErrors);
+
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void testIllegalAssignRuleRefAttr() throws Exception {
+		String action = "$other.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"options { output = AST;}" +
+			"otherrule\n" +
+			"    : 'y' ;" +
+			"rule\n" +
+			"    : other=otherrule {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "other";
+		Object expectedArg2 = "tree";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testIllegalAssignTokenRefAttr() throws Exception {
+		String action = "$ID.text = \"test\";";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"ID\n" +
+			"    : 'y' ;" +
+			"rule\n" +
+			"    : ID {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "ID";
+		Object expectedArg2 = "text";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testAssignToTreeNodeAttribute() throws Exception {
+		String action = "$tree.scope = localScope;";
+		String expecting = "(()retval.tree).scope = localScope;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"options { output=AST; }" +
+			"rule\n" +
+			"@init {\n" +
+			"   Scope localScope=null;\n" +
+			"}\n" +
+			"@after {\n" +
+			"   $tree.scope = localScope;\n" +
+			"}\n" +
+			"   : 'a' -> ^('a')\n" +
+			";");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "rule",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals(expecting, found);
+	}
+
+	public void testDoNotTranslateAttributeCompare() throws Exception {
+		String action = "$a.line == $b.line";
+		String expecting = "a.getLine() == b.getLine()";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"lexer grammar a;\n" +
+				"RULE:\n" +
+				"     a=ID b=ID {" + action + "}" +
+				"    ;\n" +
+				"ID : 'id';"
+		);
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "RULE",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals(expecting, found);
+	}
+
+	public void testDoNotTranslateScopeAttributeCompare() throws Exception {
+		String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
+		String expecting = "if (((rule_scope)rule_stack.peek()).foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar a;\n" +
+				"rule\n" +
+				"scope {\n" +
+				"   String foo;" +
+				"} :\n" +
+				"     twoIDs" +
+				"    ;\n" +
+				"twoIDs:\n" +
+				"    ID ID {" + action + "}\n" +
+				"    ;\n" +
+				"ID : 'id';"
+		);
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "twoIDs",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		// check that we didn't use scopeSetAttributeRef in translation!
+		boolean foundScopeSetAttributeRef = false;
+		for (int i = 0; i < translator.chunks.size(); i++) {
+			Object chunk = translator.chunks.get(i);
+			if (chunk instanceof StringTemplate) {
+				if (((StringTemplate)chunk).getName().equals("scopeSetAttributeRef")) {
+					foundScopeSetAttributeRef = true;
+				}
+			}
+		}
+		assertFalse("action translator used scopeSetAttributeRef template in comparison!", foundScopeSetAttributeRef);
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals(expecting, found);
+	}
+
+	public void testTreeRuleStopAttributeIsInvalid() throws Exception {
+		String action = "$r.x; $r.start; $r.stop";
+		String expecting = "r.x; ((CommonTree)r.start); $r.stop";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar t;\n" +
+			"options {ASTLabelType=CommonTree;}\n"+
+			"a returns [int x]\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a {###"+action+"!!!}\n" +
+			"  ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE;
+		Object expectedArg = "a";
+		Object expectedArg2 = "stop";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		System.out.println("equeue:"+equeue);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRefToTextAttributeForCurrentTreeRule() throws Exception {
+		String action = "$text";
+		String expecting = "input.getTokenStream().toString(\n" +
+			"              input.getTreeAdaptor().getTokenStartIndex(retval.start),\n" +
+			"              input.getTreeAdaptor().getTokenStopIndex(retval.start))";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar t;\n" +
+			"options {ASTLabelType=CommonTree;}\n" +
+			"a : {###"+action+"!!!}\n" +
+			"  ;\n");
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		StringTemplate codeST = generator.getRecognizerST();
+		String code = codeST.toString();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+
+	// S U P P O R T
+
+	protected void checkError(ErrorQueue equeue,
+							  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+		System.out.println(equeue.infos);
+		System.out.println(equeue.warnings);
+		System.out.println(equeue.errors);
+		*/
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = (Message)equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size() > 0);
+		assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		assertEquals(expectedMessage.arg2, foundMsg.arg2);
+	}
+
+	/** Allow checking for multiple errors in one test */
+	protected void checkErrors(ErrorQueue equeue,
+							   ArrayList expectedMessages)
+			throws Exception
+	{
+		ArrayList messageExpected = new ArrayList(equeue.errors.size());
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = (Message)equeue.errors.get(i);
+			boolean foundMsg = false;
+			for (int j = 0; j < expectedMessages.size(); j++) {
+				Message em = (Message)expectedMessages.get(j);
+				if (m.msgID==em.msgID && m.arg.equals(em.arg) && m.arg2.equals(em.arg2)) {
+					foundMsg = true;
+				}
+			}
+			if (foundMsg) {
+				messageExpected.add(i, Boolean.TRUE);
+			} else
+				messageExpected.add(i, Boolean.FALSE);
+		}
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			assertTrue("unexpected error:" + equeue.errors.get(i), ((Boolean)messageExpected.get(i)).booleanValue());
+		}
+	}
+}
diff --git a/src/org/antlr/test/TestAutoAST.java b/src/org/antlr/test/TestAutoAST.java
new file mode 100644
index 0000000..46dbcc3
--- /dev/null
+++ b/src/org/antlr/test/TestAutoAST.java
@@ -0,0 +1,541 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+public class TestAutoAST extends BaseTest {
+	protected boolean debug = false;
+
+	public void testTokenList() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	public void testTokenListInSingleAltBlock() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : (ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	public void testSimpleRootAtOuterLevel() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("(abc 34)\n", found);
+	}
+
+	public void testSimpleRootAtOuterLevelReverse() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 abc", debug);
+		assertEquals("(abc 34)\n", found);
+	}
+
+	public void testBang() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT! ID! INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34 dag 4532", debug);
+		assertEquals("abc 4532\n", found);
+	}
+
+	public void testOptionalThenRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ( ID INT )? ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 1 b", debug);
+		assertEquals("(b a 1)\n", found);
+	}
+
+	public void testLabeledStringRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void'^ ID ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(void foo ;)\n", found);
+	}
+
+	public void testWildcard() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void'^ . ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(void foo ;)\n", found);
+	}
+
+	public void testWildcardRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void' .^ ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(foo void ;)\n", found);
+	}
+
+	public void testRootRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID^ INT^ ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 34 c", debug);
+		assertEquals("(34 a c)\n", found);
+	}
+
+	public void testRootRoot2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT^ ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 34 c", debug);
+		assertEquals("(c (34 a))\n", found);
+	}
+
+	public void testRootThenRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID^ (INT '*'^ ID)+ ;\n" +
+			"ID  : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 34 * b 9 * c", debug);
+		assertEquals("(* (* (a 34) b 9) c)\n", found);
+	}
+
+	public void testNestedSubrule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'void' (({;}ID|INT) ID | 'null' ) ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void a b;", debug);
+		assertEquals("void a b ;\n", found);
+	}
+
+	public void testInvokeRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : type ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("int a\n", found);
+	}
+
+	public void testInvokeRuleAsRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : type^ ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("(int a)\n", found);
+	}
+
+	public void testRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ('+'^ ID)* ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a+b+c+d", debug);
+		assertEquals("(+ (+ (+ a b) c) d)\n", found);
+	}
+
+	public void testRuleInvocationRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID (op^ ID)* ;\n" +
+			"op : {;}'+' | '-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a+b+c-d", debug);
+		assertEquals("(- (+ (+ a b) c) d)\n", found);
+	}
+
+	public void testTailRecursion() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"s : a ;\n" +
+			"a : atom ('exp'^ a)? ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "s", "3 exp 4 exp 5", debug);
+		assertEquals("(exp 3 (exp 4 5))\n", found);
+	}
+
+	public void testSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID|INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testSetRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ('+' | '-')^ ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "+abc", debug);
+		assertEquals("(+ abc)\n", found);
+	}
+
+	public void testSetAsRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID (('+'|'-')^ ID)* ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a+b-c", debug);
+		assertEquals("(- (+ a b) c)\n", found);
+	}
+
+	public void testNotSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~ID '+' INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34+2", debug);
+		assertEquals("34 + 2\n", found);
+	}
+
+	public void testNotSetRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~'+'^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 55", debug);
+		assertEquals("(34 55)\n", found);
+	}
+
+	public void testNotSetRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT (~INT^ INT)* ;\n" +
+			"blort : '+' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "3+4+5", debug);
+		assertEquals("(+ (+ 3 4) 5)\n", found);
+	}
+
+	public void testTokenLabelReuse() throws Exception {
+		// check for compilation problem due to multiple defines
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id=ID id=ID {System.out.print(\"2nd id=\"+$id.text+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("2nd id=b;a b\n", found);
+	}
+
+	public void testTokenLabelReuse2() throws Exception {
+		// check for compilation problem due to multiple defines
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id=ID id=ID^ {System.out.print(\"2nd id=\"+$id.text+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("2nd id=b;(b a)\n", found);
+	}
+
+	public void testTokenListLabelReuse() throws Exception {
+		// check for compilation problem due to multiple defines
+		// make sure ids has both ID tokens
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ids+=ID ids+=ID {System.out.print(\"id list=\"+$ids+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		String expecting = "id list=[[@0,0:0='a',<4>,1:0], [@2,2:2='b',<4>,1:2]];a b\n";
+		assertEquals(expecting, found);
+	}
+
+	public void testTokenListLabelReuse2() throws Exception {
+		// check for compilation problem due to multiple defines
+		// make sure ids has both ID tokens
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ids+=ID^ ids+=ID {System.out.print(\"id list=\"+$ids+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		String expecting = "id list=[[@0,0:0='a',<4>,1:0], [@2,2:2='b',<4>,1:2]];(a b)\n";
+		assertEquals(expecting, found);
+	}
+
+	public void testTokenListLabelRuleRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id+=ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	public void testTokenListLabelBang() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id+=ID! ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a", debug);
+		assertEquals("nil\n", found);
+	}
+
+	public void testRuleListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x+=b x+=b {" +
+				"Tree t=(Tree)$x.get(1);" +
+				"System.out.print(\"2nd x=\"+t.toStringTree()+';');} ;\n" +
+			"b : ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("2nd x=b;a b\n", found);
+	}
+
+	public void testRuleListLabelRuleRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ( x+=b^ )+ {" +
+			"System.out.print(\"x=\"+((CommonTree)$x.get(1)).toStringTree()+';');} ;\n" +
+			"b : ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("x=(b a);(b a)\n", found);
+	}
+
+	public void testRuleListLabelBang() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x+=b! x+=b {" +
+			"System.out.print(\"1st x=\"+((CommonTree)$x.get(0)).toStringTree()+';');} ;\n" +
+			"b : ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("1st x=a;b\n", found);
+	}
+
+	public void testComplicatedMelange() throws Exception {
+		// check for compilation problem
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : A b=B b=B c+=C c+=C D {$D.text;} ;\n" +
+			"A : 'a' ;\n" +
+			"B : 'b' ;\n" +
+			"C : 'c' ;\n" +
+			"D : 'd' ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b b c c d", debug);
+		assertEquals("a b b c c d\n", found);
+	}
+
+	public void testReturnValueWithAST() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID b {System.out.println($b.i);} ;\n" +
+			"b returns [int i] : INT {$i=Integer.parseInt($INT.text);} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("34\nabc 34\n", found);
+	}
+
+	public void testSetLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options { output=AST; }\n" +
+			"r : (INT|ID)+ ; \n" +
+			"ID : 'a'..'z' + ;\n" +
+			"INT : '0'..'9' +;\n" +
+			"WS: (' ' | '\\n' | '\\t')+ {$channel = HIDDEN;};\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "r", "abc 34 d", debug);
+		assertEquals("abc 34 d\n", found);
+	}
+
+
+	// S U P P O R T
+
+	public void _test() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a :  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abc 34", debug);
+		assertEquals("\n", found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestCharDFAConversion.java b/src/org/antlr/test/TestCharDFAConversion.java
new file mode 100644
index 0000000..2247096
--- /dev/null
+++ b/src/org/antlr/test/TestCharDFAConversion.java
@@ -0,0 +1,553 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DFAOptimizer;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.tool.*;
+
+import java.util.List;
+
+public class TestCharDFAConversion extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestCharDFAConversion() {
+	}
+
+	// R A N G E S  &  S E T S
+
+	public void testSimpleRangeVersusChar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a'..'z' '@' | 'k' '$' ;");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'k'->.s1\n" +
+			".s0-{'a'..'j', 'l'..'z'}->:s3=>1\n" +
+			".s1-'$'->:s2=>2\n" +
+			".s1-'@'->:s3=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testRangeWithDisjointSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a'..'z' '@'\n" +
+			"  | ('k'|'9'|'p') '$'\n" +
+			"  ;\n");
+		g.createLookaheadDFAs();
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'}
+		String expecting =
+			".s0-'9'->:s2=>2\n" +
+			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>1\n" +
+			".s0-{'k', 'p'}->.s1\n" +
+			".s1-'$'->:s2=>2\n" +
+			".s1-'@'->:s3=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testDisjointSetCollidingWithTwoRanges() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ('a'..'z'|'0'..'9') '@'\n" +
+			"  | ('k'|'9'|'p') '$'\n" +
+			"  ;\n");
+		g.createLookaheadDFAs();
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
+		// into 0..8
+		String expecting =
+			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>1\n" +
+			".s0-{'9', 'k', 'p'}->.s1\n" +
+			".s1-'$'->:s2=>2\n" +
+			".s1-'@'->:s3=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testDisjointSetCollidingWithTwoRangesCharsFirst() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ('k'|'9'|'p') '$'\n" +
+			"  | ('a'..'z'|'0'..'9') '@'\n" +
+			"  ;\n");
+		g.createLookaheadDFAs();
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
+		// into 0..8
+		String expecting =
+			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>2\n" +
+			".s0-{'9', 'k', 'p'}->.s1\n" +
+			".s1-'$'->:s3=>1\n" +
+			".s1-'@'->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testDisjointSetCollidingWithTwoRangesAsSeparateAlts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a'..'z' '@'\n" +
+			"  | 'k' '$'\n" +
+			"  | '9' '$'\n" +
+			"  | 'p' '$'\n" +
+			"  | '0'..'9' '@'\n" +
+			"  ;\n");
+		g.createLookaheadDFAs();
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
+		// into 0..8
+		String expecting =
+			".s0-'0'..'8'->:s8=>5\n" +
+			".s0-'9'->.s6\n" +
+			".s0-'k'->.s1\n" +
+			".s0-'p'->.s4\n" +
+			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>1\n" +
+			".s1-'$'->:s2=>2\n" +
+			".s1-'@'->:s3=>1\n" +
+			".s4-'$'->:s5=>4\n" +
+			".s4-'@'->:s3=>1\n" +
+			".s6-'$'->:s7=>3\n" +
+			".s6-'@'->:s8=>5\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testKeywordVersusID() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"IF : 'if' ;\n" + // choose this over ID
+			"ID : ('a'..'z')+ ;\n");
+		String expecting =
+			".s0-'a'..'z'->:s2=>1\n" +
+			".s0-<EOT>->:s1=>2\n";
+		checkDecision(g, 1, expecting, null);
+		expecting =
+			".s0-'i'->.s1\n" +
+			".s0-{'a'..'h', 'j'..'z'}->:s4=>2\n" +
+			".s1-'f'->.s2\n" +
+			".s1-<EOT>->:s4=>2\n" +
+			".s2-'a'..'z'->:s4=>2\n" +
+			".s2-<EOT>->:s3=>1\n";
+		checkDecision(g, 2, expecting, null);
+	}
+
+	public void testIdenticalRules() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a' ;\n" +
+			"B : 'a' ;\n"); // can't reach this
+		String expecting =
+			".s0-'a'->.s1\n" +
+			".s1-<EOT>->:s2=>1\n";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		checkDecision(g, 1, expecting, new int[] {2});
+
+		assertEquals("unexpected number of expected problems",
+				    1, equeue.size());
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("warning must be an unreachable alt",
+				    msg instanceof GrammarUnreachableAltsMessage);
+		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
+		assertEquals("[2]", u.alts.toString());
+
+	}
+
+	public void testAdjacentNotCharLoops() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : (~'r')+ ;\n" +
+			"B : (~'s')+ ;\n");
+		String expecting =
+			".s0-'r'->:s3=>2\n" +
+			".s0-'s'->:s2=>1\n" +
+			".s0-{'\\u0000'..'q', 't'..'\\uFFFE'}->.s1\n" +
+			".s1-'r'->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n" +
+			".s1-{'\\u0000'..'q', 't'..'\\uFFFE'}->.s1\n";
+		checkDecision(g, 3, expecting, null);
+	}
+
+	public void testNonAdjacentNotCharLoops() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : (~'r')+ ;\n" +
+			"B : (~'t')+ ;\n");
+		String expecting =
+			".s0-'r'->:s3=>2\n" +
+			".s0-'t'->:s2=>1\n" +
+			".s0-{'\\u0000'..'q', 's', 'u'..'\\uFFFE'}->.s1\n" +
+			".s1-'r'->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n" +
+			".s1-{'\\u0000'..'q', 's', 'u'..'\\uFFFE'}->.s1\n";
+		checkDecision(g, 3, expecting, null);
+	}
+
+	public void testLoopsWithOptimizedOutExitBranches() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'x'* ~'x'+ ;\n");
+		String expecting =
+			".s0-'x'->:s2=>1\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFE'}->:s1=>2\n";
+		checkDecision(g, 1, expecting, null);
+
+		// The optimizer yanks out all exit branches from EBNF blocks
+		// This is ok because we've already verified there are no problems
+		// with the enter/exit decision
+		DFAOptimizer optimizer = new DFAOptimizer(g);
+		optimizer.optimize();
+		FASerializer serializer = new FASerializer(g);
+		DFA dfa = g.getLookaheadDFA(1);
+		String result = serializer.serialize(dfa.startState);
+		expecting = ".s0-'x'->:s1=>1\n";
+		assertEquals(expecting, result);
+	}
+
+	// N O N G R E E D Y
+
+	public void testNonGreedy() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"CMT : '/*' ( options {greedy=false;} : . )* '*/' ;");
+		String expecting =
+			".s0-'*'->.s1\n" +
+			".s0-{'\\u0000'..')', '+'..'\\uFFFE'}->:s3=>1\n" +
+			".s1-'/'->:s2=>2\n" +
+			".s1-{'\\u0000'..'.', '0'..'\\uFFFE'}->:s3=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonGreedyWildcardStar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' ( options {greedy=false;} : . )* '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonGreedyByDefaultWildcardStar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' .* '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonGreedyWildcardPlus() throws Exception {
+		// same DFA as nongreedy .* but code gen checks number of
+		// iterations at runtime
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' ( options {greedy=false;} : . )+ '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonGreedyByDefaultWildcardPlus() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' .+ '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonGreedyByDefaultWildcardPlusWithParens() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' (.)+ '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonWildcardNonGreedy() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"DUH : (options {greedy=false;}:'x'|'y')* 'xy' ;");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s0-'y'->:s4=>2\n" +
+			".s1-'x'->:s3=>1\n" +
+			".s1-'y'->:s2=>3\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonWildcardEOTMakesItWorkWithoutNonGreedyOption() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"DUH : ('x'|'y')* 'xy' ;");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s0-'y'->:s3=>1\n" +
+			".s1-'x'->:s3=>1\n" +
+			".s1-'y'->.s2\n" +
+			".s2-'x'..'y'->:s3=>1\n" +
+			".s2-<EOT>->:s4=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testAltConflictsWithLoopThenExit() throws Exception {
+		// \" predicts alt 1, but wildcard then " can predict exit also
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"STRING : '\"' (options {greedy=false;}: '\\\\\"' | .)* '\"' ;\n"
+		);
+		String expecting =
+			".s0-'\"'->:s1=>3\n" +
+				".s0-'\\\\'->.s2\n" +
+				".s0-{'\\u0000'..'!', '#'..'[', ']'..'\\uFFFE'}->:s4=>2\n" +
+				".s2-'\"'->:s3=>1\n" +
+				".s2-{'\\u0000'..'!', '#'..'\\uFFFE'}->:s4=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNonGreedyLoopThatNeverLoops() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"DUH : (options {greedy=false;}:'x')+ ;"); // loop never matched
+		String expecting =
+			":s0=>2\n";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		checkDecision(g, 1, expecting, new int[] {1});
+
+		assertEquals("unexpected number of expected problems",
+				    1, equeue.size());
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("warning must be an unreachable alt",
+				   msg instanceof GrammarUnreachableAltsMessage);
+		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
+		assertEquals("[1]", u.alts.toString());
+	}
+
+	public void testRecursive() throws Exception {
+		// this is cool because the 3rd alt includes !(all other possibilities)
+		Grammar g = new Grammar(
+			"lexer grammar duh;\n" +
+			"SUBTEMPLATE\n" +
+			"        :       '{'\n" +
+			"                ( SUBTEMPLATE\n" +
+			"                | ESC\n" +
+			"                | ~('}'|'\\\\'|'{')\n" +
+			"                )*\n" +
+			"                '}'\n" +
+			"        ;\n" +
+			"fragment\n" +
+			"ESC     :       '\\\\' . ;");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'\\\\'->:s3=>2\n" +
+			".s0-'{'->:s2=>1\n" +
+			".s0-'}'->:s1=>4\n" +
+			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFE'}->:s4=>3\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testRecursive2() throws Exception {
+		// this is also cool because it resolves \\ to be ESC alt; it's just
+		// less efficient of a DFA
+		Grammar g = new Grammar(
+			"lexer grammar duh;\n" +
+			"SUBTEMPLATE\n" +
+			"        :       '{'\n" +
+			"                ( SUBTEMPLATE\n" +
+			"                | ESC\n" +
+			"                | ~('}'|'{')\n" +
+			"                )*\n" +
+			"                '}'\n" +
+			"        ;\n" +
+			"fragment\n" +
+			"ESC     :       '\\\\' . ;");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'\\\\'->.s3\n" +
+			".s0-'{'->:s2=>1\n" +
+			".s0-'}'->:s1=>4\n" +
+			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFE'}->:s5=>3\n" +
+			".s3-'\\\\'->:s8=>2\n" +
+			".s3-'{'->:s7=>2\n" +
+			".s3-'}'->.s4\n" +
+			".s3-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFE'}->:s6=>2\n" +
+			".s4-'\\u0000'..'\\uFFFE'->:s6=>2\n" +
+			".s4-<EOT>->:s5=>3\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNotFragmentInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : 'a' | ~B {;} ;\n" +
+			"fragment B : 'a' ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'b'..'\\uFFFE'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNotSetFragmentInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : B | ~B {;} ;\n" +
+			"fragment B : 'a'|'b' ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'..'b'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'c'..'\\uFFFE'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNotTokenInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : 'x' ('a' | ~B {;}) ;\n" +
+			"B : 'a' ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'b'..'\\uFFFE'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNotComplicatedSetRuleInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : B | ~B {;} ;\n" +
+			"fragment B : 'a'|'b'|'c'..'e'|C ;\n" +
+			"fragment C : 'f' ;\n"); // has to be seen from B to C
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'..'f'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'g'..'\\uFFFE'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testNotSetWithRuleInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"T : ~('a' | B) | 'a';\n" +
+			"fragment\n" +
+			"B : 'b' ;\n" +
+			"C : ~'x'{;} ;"); // force Tokens to not collapse T|C
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'b'->:s3=>2\n" +
+			".s0-'x'->:s2=>1\n" +
+			".s0-{'\\u0000'..'a', 'c'..'w', 'y'..'\\uFFFE'}->.s1\n" +
+			".s1-<EOT>->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testSetCallsRuleWithNot() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar A;\n" +
+			"T : ~'x' ;\n" +
+			"S : 'x' (T | 'x') ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'x'->:s2=>2\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFE'}->:s1=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	public void testSynPredInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"LT:  '<' ' '*\n" +
+			"  |  ('<' IDENT) => '<' IDENT '>'\n" + // this was causing syntax error
+			"  ;\n" +
+			"IDENT:    'a'+;\n");
+		// basically, Tokens rule should not do set compression test
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'<'->:s1=>1\n" +
+			".s0-'a'->:s2=>2\n";
+		checkDecision(g, 4, expecting, null); // 4 is Tokens rule
+	}
+
+	// S U P P O R T
+
+	public void _template() throws Exception {
+		Grammar g = new Grammar(
+			"grammar T;\n"+
+			"a : A | B;");
+		g.createLookaheadDFAs();
+		String expecting =
+			"\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	protected void checkDecision(Grammar g,
+								 int decision,
+								 String expecting,
+								 int[] expectingUnreachableAlts)
+		throws Exception
+	{
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getCodeGenerator()==null ) {
+			CodeGenerator generator = new CodeGenerator(null, g, "Java");
+			g.setCodeGenerator(generator);
+			g.createNFAs();
+			g.createLookaheadDFAs();
+		}
+
+		DFA dfa = g.getLookaheadDFA(decision);
+		assertNotNull("unknown decision #"+decision, dfa);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		//System.out.print(result);
+		List nonDetAlts = dfa.getUnreachableAlts();
+		//System.out.println("alts w/o predict state="+nonDetAlts);
+
+		// first make sure nondeterministic alts are as expected
+		if ( expectingUnreachableAlts==null ) {
+			if ( nonDetAlts.size()!=0 ) {
+				System.err.println("nondeterministic alts (should be empty): "+nonDetAlts);
+			}
+			assertEquals("unreachable alts mismatch", 0, nonDetAlts.size());
+		}
+		else {
+			for (int i=0; i<expectingUnreachableAlts.length; i++) {
+				assertTrue("unreachable alts mismatch", nonDetAlts.contains(new Integer(expectingUnreachableAlts[i])));
+			}
+		}
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/src/org/antlr/test/TestCommonTreeNodeStream.java b/src/org/antlr/test/TestCommonTreeNodeStream.java
new file mode 100644
index 0000000..b191194
--- /dev/null
+++ b/src/org/antlr/test/TestCommonTreeNodeStream.java
@@ -0,0 +1,203 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+import org.antlr.runtime.tree.Tree;
+
+/** Tests specific to CommonTreeNodeStream */
+public class TestCommonTreeNodeStream extends TestTreeNodeStream {
+	public void testPushPop() throws Exception {
+		// ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
+		// stream has 9 real + 8 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		r0.addChild(r1);
+		Tree r2 = new CommonTree(new CommonToken(104));
+		r2.addChild(new CommonTree(new CommonToken(105)));
+		r0.addChild(r2);
+		Tree r3 = new CommonTree(new CommonToken(106));
+		r3.addChild(new CommonTree(new CommonToken(107)));
+		r0.addChild(r3);
+		r0.addChild(new CommonTree(new CommonToken(108)));
+		r0.addChild(new CommonTree(new CommonToken(109)));
+
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+		String expecting = " 101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3";
+		String found = stream.toString();
+		assertEquals(expecting, found);
+
+		// Assume we want to hit node 107 and then "call 102" then return
+
+		int indexOf102 = 2;
+		int indexOf107 = 12;
+		for (int k=1; k<=indexOf107; k++) { // consume til 107 node
+			stream.consume();
+		}
+		// CALL 102
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.push(indexOf102);
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 102
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume DN
+		assertEquals(103, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 103
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		// RETURN
+		stream.pop();
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+	}
+
+	public void testNestedPushPop() throws Exception {
+		// ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
+		// stream has 9 real + 8 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		r0.addChild(r1);
+		Tree r2 = new CommonTree(new CommonToken(104));
+		r2.addChild(new CommonTree(new CommonToken(105)));
+		r0.addChild(r2);
+		Tree r3 = new CommonTree(new CommonToken(106));
+		r3.addChild(new CommonTree(new CommonToken(107)));
+		r0.addChild(r3);
+		r0.addChild(new CommonTree(new CommonToken(108)));
+		r0.addChild(new CommonTree(new CommonToken(109)));
+
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+
+		// Assume we want to hit node 107 and then "call 102", which
+		// calls 104, then return
+
+		int indexOf102 = 2;
+		int indexOf107 = 12;
+		for (int k=1; k<=indexOf107; k++) { // consume til 107 node
+			stream.consume();
+		}
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		// CALL 102
+		stream.push(indexOf102);
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 102
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume DN
+		assertEquals(103, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 103
+
+		// CALL 104
+		int indexOf104 = 6;
+		stream.push(indexOf104);
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 104
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume DN
+		assertEquals(105, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 105
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		// RETURN (to UP node in 102 subtree)
+		stream.pop();
+
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		// RETURN (to empty stack)
+		stream.pop();
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+	}
+
+	public void testPushPopFromEOF() throws Exception {
+		// ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
+		// stream has 9 real + 8 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		r0.addChild(r1);
+		Tree r2 = new CommonTree(new CommonToken(104));
+		r2.addChild(new CommonTree(new CommonToken(105)));
+		r0.addChild(r2);
+		Tree r3 = new CommonTree(new CommonToken(106));
+		r3.addChild(new CommonTree(new CommonToken(107)));
+		r0.addChild(r3);
+		r0.addChild(new CommonTree(new CommonToken(108)));
+		r0.addChild(new CommonTree(new CommonToken(109)));
+
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+
+		while ( stream.LA(1)!=Token.EOF ) {
+			stream.consume();
+		}
+		int indexOf102 = 2;
+		int indexOf104 = 6;
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+
+		// CALL 102
+		stream.push(indexOf102);
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 102
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume DN
+		assertEquals(103, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 103
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		// RETURN (to empty stack)
+		stream.pop();
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+
+		// CALL 104
+		stream.push(indexOf104);
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 104
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume DN
+		assertEquals(105, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 105
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		// RETURN (to empty stack)
+		stream.pop();
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+	}
+
+	public void testStackStretch() throws Exception {
+		// make more than INITIAL_CALL_STACK_SIZE pushes
+		Tree r0 = new CommonTree(new CommonToken(101));
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+		// go 1 over initial size
+		for (int i=1; i<=CommonTreeNodeStream.INITIAL_CALL_STACK_SIZE+1; i++) {
+			stream.push(i);
+		}
+		assertEquals(10, stream.pop());
+		assertEquals(9, stream.pop());
+	}
+
+}
diff --git a/src/org/antlr/test/TestDFAConversion.java b/src/org/antlr/test/TestDFAConversion.java
new file mode 100644
index 0000000..5f5d1b4
--- /dev/null
+++ b/src/org/antlr/test/TestDFAConversion.java
@@ -0,0 +1,1275 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.misc.BitSet;
+import org.antlr.tool.*;
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+
+import java.util.*;
+
+public class TestDFAConversion extends BaseTest {
+
+	public void testA() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A C | B;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAB_or_AC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A B | A C;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s3=>1\n" +
+			".s1-C->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAB_or_AC_k2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {k=2;}\n"+
+			"a : A B | A C;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s3=>1\n" +
+			".s1-C->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAB_or_AC_k1() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {k=1;}\n"+
+			"a : A B | A C;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A" ;
+		int[] danglingAlts = new int[] {2};
+		int numWarnings = 2; // non-LL(1) abort and ambig upon A
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testselfRecurseNonDet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a X | A a Y;");
+		// nondeterministic from left edge; no stop state
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-A->:s2=>1\n"; // gets this after failing to do LL(*)
+		int[] unreachableAlts = new int[] {1,2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = null;
+		int[] danglingAlts = new int[] {1,2};
+		int numWarnings = 2; // non-LL(*) abort and ambig upon A A
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testCannotSeePastRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-B->.s3\n" +
+			".s1-L->:s2=>1\n";
+		int[] unreachableAlts = new int[] {1,2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testSynPredResolvesRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (y X)=> y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s7\n" +
+			".s0-L->.s1\n" +
+			".s1-B->.s5\n" +
+			".s1-L->.s2\n" +
+			".s2-{synpred1}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n" +
+			".s5-R->.s6\n" +
+			".s6-X&&{synpred1}?->:s3=>1\n" +
+			".s6-Y->:s4=>2\n" +
+			".s7-X&&{synpred1}?->:s3=>1\n" +
+			".s7-Y->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testSynPredResolvesRecursionInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A :     (B ';')=> B ';'\n" +
+			"  |     B '.'\n" +
+			"  ;\n" +
+			"fragment\n" +
+			"B :     '(' B ')'\n" +
+			"  |     'x'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-'x'->.s7\n" +
+			".s1-'('->.s2\n" +
+			".s1-'x'->.s5\n" +
+			".s2-{synpred1}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n" +
+			".s5-')'->.s6\n" +
+			".s6-'.'->:s4=>2\n" +
+			".s6-';'&&{synpred1}?->:s3=>1\n" +
+			".s7-'.'->:s4=>2\n" +
+			".s7-';'&&{synpred1}?->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testAutoBacktrackResolvesRecursionInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"options {backtrack=true;}\n"+
+			"A :     B ';'\n" +
+			"  |     B '.'\n" +
+			"  ;\n" +
+			"fragment\n" +
+			"B :     '(' B ')'\n" +
+			"  |     'x'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-'x'->.s7\n" +
+			".s1-'('->.s2\n" +
+			".s1-'x'->.s5\n" +
+			".s2-{synpred1}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n" +
+			".s5-')'->.s6\n" +
+			".s6-'.'->:s4=>2\n" +
+			".s6-';'->:s3=>1\n" +
+			".s7-'.'->:s4=>2\n" +
+			".s7-';'->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testAutoBacktrackResolvesRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"x   : y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s7\n" +
+				".s0-L->.s1\n" +
+				".s1-B->.s5\n" +
+				".s1-L->.s2\n" +
+				".s2-{synpred1}?->:s3=>1\n" +
+				".s2-{true}?->:s4=>2\n" +
+				".s5-R->.s6\n" +
+				".s6-X->:s3=>1\n" +
+				".s6-Y->:s4=>2\n" +
+				".s7-X->:s3=>1\n" +
+				".s7-Y->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testselfRecurseNonDet2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : P a P | P;");
+		// nondeterministic from left edge
+		String expecting =
+			".s0-P->.s1\n" +
+			".s1-EOF->:s2=>2\n"+
+			".s1-P->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "P P";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testIndirectRecursionLoop() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : b X ;\n"+
+			"b : a B ;\n");
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules =
+			new HashSet() {{add("a"); add("b");}};
+		assertEquals(expectedRules, leftRecursive);
+
+		g.createLookaheadDFAs();
+
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
+				    msg instanceof LeftRecursionCyclesMessage);
+		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
+
+		// cycle of [a, b]
+		Collection result = cyclesMsg.cycles;
+		List expecting = new ArrayList();
+		expecting.add(new HashSet() {{add("a"); add("b");}});
+		assertEquals(expecting, result);
+	}
+
+	public void testIndirectRecursionLoop2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : i b X ;\n"+ // should see through i
+			"b : a B ;\n" +
+			"i : ;\n");
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules =
+			new HashSet() {{add("a"); add("b");}};
+		assertEquals(expectedRules, leftRecursive);
+
+		g.createLookaheadDFAs();
+
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
+				    msg instanceof LeftRecursionCyclesMessage);
+		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
+
+		// cycle of [a, b]
+		Collection result = cyclesMsg.cycles;
+		List expecting = new ArrayList();
+		expecting.add(new HashSet() {{add("a"); add("b");}});
+		assertEquals(expecting, result);
+	}
+
+	public void testIndirectRecursionLoop3() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : i b X ;\n"+ // should see through i
+			"b : a B ;\n" +
+			"i : ;\n" +
+			"d : e ;\n" +
+			"e : d ;\n");
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules =
+			new HashSet() {{add("a"); add("b"); add("e"); add("d");}};
+		assertEquals(expectedRules, leftRecursive);
+
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
+				    msg instanceof LeftRecursionCyclesMessage);
+		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
+
+		// cycle of [a, b]
+		Collection result = cyclesMsg.cycles;
+		List expecting = new ArrayList();
+		expecting.add(new HashSet() {{add("a"); add("b");}});
+		expecting.add(new HashSet() {{add("d"); add("e");}});
+		assertEquals(expecting, result);
+	}
+
+	public void testifThenElse() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : IF s (E s)? | B;\n" +
+			"slist: s SEMI ;");
+		String expecting =
+			".s0-E->:s1=>1\n" +
+			".s0-SEMI->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "E";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		expecting =
+			".s0-B->:s2=>2\n" +
+			".s0-IF->:s1=>1\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0);
+	}
+
+	public void testifThenElseChecksStackSuffixConflict() throws Exception {
+		// if you don't check stack soon enough, this finds E B not just E
+		// as ambig input
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"slist: s SEMI ;\n"+
+			"s : IF s el | B;\n" +
+			"el: (E s)? ;\n");
+		String expecting =
+			".s0-E->:s1=>1\n" +
+			".s0-SEMI->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "E";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 2, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		expecting =
+			".s0-B->:s2=>2\n" +
+			".s0-IF->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testInvokeRule() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b A\n" +
+			"  | b B\n" +
+			"  | C\n" +
+			"  ;\n" +
+			"b : X\n" +
+			"  ;\n");
+		String expecting =
+			".s0-C->:s4=>3\n" +
+			".s0-X->.s1\n" +
+			".s1-A->:s3=>1\n" +
+			".s1-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testDoubleInvokeRuleLeftEdge() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b X\n" +
+			"  | b Y\n" +
+			"  ;\n" +
+			"b : c B\n" +
+			"  | c\n" +
+			"  ;\n" +
+			"c : C ;\n");
+		String expecting =
+			".s0-C->.s1\n" +
+			".s1-B->.s4\n" +
+			".s1-X->:s2=>1\n" +
+			".s1-Y->:s3=>2\n" +
+			".s4-X->:s2=>1\n" +
+			".s4-Y->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+		expecting =
+			".s0-C->.s1\n" +
+			".s1-B->:s3=>1\n" +
+			".s1-X..Y->:s2=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0);
+	}
+
+	public void testimmediateTailRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a | A B;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-A->:s3=>1\n" +
+			".s1-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAStar_immediateTailRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a | ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		int[] unreachableAlts = null; // without
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testNoStartRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A a | X;"); // single rule 'a' refers to itself; no start rule
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("expecting no start rules; found "+msg.getClass().getName(),
+				   msg instanceof GrammarSemanticsMessage);
+	}
+
+	public void testAStar_immediateTailRecursion2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a | A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-A->:s3=>1\n" +
+			".s1-EOF->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testimmediateLeftRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : a A | B;");
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules = new HashSet() {{add("a");}};
+		assertEquals(expectedRules, leftRecursive);
+	}
+
+	public void testIndirectLeftRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : b | A ;\n" +
+			"b : c ;\n" +
+			"c : a | C ;\n");
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules = new HashSet() {{add("a"); add("b"); add("c");}};
+		assertEquals(expectedRules, leftRecursive);
+	}
+
+	public void testLeftRecursionInMultipleCycles() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"s : a x ;\n" +
+				"a : b | A ;\n" +
+				"b : c ;\n" +
+				"c : a | C ;\n" +
+				"x : y | X ;\n" +
+				"y : x ;\n");
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules =
+			new HashSet() {{add("a"); add("b"); add("c"); add("x"); add("y");}};
+		assertEquals(expectedRules, leftRecursive);
+	}
+
+	public void testCycleInsideRuleDoesNotForceInfiniteRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : (A|)+ B;\n");
+		// before I added a visitedStates thing, it was possible to loop
+		// forever inside of a rule if there was an epsilon loop.
+		Set leftRecursive = g.getLeftRecursiveRules();
+		Set expectedRules = new HashSet();
+		assertEquals(expectedRules, leftRecursive);
+	}
+
+	// L O O P S
+
+	public void testAStar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A )* ;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAorBorCStar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A | B | C )* ;");
+		String expecting =
+			".s0-A..C->:s2=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A )+ ;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
+	}
+
+	public void testAPlusNonGreedyWhenDeterministic() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (options {greedy=false;}:A)+ ;\n");
+		// should look the same as A+ since no ambiguity
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAorBorCPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A | B | C )+ ;");
+		String expecting =
+			".s0-A..C->:s2=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testAOptional() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A )? B ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
+	}
+
+	public void testAorBorCOptional() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A | B | C )? Z ;");
+		String expecting =
+			".s0-A..C->:s1=>1\n" +
+			".s0-Z->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
+	}
+
+	// A R B I T R A R Y  L O O K A H E A D
+
+	public void testAStarBOrAStarC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A)* B | (A)* C;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-B->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-C->:s1=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->.s1\n" +
+			".s0-B->:s2=>1\n" +
+			".s0-C->:s3=>2\n" +
+			".s1-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
+	}
+
+
+	public void testAStarBOrAPlusC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A)* B | (A)+ C;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-B->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-C->:s1=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->.s1\n" +
+			".s0-B->:s2=>1\n" +
+			".s1-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
+	}
+
+
+	public void testAOrBPlusOrAPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A|B)* X | (A)+ Y;");
+		String expecting =
+			".s0-A..B->:s2=>1\n" +
+			".s0-X->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback (A|B)*
+		expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-Y->:s1=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback (A)+
+		expecting =
+			".s0-A->.s1\n" +
+			".s0-B..X->:s2=>1\n" +
+			".s1-A->.s1\n" +
+			".s1-B..X->:s2=>1\n" +
+			".s1-Y->:s3=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule
+	}
+
+	public void testLoopbackAndExit() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A|B)+ B;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-B->.s1\n" +
+			".s1-A..B->:s2=>1\n" +
+			".s1-EOF->:s3=>2\n"; // sees A|B as a set
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testOptionalAltAndBypass() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A|B)? B;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-B->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-EOF->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	// R E S O L V E  S Y N  C O N F L I C T S
+
+	public void testResolveLL1ByChoosingFirst() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A C | A C;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-C->:s2=>1\n";
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A C";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testResolveLL2ByChoosingFirst() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A B | A B;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n";
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A B";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testResolveLL2MixAlt() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A B | A C | A B | Z;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-Z->:s4=>4\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
+		int[] unreachableAlts = new int[] {3};
+		int[] nonDetAlts = new int[] {1,3};
+		String ambigInput = "A B";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testIndirectIFThenElseStyleAmbig() throws Exception {
+		// the (c)+ loopback is ambig because it could match "CASE"
+		// by entering the loop or by falling out and ignoring (s)*
+		// back falling back into (cg)* loop which stats over and
+		// calls cg again.  Either choice allows it to get back to
+		// the same node.  The software catches it as:
+		// "avoid infinite closure computation emanating from alt 1
+		// of ():27|2|[8 $]" where state 27 is the first alt of (c)+
+		// and 8 is the first alt of the (cg)* loop.
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"s : stat ;\n" +
+			"stat : LCURLY ( cg )* RCURLY | E SEMI  ;\n" +
+			"cg : (c)+ (stat)* ;\n" +
+			"c : CASE E ;\n");
+		String expecting =
+			".s0-CASE->:s2=>1\n" +
+			".s0-LCURLY..E->:s1=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "CASE";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	// S E T S
+
+	public void testComplement() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ~(A | B | C) | C {;} ;\n" +
+			"b : X Y Z ;");
+		String expecting =
+			".s0-C->:s2=>2\n" +
+			".s0-X..Z->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testComplementToken() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ~C | C {;} ;\n" +
+			"b : X Y Z ;");
+		String expecting =
+			".s0-C->:s2=>2\n" +
+			".s0-X..Z->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testComplementChar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ~'x' | 'x' {;} ;\n");
+		String expecting =
+			".s0-'x'->:s2=>2\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFE'}->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testComplementCharSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ~(' '|'\t'|'x'|'y') | 'x';\n" + // collapse into single set
+			"B : 'y' ;");
+		String expecting =
+			".s0-'y'->:s2=>2\n" +
+			".s0-{'\\u0000'..'\\b', '\\n'..'\\u001F', '!'..'x', 'z'..'\\uFFFE'}->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testNoSetCollapseWithActions() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A | B {foo}) | C;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testRuleAltsSetCollapse() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A | B | C ;"
+		);
+		String expecting = // still looks like block
+			" ( grammar t ( rule a ARG RET scope ( BLOCK ( ALT A <end-of-alt> ) ( ALT B <end-of-alt> ) ( ALT C <end-of-alt> ) <end-of-block> ) <end-of-rule> ) )";
+		assertEquals(expecting, g.getGrammarTree().toStringTree());
+	}
+
+	public void testTokensRuleAltsDoNotCollapse() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';" +
+			"B : 'b';\n"
+		);
+		String expecting =
+			".s0-'a'->:s1=>1\n" +
+			".s0-'b'->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	public void testMultipleSequenceCollision() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"a : (A{;}|B)\n" +
+			"  | (A{;}|B)\n" +
+			"  | A\n" +
+			"  ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>1\n"; // not optimized because states are nondet
+		int[] unreachableAlts = new int[] {2,3};
+		int[] nonDetAlts = new int[] {1,2,3};
+		String ambigInput = "A";
+		int[] danglingAlts = null;
+		int numWarnings = 3;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		/* There are 2 nondet errors, but the checkDecision only checks first one :(
+		The "B" conflicting input is not checked except by virtue of the
+		result DFA.
+<string>:2:5: Decision can match input such as "A" using multiple alternatives:
+alt 1 via NFA path 7,2,3
+alt 2 via NFA path 14,9,10
+alt 3 via NFA path 16,17
+As a result, alternative(s) 2,3 were disabled for that input,
+<string>:2:5: Decision can match input such as "B" using multiple alternatives:
+alt 1 via NFA path 7,8,4,5
+alt 2 via NFA path 14,15,11,12
+As a result, alternative(s) 2 were disabled for that input
+<string>:2:5: The following alternatives are unreachable: 2,3
+*/
+	}
+
+	public void testMultipleAltsSameSequenceCollision() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"a : type ID \n" +
+			"  | type ID\n" +
+			"  | type ID\n" +
+			"  | type ID\n" +
+			"  ;\n" +
+			"\n" +
+			"type : I | F;");
+		// nondeterministic from left edge; no stop state
+		String expecting =
+			".s0-I..F->.s1\n" +
+			".s1-ID->:s2=>1\n";
+		int[] unreachableAlts = new int[] {2,3,4};
+		int[] nonDetAlts = new int[] {1,2,3,4};
+		String ambigInput = "I..F ID";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testFollowReturnsToLoopReenteringSameRule() throws Exception {
+		// D07 can be matched in the (...)? or fall out of esc back into (..)*
+		// loop in sl.  Note that D07 is matched by ~(R|SLASH).  No good
+		// way to write that grammar I guess
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"sl : L ( esc | ~(R|SLASH) )* R ;\n" +
+			"\n" +
+			"esc : SLASH ( N | D03 (D07)? ) ;");
+		String expecting =
+			".s0-R->:s1=>3\n" +
+			".s0-SLASH->:s2=>1\n" +
+			".s0-{L, N..D07}->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "D07";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testTokenCallsAnotherOnLeftEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"F   :   I '.'\n" +
+			"    ;\n" +
+			"I   :   '0'\n" +
+			"    ;\n"
+		);
+		String expecting =
+			".s0-'0'->.s1\n" +
+			".s1-'.'->:s3=>1\n" +
+			".s1-<EOT>->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+
+	public void testSelfRecursionAmbigAlts() throws Exception {
+		// ambiguous grammar for "L ID R" (alts 1,2 of a)
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a;\n" +
+			"a   :   L ID R\n" +
+			"    |   L a R\n" + // disabled for L ID R
+			"    |   b\n" +
+			"    ;\n" +
+			"\n" +
+			"b   :   ID\n" +
+			"    ;\n");
+		String expecting =
+			".s0-ID->:s5=>3\n" +
+			".s0-L->.s1\n" +
+			".s1-ID->.s2\n" +
+			".s1-L->:s4=>2\n" +
+			".s2-R->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "L ID R";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testIndirectRecursionAmbigAlts() throws Exception {
+		// ambiguous grammar for "L ID R" (alts 1,2 of a)
+		// This was derived from the java grammar 12/4/2004 when it
+		// was not handling a unaryExpression properly.  I traced it
+		// to incorrect closure-busy condition.  It thought that the trace
+		// of a->b->a->b again for "L ID" was an infinite loop, but actually
+		// the repeat call to b only happens *after* an L has been matched.
+		// I added a check to see what the initial stack looks like and it
+		// seems to work now.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s   :   a ;\n" +
+			"a   :   L ID R\n" +
+			"    |   b\n" +
+			"    ;\n" +
+			"\n" +
+			"b   :   ID\n" +
+			"    |   L a R\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->:s4=>2\n" +
+			".s0-L->.s1\n" +
+			".s1-ID->.s2\n" +
+			".s1-L->:s4=>2\n" +
+			".s2-R->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "L ID R";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testTailRecursionInvokedFromArbitraryLookaheadDecision() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b X\n" +
+			"  | b Y\n" +
+			"  ;\n" +
+			"\n" +
+			"b : A\n" +
+			"  | A b\n" +
+			"  ;\n");
+		String expecting =
+			".s0-A->.s1\n" +
+				".s1-Y->:s3=>2\n" +
+				".s1-{X, A}->:s2=>1\n";
+		int[] unreachableAlts = new int[] {1,2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testWildcardStarK1AndNonGreedyByDefaultInParser() throws Exception {
+		// no error because .* assumes it should finish when it sees R
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"s : A block EOF ;\n" +
+			"block : L .* R ;");
+		String expecting =
+			".s0-A..L->:s2=>1\n" +
+			".s0-R->:s1=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	public void testWildcardPlusK1AndNonGreedyByDefaultInParser() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"s : A block EOF ;\n" +
+			"block : L .+ R ;");
+		String expecting =
+			".s0-A..L->:s2=>1\n" +
+			".s0-R->:s1=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	// Check state table creation
+
+	public void testCyclicTableCreation() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A+ X | A+ Y ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+	}
+
+
+	// S U P P O R T
+
+	public void _template() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A | B;");
+		String expecting =
+			"\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	protected void checkDecision(Grammar g,
+								 int decision,
+								 String expecting,
+								 int[] expectingUnreachableAlts,
+								 int[] expectingNonDetAlts,
+								 String expectingAmbigInput,
+								 int[] expectingDanglingAlts,
+								 int expectingNumWarnings)
+		throws Exception
+	{
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.createNFAs();
+			g.createLookaheadDFAs();
+		}
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+
+		if ( equeue.size()!=expectingNumWarnings ) {
+			System.err.println("Warnings issued: "+equeue);
+		}
+
+		assertEquals("unexpected number of expected problems",
+				   expectingNumWarnings, equeue.size());
+
+		DFA dfa = g.getLookaheadDFA(decision);
+		assertNotNull("no DFA for decision "+decision, dfa);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+
+		List unreachableAlts = dfa.getUnreachableAlts();
+
+		// make sure unreachable alts are as expected
+		if ( expectingUnreachableAlts!=null ) {
+			BitSet s = new BitSet();
+			s.addAll(expectingUnreachableAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(unreachableAlts);
+			assertEquals("unreachable alts mismatch", s, s2);
+		}
+		else {
+			assertEquals("number of unreachable alts", 0, unreachableAlts.size());
+		}
+
+		// check conflicting input
+		if ( expectingAmbigInput!=null ) {
+			// first, find nondet message
+			Message msg = (Message)equeue.warnings.get(0);
+			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
+					    msg instanceof GrammarNonDeterminismMessage);
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			List labels =
+				nondetMsg.probe.getSampleNonDeterministicInputSequence(nondetMsg.problemState);
+			String input = nondetMsg.probe.getInputSequenceDisplay(labels);
+			assertEquals(expectingAmbigInput, input);
+		}
+
+		// check nondet alts
+		if ( expectingNonDetAlts!=null ) {
+			RecursionOverflowMessage recMsg = null;
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			List nonDetAlts = null;
+			if ( nondetMsg!=null ) {
+				nonDetAlts =
+					nondetMsg.probe.getNonDeterministicAltsForState(nondetMsg.problemState);
+			}
+			else {
+				recMsg = getRecursionOverflowMessage(equeue.warnings);
+				if ( recMsg!=null ) {
+					//nonDetAlts = new ArrayList(recMsg.alts);
+				}
+			}
+			// compare nonDetAlts with expectingNonDetAlts
+			BitSet s = new BitSet();
+			s.addAll(expectingNonDetAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(nonDetAlts);
+			assertEquals("nondet alts mismatch", s, s2);
+			assertTrue("found no nondet alts; expecting: "+
+					    str(expectingNonDetAlts),
+					    nondetMsg!=null||recMsg!=null);
+		}
+		else {
+			// not expecting any nondet alts, make sure there are none
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			assertNull("found nondet alts, but expecting none", nondetMsg);
+		}
+
+		assertEquals(expecting, result);
+	}
+
+	protected GrammarNonDeterminismMessage getNonDeterminismMessage(List warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = (Message) warnings.get(i);
+			if ( m instanceof GrammarNonDeterminismMessage ) {
+				return (GrammarNonDeterminismMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected RecursionOverflowMessage getRecursionOverflowMessage(List warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = (Message) warnings.get(i);
+			if ( m instanceof RecursionOverflowMessage ) {
+				return (RecursionOverflowMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected LeftRecursionCyclesMessage getLeftRecursionCyclesMessage(List warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = (Message) warnings.get(i);
+			if ( m instanceof LeftRecursionCyclesMessage ) {
+				return (LeftRecursionCyclesMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected GrammarDanglingStateMessage getDanglingStateMessage(List warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = (Message) warnings.get(i);
+			if ( m instanceof GrammarDanglingStateMessage ) {
+				return (GrammarDanglingStateMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected String str(int[] elements) {
+		StringBuffer buf = new StringBuffer();
+		for (int i = 0; i < elements.length; i++) {
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			int element = elements[i];
+			buf.append(element);
+		}
+		return buf.toString();
+	}
+
+}
diff --git a/src/org/antlr/test/TestDFAMatching.java b/src/org/antlr/test/TestDFAMatching.java
new file mode 100644
index 0000000..b340472
--- /dev/null
+++ b/src/org/antlr/test/TestDFAMatching.java
@@ -0,0 +1,101 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.NFA;
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.tool.Grammar;
+
+public class TestDFAMatching extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestDFAMatching() {
+    }
+
+    public void testSimpleAltCharTest() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : {;}'a' | 'b' | 'c';");
+		g.createNFAs();
+		g.createLookaheadDFAs();
+        DFA dfa = g.getLookaheadDFA(1);
+        checkPrediction(dfa,"a",1);
+        checkPrediction(dfa,"b",2);
+        checkPrediction(dfa,"c",3);
+        checkPrediction(dfa,"d", NFA.INVALID_ALT_NUMBER);
+    }
+
+    public void testSets() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : {;}'a'..'z' | ';' | '0'..'9' ;");
+		g.createNFAs();
+        g.createLookaheadDFAs();
+        DFA dfa = g.getLookaheadDFA(1);
+        checkPrediction(dfa,"a",1);
+        checkPrediction(dfa,"q",1);
+        checkPrediction(dfa,"z",1);
+        checkPrediction(dfa,";",2);
+        checkPrediction(dfa,"9",3);
+    }
+
+    public void testFiniteCommonLeftPrefixes() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : 'a' 'b' | 'a' 'c' | 'd' 'e' ;");
+		g.createNFAs();
+        g.createLookaheadDFAs();
+        DFA dfa = g.getLookaheadDFA(1);
+        checkPrediction(dfa,"ab",1);
+        checkPrediction(dfa,"ac",2);
+        checkPrediction(dfa,"de",3);
+        checkPrediction(dfa,"q", NFA.INVALID_ALT_NUMBER);
+    }
+
+    public void testSimpleLoops() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : (DIGIT)+ '.' DIGIT | (DIGIT)+ ;\n" +
+                "fragment DIGIT : '0'..'9' ;\n");
+		g.createNFAs();
+        g.createLookaheadDFAs();
+        DFA dfa = g.getLookaheadDFA(3);
+        checkPrediction(dfa,"32",2);
+        checkPrediction(dfa,"999.2",1);
+        checkPrediction(dfa,".2", NFA.INVALID_ALT_NUMBER);
+    }
+
+    protected void checkPrediction(DFA dfa, String input, int expected)
+        throws Exception
+    {
+        ANTLRStringStream stream = new ANTLRStringStream(input);
+        assertEquals(dfa.predict(stream), expected);
+    }
+
+}
diff --git a/src/org/antlr/test/TestInterpretedLexing.java b/src/org/antlr/test/TestInterpretedLexing.java
new file mode 100644
index 0000000..376d7b2
--- /dev/null
+++ b/src/org/antlr/test/TestInterpretedLexing.java
@@ -0,0 +1,175 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+import org.antlr.runtime.*;
+
+public class TestInterpretedLexing extends BaseTest {
+
+	/*
+	static class Tracer implements ANTLRDebugInterface {
+		Grammar g;
+		public DebugActions(Grammar g) {
+			this.g = g;
+		}
+		public void enterRule(String ruleName) {
+			System.out.println("enterRule("+ruleName+")");
+		}
+
+		public void exitRule(String ruleName) {
+			System.out.println("exitRule("+ruleName+")");
+		}
+
+		public void matchElement(int type) {
+			System.out.println("matchElement("+g.getTokenName(type)+")");
+		}
+
+		public void mismatchedElement(MismatchedTokenException e) {
+			System.out.println(e);
+			e.printStackTrace(System.out);
+		}
+
+		public void mismatchedSet(MismatchedSetException e) {
+			System.out.println(e);
+			e.printStackTrace(System.out);
+		}
+
+		public void noViableAlt(NoViableAltException e) {
+			System.out.println(e);
+			e.printStackTrace(System.out);
+		}
+	}
+    */
+
+    /** Public default constructor used by TestRig */
+    public TestInterpretedLexing() {
+    }
+
+	public void testSimpleAltCharTest() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : 'a' | 'b' | 'c';");
+		final int Atype = g.getTokenType("A");
+        Interpreter engine = new Interpreter(g, new ANTLRStringStream("a"));
+        engine = new Interpreter(g, new ANTLRStringStream("b"));
+		Token result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("c"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+    }
+
+    public void testSingleRuleRef() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : 'a' B 'c' ;\n" +
+                "B : 'b' ;\n");
+		final int Atype = g.getTokenType("A");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("abc")); // should ignore the x
+		Token result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+    }
+
+    public void testSimpleLoop() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "INT : (DIGIT)+ ;\n"+
+				"fragment DIGIT : '0'..'9';\n");
+		final int INTtype = g.getTokenType("INT");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("12x")); // should ignore the x
+		Token result = engine.scan("INT");
+		assertEquals(result.getType(), INTtype);
+		engine = new Interpreter(g, new ANTLRStringStream("1234"));
+		result = engine.scan("INT");
+		assertEquals(result.getType(), INTtype);
+    }
+
+    public void testMultAltLoop() throws Exception {
+		Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : ('0'..'9'|'a'|'b')+ ;\n");
+		final int Atype = g.getTokenType("A");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("a"));
+		Token result = engine.scan("A");
+        engine = new Interpreter(g, new ANTLRStringStream("a"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+		engine = new Interpreter(g, new ANTLRStringStream("1234"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("aaa"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("aaaa9"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("b"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("baa"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+    }
+
+	public void testSimpleLoops() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"A : ('0'..'9')+ '.' ('0'..'9')* | ('0'..'9')+ ;\n");
+		final int Atype = g.getTokenType("A");
+		CharStream input = new ANTLRStringStream("1234.5");
+		Interpreter engine = new Interpreter(g, input);
+		Token result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+	}
+
+	public void testTokensRules() throws Exception {
+		Grammar pg = new Grammar(
+			"grammar p;\n"+
+			"a : (INT|FLOAT|WS)+;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName("<string>");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ {channel=99;};\n");
+		CharStream input = new ANTLRStringStream("123 139.52");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+		String result = tokens.toString();
+		//System.out.println(result);
+		String expecting = "123 139.52";
+		assertEquals(result,expecting);
+	}
+
+}
diff --git a/src/org/antlr/test/TestInterpretedParsing.java b/src/org/antlr/test/TestInterpretedParsing.java
new file mode 100644
index 0000000..adb56f8
--- /dev/null
+++ b/src/org/antlr/test/TestInterpretedParsing.java
@@ -0,0 +1,181 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.ParseTree;
+
+public class TestInterpretedParsing extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestInterpretedParsing() {
+    }
+
+	public void testSimpleParse() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY EOF;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName(Grammar.IGNORE_STRING_IN_GRAMMAR_FILE_NAME +"string");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=1; y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr 1) ;) (assign y = (expr 3.42) ;) (assign z = (expr y) ;) } <EOF>))";
+		assertEquals(expecting, result);
+	}
+
+	public void testMismatchedTokenError() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.setFileName(Grammar.IGNORE_STRING_IN_GRAMMAR_FILE_NAME +"string");
+		g.importTokenVocabulary(pg);
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=1 y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr 1) MismatchedTokenException(5!=9))))";
+		assertEquals(expecting, result);
+	}
+
+	public void testMismatchedSetError() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName("<string>");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=; y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr MismatchedSetException(9!={5,10,11})))))";
+		assertEquals(expecting, result);
+	}
+
+	public void testNoViableAltError() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : {;}INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName("<string>");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=; y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr NoViableAltException(9!=[4:1: expr : ( INT | FLOAT | ID );])))))";
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/src/org/antlr/test/TestIntervalSet.java b/src/org/antlr/test/TestIntervalSet.java
new file mode 100644
index 0000000..4f5326b
--- /dev/null
+++ b/src/org/antlr/test/TestIntervalSet.java
@@ -0,0 +1,389 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.analysis.Label;
+import org.antlr.misc.IntervalSet;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** Unit tests for {@link IntervalSet}: construction, intersection (and),
+ *  subtraction, complement, equality, membership, merging of adjacent and
+ *  overlapping intervals, size, and list conversion.
+ *  <p>
+ *  JUnit's assertEquals signature is (expected, actual); arguments are
+ *  ordered that way throughout so failure messages read correctly.
+ */
+public class TestIntervalSet extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestIntervalSet() {
+    }
+
+    public void testSingleElement() throws Exception {
+        IntervalSet s = IntervalSet.of(99);
+        assertEquals("99", s.toString());
+    }
+
+    public void testIsolatedElements() throws Exception {
+        IntervalSet s = new IntervalSet();
+        s.add(1);
+        s.add('z');
+        s.add('\uFFF0');
+        assertEquals("{1, 122, 65520}", s.toString());
+    }
+
+    public void testMixedRangesAndElements() throws Exception {
+        IntervalSet s = new IntervalSet();
+        s.add(1);
+        s.add('a','z');
+        s.add('0','9');
+        assertEquals("{1, 48..57, 97..122}", s.toString());
+    }
+
+    public void testSimpleAnd() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(13,15);
+        assertEquals("13..15", s.and(s2).toString());
+    }
+
+    public void testRangeAndIsolatedElement() throws Exception {
+        IntervalSet s = IntervalSet.of('a','z');
+        IntervalSet s2 = IntervalSet.of('d');
+        assertEquals("100", s.and(s2).toString()); // 'd' == 100
+    }
+
+    public void testEmptyIntersection() throws Exception {
+        IntervalSet s = IntervalSet.of('a','z');
+        IntervalSet s2 = IntervalSet.of('0','9');
+        assertEquals("{}", s.and(s2).toString());
+    }
+
+    public void testEmptyIntersectionSingleElements() throws Exception {
+        IntervalSet s = IntervalSet.of('a');
+        IntervalSet s2 = IntervalSet.of('d');
+        assertEquals("{}", s.and(s2).toString());
+    }
+
+    public void testNotSingleElement() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,1000);
+        vocabulary.add(2000,3000);
+        IntervalSet s = IntervalSet.of(50,50);
+        assertEquals("{1..49, 51..1000, 2000..3000}",
+                     s.complement(vocabulary).toString());
+    }
+
+    public void testNotSet() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,1000);
+        IntervalSet s = IntervalSet.of(50,60);
+        s.add(5);
+        s.add(250,300);
+        assertEquals("{1..4, 6..49, 61..249, 301..1000}",
+                     s.complement(vocabulary).toString());
+    }
+
+    public void testNotEqualSet() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,1000);
+        IntervalSet s = IntervalSet.of(1,1000);
+        assertEquals("{}", s.complement(vocabulary).toString());
+    }
+
+    public void testNotSetEdgeElement() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,2);
+        IntervalSet s = IntervalSet.of(1);
+        assertEquals("2", s.complement(vocabulary).toString());
+    }
+
+    public void testNotSetFragmentedVocabulary() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,255);
+        vocabulary.add(1000,2000);
+        vocabulary.add(9999);
+        IntervalSet s = IntervalSet.of(50,60);
+        s.add(3);
+        s.add(250,300);
+        s.add(10000); // this is outside range of vocab and should be ignored
+        assertEquals("{1..2, 4..49, 61..249, 1000..2000, 9999}",
+                     s.complement(vocabulary).toString());
+    }
+
+    public void testSubtractOfCompletelyContainedRange() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(12,15);
+        assertEquals("{10..11, 16..20}", s.subtract(s2).toString());
+    }
+
+    public void testSubtractOfOverlappingRangeFromLeft() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(5,11);
+        assertEquals("12..20", s.subtract(s2).toString());
+
+        IntervalSet s3 = IntervalSet.of(5,10);
+        assertEquals("11..20", s.subtract(s3).toString());
+    }
+
+    public void testSubtractOfOverlappingRangeFromRight() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(15,25);
+        assertEquals("10..14", s.subtract(s2).toString());
+
+        IntervalSet s3 = IntervalSet.of(20,25);
+        assertEquals("10..19", s.subtract(s3).toString());
+    }
+
+    public void testSubtractOfCompletelyCoveredRange() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(1,25);
+        assertEquals("{}", s.subtract(s2).toString());
+    }
+
+    public void testSubtractOfRangeSpanningMultipleRanges() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        s.add(30,40);
+        s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60
+        IntervalSet s2 = IntervalSet.of(5,55); // covers one and touches 2nd range
+        assertEquals("56..60", s.subtract(s2).toString());
+
+        IntervalSet s3 = IntervalSet.of(15,55); // touches both
+        assertEquals("{10..14, 56..60}", s.subtract(s3).toString());
+    }
+
+    /** The following was broken:
+        {0..113, 115..65534}-{0..115, 117..65534}=116..65534
+     */
+    public void testSubtractOfWackyRange() throws Exception {
+        IntervalSet s = IntervalSet.of(0,113);
+        s.add(115,200);
+        IntervalSet s2 = IntervalSet.of(0,115);
+        s2.add(117,200);
+        assertEquals("116", s.subtract(s2).toString());
+    }
+
+    public void testSimpleEquals() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(10,20);
+        assertTrue(s.equals(s2));
+
+        IntervalSet s3 = IntervalSet.of(15,55);
+        assertTrue(!s.equals(s3));
+    }
+
+    public void testEquals() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        s.add(2);
+        s.add(499,501);
+        IntervalSet s2 = IntervalSet.of(10,20);
+        s2.add(2);
+        s2.add(499,501);
+        assertTrue(s.equals(s2));
+
+        IntervalSet s3 = IntervalSet.of(10,20);
+        s3.add(2);
+        assertTrue(!s.equals(s3));
+    }
+
+    public void testSingleElementMinusDisjointSet() throws Exception {
+        IntervalSet s = IntervalSet.of(15,15);
+        IntervalSet s2 = IntervalSet.of(1,5);
+        s2.add(10,20);
+        // 15 - {1..5, 10..20} = {}
+        assertEquals("{}", s.subtract(s2).toString());
+    }
+
+    public void testMembership() throws Exception {
+        IntervalSet s = IntervalSet.of(15,15);
+        s.add(50,60);
+        assertTrue(!s.member(0));
+        assertTrue(!s.member(20));
+        assertTrue(!s.member(100));
+        assertTrue(s.member(15));
+        assertTrue(s.member(55));
+        assertTrue(s.member(50));
+        assertTrue(s.member(60));
+    }
+
+    // {2,15,18} & 10..20
+    public void testIntersectionWithTwoContainedElements() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(2,2);
+        s2.add(15);
+        s2.add(18);
+        assertEquals("{15, 18}", s.and(s2).toString());
+    }
+
+    public void testIntersectionWithTwoContainedElementsReversed() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(2,2);
+        s2.add(15);
+        s2.add(18);
+        assertEquals("{15, 18}", s2.and(s).toString());
+    }
+
+    public void testComplement() throws Exception {
+        IntervalSet s = IntervalSet.of(100,100);
+        s.add(101,101);
+        IntervalSet s2 = IntervalSet.of(100,102);
+        assertEquals("102", s.complement(s2).toString());
+    }
+
+    public void testComplement2() throws Exception {
+        IntervalSet s = IntervalSet.of(100,101);
+        IntervalSet s2 = IntervalSet.of(100,102);
+        assertEquals("102", s.complement(s2).toString());
+    }
+
+    public void testComplement3() throws Exception {
+        IntervalSet s = IntervalSet.of(1,96);
+        s.add(99,65534);
+        assertEquals("97..98",
+                     s.complement(1,Label.MAX_CHAR_VALUE).toString());
+    }
+
+    public void testMergeOfRangesAndSingleValues() throws Exception {
+        // {0..41, 42, 43..65534}
+        IntervalSet s = IntervalSet.of(0,41);
+        s.add(42);
+        s.add(43,65534);
+        assertEquals("0..65534", s.toString());
+    }
+
+    public void testMergeOfRangesAndSingleValuesReverse() throws Exception {
+        IntervalSet s = IntervalSet.of(43,65534);
+        s.add(42);
+        s.add(0,41);
+        assertEquals("0..65534", s.toString());
+    }
+
+    public void testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception {
+        // 42, 10, {0..9, 11..41, 43..65534}
+        IntervalSet s = IntervalSet.of(42);
+        s.add(10);
+        s.add(0,9);
+        s.add(43,65534);
+        s.add(11,41);
+        assertEquals("0..65534", s.toString());
+    }
+
+    public void testMergeWithDoubleOverlap() throws Exception {
+        IntervalSet s = IntervalSet.of(1,10);
+        s.add(20,30);
+        s.add(5,25); // overlaps two!
+        assertEquals("1..30", s.toString());
+    }
+
+    public void testSize() throws Exception {
+        IntervalSet s = IntervalSet.of(20,30);
+        s.add(50,55);
+        s.add(5,19);
+        // 11 + 6 + 15 elements
+        assertEquals(32, s.size());
+    }
+
+    public void testToList() throws Exception {
+        IntervalSet s = IntervalSet.of(20,25);
+        s.add(50,55);
+        s.add(5,5);
+        String expecting = "[5, 20, 21, 22, 23, 24, 25, 50, 51, 52, 53, 54, 55]";
+        assertEquals(expecting, String.valueOf(s.toList()));
+    }
+
+    /** The following was broken:
+        {'\u0000'..'s', 'u'..'\uFFFE'} & {'\u0000'..'q', 's'..'\uFFFE'}=
+        {'\u0000'..'q', 's'}!!!! broken...
+        'q' is 113 ascii
+        'u' is 117
+    */
+    public void testNotRIntersectionNotT() throws Exception {
+        IntervalSet s = IntervalSet.of(0,'s');
+        s.add('u',200);
+        IntervalSet s2 = IntervalSet.of(0,'q');
+        s2.add('s',200);
+        assertEquals("{0..113, 115, 117..200}", s.and(s2).toString());
+    }
+
+}
diff --git a/src/org/antlr/test/TestJavaCodeGeneration.java b/src/org/antlr/test/TestJavaCodeGeneration.java
new file mode 100644
index 0000000..64a23a3
--- /dev/null
+++ b/src/org/antlr/test/TestJavaCodeGeneration.java
@@ -0,0 +1,124 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import junit.framework.TestCase;
+
+/** General code generation testing; compilation and/or execution.
+ *  These tests are more about avoiding duplicate var definitions
+ *  etc... than testing a particular ANTLR feature.
+ */
+public class TestJavaCodeGeneration extends BaseTest {
+
+	public void testDupVarDefForPinchedState() {
+		// so->s2 and s0->s3->s1 pinches back to s1
+		// LA3_1, s1 state for DFA 3, was defined twice in similar scope
+		// just wrapped in curlies and it's cool.
+		String grammar =
+			"grammar t;\n" +
+			"a : (| A | B) X Y\n" +
+			"  | (| A | B) X Z\n" +
+			"  ;\n" ;
+		// generation and compilation must both succeed
+		boolean built = rawGenerateAndBuildRecognizer(
+			"t.g", grammar, "tParser", null, false);
+		assertEquals(true, built);
+	}
+
+	public void testLabeledNotSetsInLexer() {
+		// label d on a ~set must be generated as an int, not a Token
+		String grammar =
+			"lexer grammar t;\n" +
+			"A : d=~('x'|'y') e='0'..'9'\n" +
+			"  ; \n" ;
+		boolean built = rawGenerateAndBuildRecognizer(
+			"t.g", grammar, null, "tLexer", false);
+		assertEquals(true, built);
+	}
+
+	public void testLabeledSetsInLexer() {
+		// label d on a set is the matched char code; printable via (char) cast
+		String grammar =
+			"grammar T;\n" +
+			"a : A ;\n" +
+			"A : d=('x'|'y') {System.out.println((char)$d);}\n" +
+			"  ; \n" ;
+		assertEquals("x\n",
+			execParser("T.g", grammar, "TParser", "TLexer", "a", "x", false));
+	}
+
+	public void testLabeledRangeInLexer() {
+		// label d on a char range is the matched char code
+		String grammar =
+			"grammar T;\n" +
+			"a : A;\n" +
+			"A : d='a'..'z' {System.out.println((char)$d);} \n" +
+			"  ; \n" ;
+		assertEquals("x\n",
+			execParser("T.g", grammar, "TParser", "TLexer", "a", "x", false));
+	}
+
+	public void testLabeledWildcardInLexer() {
+		// label d on the wildcard is the matched char code
+		String grammar =
+			"grammar T;\n" +
+			"a : A;\n" +
+			"A : d=. {System.out.println((char)$d);}\n" +
+			"  ; \n" ;
+		assertEquals("x\n",
+			execParser("T.g", grammar, "TParser", "TLexer", "a", "x", false));
+	}
+
+	public void testSynpredWithPlusLoop() {
+		// a syntactic predicate wrapping a + loop must generate compilable code
+		String grammar =
+			"grammar T; \n" +
+			"a : (('x'+)=> 'x'+)?;\n";
+		boolean built = rawGenerateAndBuildRecognizer(
+			"T.g", grammar, "TParser", "TLexer", false);
+		assertEquals(true, built);
+	}
+
+	public void testDoubleQuoteEscape() {
+		String grammar =
+			"lexer grammar T; \n" +
+			"A : '\\\\\"';\n"; // this is A : '\\"';
+		boolean built = rawGenerateAndBuildRecognizer(
+			"T.g", grammar, null, "TLexer", false);
+		assertEquals(true, built);
+	}
+
+}
diff --git a/src/org/antlr/test/TestLexer.java b/src/org/antlr/test/TestLexer.java
new file mode 100644
index 0000000..3889140
--- /dev/null
+++ b/src/org/antlr/test/TestLexer.java
@@ -0,0 +1,199 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+public class TestLexer extends BaseTest {
+	protected boolean debug = false;
+
+	/** Public default constructor used by TestRig */
+	public TestLexer() {
+	}
+
+	public void testSetText() throws Exception {
+		// setText(...) in a lexer action replaces the matched text:
+		// input "\t" (backslash-t) is rewritten to a real tab before the
+		// parser prints it.
+		String grammar =
+			"grammar P;\n"+
+			"a : A {System.out.println(input);} ;\n"+
+			"A : '\\\\' 't' {setText(\"\t\");} ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "\\t", debug);
+		assertEquals("\t\n", found);
+	}
+
+	public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception {
+		// this must return A not I to the parser; calling a nonfragment rule
+		// from a nonfragment rule does not set the overall token.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF {System.out.println(input);} ;\n"+
+			"A : '-' I ;\n" +
+			"I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "-34", debug);
+		assertEquals("-34\n", found);
+	}
+
+	public void testWeCanSetType() throws Exception {
+		// $type= in a lexer action retargets the emitted token type (X here),
+		// which the parser then matches.
+		String grammar =
+			"grammar P;\n"+
+			"tokens {X;}\n" +
+			"a : X EOF {System.out.println(input);} ;\n"+
+			"A : '-' I {$type = X;} ;\n" +
+			"I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "-34", debug);
+		assertEquals("-34\n", found);
+	}
+
+	public void testRefToFragment() throws Exception {
+		// this must return A not I to the parser; calling a nonfragment rule
+		// from a nonfragment rule does not set the overall token.
+		String grammar =
+			"grammar P;\n"+
+			"a : A {System.out.println(input);} ;\n"+
+			"A : '-' I ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "-34", debug);
+		assertEquals("-34\n", found);
+	}
+
+	public void testMultipleRefToFragment() throws Exception {
+		// this must return A not I to the parser; calling a nonfragment rule
+		// from a nonfragment rule does not set the overall token.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF {System.out.println(input);} ;\n"+
+			"A : I '.' I ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "3.14159", debug);
+		assertEquals("3.14159\n", found);
+	}
+
+	public void testLabelInSubrule() throws Exception {
+		// can we see v outside?
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF ;\n"+
+			"A : 'hi' WS (v=I)? {$channel=0; System.out.println($v.text);} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "hi 342", debug);
+		assertEquals("342\n", found);
+	}
+
+	public void testRefToTokenInLexer() throws Exception {
+		// $I.text inside rule A must resolve to the text matched by the
+		// fragment-rule reference.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF ;\n"+
+			"A : I {System.out.println($I.text);} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "342", debug);
+		assertEquals("342\n", found);
+	}
+
+	public void testListLabelInLexer() throws Exception {
+		// i+=I inside a + loop collects every match into list $i
+		String grammar =
+			"grammar P;\n"+
+			"a : A ;\n"+
+			"A : i+=I+ {for (Object t : $i) System.out.print(\" \"+((Token)t).getText());} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "33 297", debug);
+		assertEquals(" 33 297\n", found);
+	}
+
+	public void testDupListRefInLexer() throws Exception {
+		// the same list label i+= used twice in one rule must accumulate both
+		String grammar =
+			"grammar P;\n"+
+			"a : A ;\n"+
+			"A : i+=I WS i+=I {$channel=0; for (Object t : $i) System.out.print(\" \"+((Token)t).getText());} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "33 297", debug);
+		assertEquals(" 33 297\n", found);
+	}
+
+	public void testCharLabelInLexer() {
+		// a label on a char literal is the char code (int), castable to char
+		String grammar =
+			"grammar T;\n" +
+			"a : B ;\n" +
+			"B : x='a' {System.out.println((char)$x);} ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	public void testRepeatedLabelInLexer() {
+		// reusing label x on two char refs must still generate compilable code
+		String grammar =
+			"lexer grammar t;\n" +
+			"B : x='a' x='b' ;\n" ;
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"t.g", grammar, null, "tLexer", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	public void testRepeatedRuleLabelInLexer() {
+		// reusing label x on two rule refs must still generate compilable code
+		String grammar =
+			"lexer grammar t;\n" +
+			"B : x=A x=A ;\n" +
+			"fragment A : 'a' ;\n" ;
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"t.g", grammar, null, "tLexer", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	public void testIsolatedEOTEdge() {
+		// an isolated end-of-token edge in the DFA must not break codegen
+		String grammar =
+			"lexer grammar T;\n" +
+			"QUOTED_CONTENT \n" +
+			"        : 'q' (~'q')* (('x' 'q') )* 'q' ; \n";
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "TLexer", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}	
+
+}
diff --git a/src/org/antlr/test/TestMessages.java b/src/org/antlr/test/TestMessages.java
new file mode 100644
index 0000000..e527b04
--- /dev/null
+++ b/src/org/antlr/test/TestMessages.java
@@ -0,0 +1,46 @@
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.codegen.ActionTranslatorLexer;
+import org.antlr.tool.*;
+
+
+/** Checks that stringifying an error message is deterministic: rendering
+ *  the same GrammarSemanticsMessage twice must yield identical text
+ *  (i.e. toString() must not consume or mutate its underlying template). */
+public class TestMessages extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestMessages() {
+	}
+
+
+	public void testMessageStringificationIsConsistent() throws Exception {
+		// Action writes to $other.tree, a read-only attribute, which is the
+		// error condition whose message we stringify below.
+		String action = "$other.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"options { output = AST;}" +
+			"otherrule\n" +
+			"    : 'y' ;" +
+			"rule\n" +
+			"    : other=otherrule {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	"rule",
+																	new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		// NOTE(review): rawTranslation is never read; translate() is
+		// presumably invoked for its side effect of reporting the
+		// write-to-readonly-attr error — confirm before removing.
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "other";
+		Object expectedArg2 = "tree";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		// Render once, then render again: both renderings must match.
+		String expectedMessageString = expectedMessage.toString();
+		assertEquals(expectedMessageString, expectedMessage.toString());
+	}
+}
diff --git a/src/org/antlr/test/TestNFAConstruction.java b/src/org/antlr/test/TestNFAConstruction.java
new file mode 100644
index 0000000..5d8f103
--- /dev/null
+++ b/src/org/antlr/test/TestNFAConstruction.java
@@ -0,0 +1,1192 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.analysis.State;
+import org.antlr.tool.FASerializer;
+import org.antlr.tool.Grammar;
+
+public class TestNFAConstruction extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestNFAConstruction() {
+    }
+
+    public void testA() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+				"a : A;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testAB() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : A B ;");
+        String expecting =
+                ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A->.s3\n" +
+			".s3-B->.s4\n" +
+			".s4->:s5\n" +
+			":s5-EOF->.s6\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testAorB() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : A | B {;} ;");
+        /* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
+                                |                            ^
+                               (6)--Ep-->(7)--B-->(8)--------|
+         */
+        String expecting =
+                ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s7\n" +
+			".s2-A->.s3\n" +
+			".s3->.s4\n" +
+			".s4->:s5\n" +
+			".s7->.s8\n" +
+			".s8-B->.s9\n" +
+			".s9->.s4\n" +
+			":s5-EOF->.s6\n";
+        checkRule(g, "a", expecting);
+    }
+
+	public void testRangeOrRange() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"A : ('a'..'c' 'h' | 'q' 'j'..'l') ;"
+		);
+        String expecting =
+                ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10-'q'->.s11\n" +
+			".s11-'j'..'l'->.s12\n" +
+			".s12->.s6\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3-'a'..'c'->.s4\n" +
+			".s4-'h'->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			":s7-<EOT>->.s8\n";
+        checkRule(g, "A", expecting);
+	}
+
+	public void testRange() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"A : 'a'..'c' ;"
+		);
+        String expecting =
+                ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a'..'c'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+        checkRule(g, "A", expecting);
+	}
+
+	public void testCharSetInParser() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : A|'b' ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A..'b'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testABorCD() throws Exception {
+			Grammar g = new Grammar(
+					"parser grammar P;\n"+
+					"a : A B | C D;");
+        String expecting =
+                ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s8\n" +
+			".s10-D->.s11\n" +
+			".s11->.s5\n" +
+			".s2-A->.s3\n" +
+			".s3-B->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s9\n" +
+			".s9-C->.s10\n" +
+			":s6-EOF->.s7\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testbA() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : b A ;\n"+
+                "b : B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s5\n" +
+			".s5-B->.s6\n" +
+			".s6->:s7\n" +
+			".s8-A->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n" +
+			":s7->.s8\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testbA_bC() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : b A ;\n"+
+                "b : B ;\n"+
+                "c : b C;");
+        String expecting =
+                ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s12->.s13\n" +
+			".s13-C->.s14\n" +
+			".s14->:s15\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s5\n" +
+			".s5-B->.s6\n" +
+			".s6->:s7\n" +
+			".s8-A->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n" +
+			":s15-EOF->.s16\n" +
+			":s7->.s12\n" +
+			":s7->.s8\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testAorEpsilon() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : A | ;");
+        /* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
+                                |                            ^
+                               (6)--Ep-->(7)--Ep-->(8)-------|
+         */
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s7\n" +
+			".s2-A->.s3\n" +
+			".s3->.s4\n" +
+			".s4->:s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s4\n" +
+			":s5-EOF->.s6\n";
+        checkRule(g, "a", expecting);
+    }
+
+	public void testAOptional() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (A)?;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testNakedAoptional() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : A?;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+    public void testAorBthenC() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : (A | B) C;");
+        /* expecting
+
+        (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5)--C-->(6)--Ep-->(7,end)
+                   |                            ^
+                  (8)--Ep-->(9)--B-->(10)-------|
+         */
+    }
+
+	public void testAplus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (A)+;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testNakedAplus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : A+;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAplusNonGreedy() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"A : (options {greedy=false;}:'0'..'9')+ ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-'0'..'9'->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+	}
+
+    public void testAorBplus() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : (A | B{action})+ ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testAorBorEmptyPlus() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : (A | B | )+ ;");
+        String expecting =
+            ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s10->.s13\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s13->.s14\n" +
+			".s14->.s15\n" +
+			".s15->.s6\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+        checkRule(g, "a", expecting);
+    }
+
+	public void testAStar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (A)*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testNestedAstar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (A*)*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->:s11\n" +
+			".s13->.s8\n" +
+			".s14->.s10\n" +
+			".s2->.s14\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s13\n" +
+			".s4->.s5\n" +
+			".s5->.s6\n" +
+			".s6-A->.s7\n" +
+			".s7->.s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s10\n" +
+			".s9->.s3\n" +
+			":s11-EOF->.s12\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testPlusNestedInStar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (A+)*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->:s11\n" +
+			".s13->.s10\n" +
+			".s2->.s13\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s5\n" +
+			".s5->.s6\n" +
+			".s6-A->.s7\n" +
+			".s7->.s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s10\n" +
+			".s9->.s3\n" +
+			":s11-EOF->.s12\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testStarNestedInPlus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (A*)+;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->:s11\n" +
+			".s13->.s8\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s13\n" +
+			".s4->.s5\n" +
+			".s5->.s6\n" +
+			".s6-A->.s7\n" +
+			".s7->.s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s10\n" +
+			".s9->.s3\n" +
+			":s11-EOF->.s12\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testNakedAstar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : A*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+    public void testAorBstar() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : (A | B{action})* ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s13->.s7\n" +
+			".s2->.s13\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testAorBOptionalSubrule() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : ( A | B )? ;");
+        String expecting =
+            ".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A..B->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testPredicatedAorB() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : {p1}? A | {p2}? B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s8\n" +
+			".s10-B->.s11\n" +
+			".s11->.s5\n" +
+			".s2-{p1}?->.s3\n" +
+			".s3-A->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s9\n" +
+			".s9-{p2}?->.s10\n" +
+			":s6-EOF->.s7\n";
+        checkRule(g, "a", expecting);
+    }
+
+    public void testMultiplePredicates() throws Exception {
+        Grammar g = new Grammar(
+                "parser grammar P;\n"+
+                "a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" +
+                "b : {p4}? B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s9\n" +
+			".s10-{p2}?->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s13->.s14\n" +
+			".s14->.s15\n" +
+			".s15->.s16\n" +
+			".s16->.s17\n" +
+			".s17-{p4}?->.s18\n" +
+			".s18-B->.s19\n" +
+			".s19->:s20\n" +
+			".s2-{p1}?->.s3\n" +
+			".s21->.s6\n" +
+			".s3-{p1a}?->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			".s9->.s13\n" +
+			":s20->.s21\n" +
+			":s7-EOF->.s8\n";
+        checkRule(g, "a", expecting);
+	}
+
+	public void testSets() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( A | B )+ ;\n" +
+			"b : ( A | B{;} )+ ;\n" +
+			"c : (A|B) (A|B) ;\n" +
+			"d : ( A | B )* ;\n" +
+			"e : ( A | B )? ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-A..B->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "b", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A..B->.s3\n" +
+			".s3-A..B->.s4\n" +
+			".s4->:s5\n" +
+			":s5-EOF->.s6\n";
+		checkRule(g, "c", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-A..B->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "d", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A..B->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "e", expecting);
+	}
+
+	public void testNotSet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"tokens { A; B; C; }\n"+
+			"a : ~A ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-B..C->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+
+		String expectingGrammarStr =
+			"1:8: parser grammar P;\n" +
+			"a : ~ A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testNotSingletonBlockSet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"tokens { A; B; C; }\n"+
+			"a : ~(A) ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-B..C->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+
+		String expectingGrammarStr =
+			"1:8: parser grammar P;\n" +
+			"a : ~ ( A ) ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testNotCharSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~'3' ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'\\uFFFE'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+				"A : ~ '3' ;\n"+
+				"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testNotBlockSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~('3'|'b') ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFE'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : ~ ( '3' | 'b' ) ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testNotSetLoop() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~('3')* ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-{'\\u0000'..'2', '4'..'\\uFFFE'}->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : (~ ( '3' ) )* ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testNotBlockSetLoop() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~('3'|'b')* ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFE'}->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : (~ ( '3' | 'b' ) )* ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testSetsInCombinedGrammarSentToLexer() throws Exception {
+		// not sure this belongs in this test suite, but whatever.
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"A : '{' ~('}')* '}';\n");
+		String result = g.getLexerGrammar();
+		String expecting =
+			"lexer grammar t;\n" +
+			"\n" +
+			"// $ANTLR src \"<string>\" 2\n"+
+			"A : '{' ~('}')* '}';\n";
+		assertEquals(result, expecting);
+	}
+
+	public void testLabeledNotSet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"tokens { A; B; C; }\n"+
+			"a : t=~A ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-B..C->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+
+		String expectingGrammarStr =
+			"1:8: parser grammar P;\n" +
+			"a : t=~ A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testLabeledNotCharSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : t=~'3' ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'\\uFFFE'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+				"A : t=~ '3' ;\n"+
+				"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testLabeledNotBlockSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : t=~('3'|'b') ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFE'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : t=~ ( '3' | 'b' ) ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	public void testEscapedCharLiteral() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : '\\n';");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'\\n'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testEscapedStringLiteral() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a\\nb\\u0030c\\'';");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a\\nb\\u0030c\\''->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	// AUTO BACKTRACKING STUFF
+
+	public void testAutoBacktracking_RuleBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : 'a'{;}|'b';"
+		);
+		String expecting =
+			".s0->.s1\n" +
+				".s1->.s2\n" +
+				".s1->.s8\n" +
+				".s10->.s5\n" +
+				".s2-{synpred1}?->.s3\n" +
+				".s3-'a'->.s4\n" +
+				".s4->.s5\n" +
+				".s5->:s6\n" +
+				".s8->.s9\n" +
+				".s9-'b'->.s10\n" +
+				":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_RuleSetBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : 'a'|'b';"
+		);
+		String expecting =
+			".s0->.s1\n" +
+				".s1->.s2\n" +
+				".s2-'a'..'b'->.s3\n" +
+				".s3->:s4\n" +
+				":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_SimpleBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'{;}|'b') ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10-'b'->.s11\n" +
+			".s11->.s6\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3-{synpred1}?->.s4\n" +
+			".s4-'a'->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_SetBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'|'b') ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a'..'b'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_StarBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'{;}|'b')* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+				".s1->.s2\n" +
+				".s11->.s12\n" +
+				".s12-{synpred2}?->.s13\n" +
+				".s13-'b'->.s14\n" +
+				".s14->.s7\n" +
+				".s15->.s8\n" +
+				".s2->.s15\n" +
+				".s2->.s3\n" +
+				".s3->.s11\n" +
+				".s3->.s4\n" +
+				".s4-{synpred1}?->.s5\n" +
+				".s5-'a'->.s6\n" +
+				".s6->.s7\n" +
+				".s7->.s3\n" +
+				".s7->.s8\n" +
+				".s8->:s9\n" +
+				":s9-EOF->.s10\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'|'b')* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-'a'..'b'->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_StarSetBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'|'b'{;})* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s11->.s12\n" +
+			".s12-{synpred2}?->.s13\n" +
+			".s13-'b'->.s14\n" +
+			".s14->.s7\n" +
+			".s15->.s8\n" +
+			".s2->.s15\n" +
+			".s2->.s3\n" +
+			".s3->.s11\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s7\n" +
+			".s7->.s3\n" +
+			".s7->.s8\n" +
+			".s8->:s9\n" +
+			":s9-EOF->.s10\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_StarBlock1Alt() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a')* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s7\n" +
+			".s2->.s10\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_PlusBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'{;}|'b')+ ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+				".s1->.s2\n" +
+				".s11->.s12\n" +
+				".s12-{synpred2}?->.s13\n" +
+				".s13-'b'->.s14\n" +
+				".s14->.s7\n" +
+				".s2->.s3\n" +
+				".s3->.s11\n" +
+				".s3->.s4\n" +
+				".s4-{synpred1}?->.s5\n" +
+				".s5-'a'->.s6\n" +
+				".s6->.s7\n" +
+				".s7->.s3\n" +
+				".s7->.s8\n" +
+				".s8->:s9\n" +
+				":s9-EOF->.s10\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_PlusSetBlock() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'|'b'{;})+ ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s11->.s12\n" +
+			".s12-{synpred2}?->.s13\n" +
+			".s13-'b'->.s14\n" +
+			".s14->.s7\n" +
+			".s2->.s3\n" +
+			".s3->.s11\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s7\n" +
+			".s7->.s3\n" +
+			".s7->.s8\n" +
+			".s8->:s9\n" +
+			":s9-EOF->.s10\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_PlusBlock1Alt() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a')+ ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_OptionalBlock2Alts() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a'{;}|'b')?;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+				".s1->.s2\n" +
+				".s10-{synpred2}?->.s11\n" +
+				".s11-'b'->.s12\n" +
+				".s12->.s6\n" +
+				".s13->.s6\n" +
+				".s2->.s3\n" +
+				".s2->.s9\n" +
+				".s3-{synpred1}?->.s4\n" +
+				".s4-'a'->.s5\n" +
+				".s5->.s6\n" +
+				".s6->:s7\n" +
+				".s9->.s10\n" +
+				".s9->.s13\n" +
+				":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_OptionalBlock1Alt() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a')?;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3-{synpred1}?->.s4\n" +
+			".s4-'a'->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	public void testAutoBacktracking_ExistingPred() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"options {backtrack=true;}\n"+
+				"a : ('a')=> 'a' | 'b';"
+		);
+		String expecting =
+			".s0->.s1\n" +
+				".s1->.s2\n" +
+				".s1->.s8\n" +
+				".s10->.s5\n" +
+				".s2-{synpred1}?->.s3\n" +
+				".s3-'a'->.s4\n" +
+				".s4->.s5\n" +
+				".s5->:s6\n" +
+				".s8->.s9\n" +
+				".s9-'b'->.s10\n" +
+				":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	private void checkRule(Grammar g, String rule, String expecting)
+    {
+        g.createNFAs();
+        State startState = g.getRuleStartState(rule);
+        FASerializer serializer = new FASerializer(g);
+        String result = serializer.serialize(startState);
+
+        //System.out.print(result);
+        assertEquals(expecting, result);
+    }
+
+}
diff --git a/src/org/antlr/test/TestRewriteAST.java b/src/org/antlr/test/TestRewriteAST.java
new file mode 100644
index 0000000..b68da12
--- /dev/null
+++ b/src/org/antlr/test/TestRewriteAST.java
@@ -0,0 +1,1273 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.antlr.tool.Message;
+import org.antlr.tool.Grammar;
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+
+public class TestRewriteAST extends BaseTest {
+	protected boolean debug = false;
+
+	public void testDelete() throws Exception { // "-> " with empty RHS rewrites the matched tokens away; parser emits nothing
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("", found);
+	}
+
+	public void testSingleToken() throws Exception { // "-> ID" keeps the single matched token as the AST
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testSingleTokenToNewNode() throws Exception { // ID["x"] builds a new node with overridden text "x"
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID[\"x\"];\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testSingleTokenToNewNodeRoot() throws Exception { // ^(ID["x"] INT): new text node as root, imaginary INT child (no INT matched)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ^(ID[\"x\"] INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("(x INT)\n", found);
+	}
+
+	public void testSingleTokenToNewNode2() throws Exception { // ID[ ] — empty arg list — should still create a node carrying the matched text
+		// currently this Fails.  Allow creation of new nodes w/o args.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID[ ];\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testSingleCharLiteral() throws Exception { // char literal can be referenced on the rewrite RHS like a token
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'c' -> 'c';\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "c", debug);
+		assertEquals("c\n", found);
+	}
+
+	public void testSingleStringLiteral() throws Exception { // multi-char string literal works on the rewrite RHS too
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'ick' -> 'ick';\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "ick", debug);
+		assertEquals("ick\n", found);
+	}
+
+	public void testSingleRule() throws Exception { // "-> b" copies the subtree returned by rule reference b
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b -> b;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testReorderTokens() throws Exception { // rewrite can emit matched tokens in a different order than matched
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> INT ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34 abc\n", found);
+	}
+
+	public void testReorderTokenAndRule() throws Exception { // mixing a token ref and a rule ref in reordered rewrite output
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b INT -> INT b;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34 abc\n", found);
+	}
+
+	public void testTokenTree() throws Exception { // ^(INT ID) builds a tree with INT as root and ID as its child
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(INT ID);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("(34 abc)\n", found);
+	}
+
+	public void testTokenTreeAfterOtherStuff() throws Exception { // flat element then a tree: 'void' sibling followed by ^(INT ID)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'void' ID INT -> 'void' ^(INT ID);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "void abc 34", debug);
+		assertEquals("void (34 abc)\n", found);
+	}
+
+	public void testNestedTokenTreeWithOuterLoop() throws Exception { // (...)+ on a nested tree: inner ID/INT must advance with the outer iteration
+		// verify that ID and INT both iterate over outer index variable
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {DUH;}\n" +
+			"a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 1 b 2", debug);
+		assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))\n", found);
+	}
+
+	public void testOptionalSingleToken() throws Exception { // ID? on RHS emits the token when it was matched
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID? ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testClosureSingleToken() throws Exception { // ID* on RHS drains every buffered ID token
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ID -> ID* ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testPositiveClosureSingleToken() throws Exception { // ID+ on RHS: same as ID* here, but requires at least one ID
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ID -> ID+ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testOptionalSingleRule() throws Exception { // b? on RHS emits rule b's tree when the rule matched
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b -> b?;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testClosureSingleRule() throws Exception { // b* on RHS drains all buffered results of rule b
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b b -> b*;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testClosureOfLabel() throws Exception { // list label x+= collected twice; $x* replays the whole list
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x+=b x+=b -> $x*;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testOptionalLabelNoListLabel() throws Exception { // plain (non-list) label under ?, referenced as $x? on RHS
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : (x=ID)? -> $x?;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	public void testPositiveClosureSingleRule() throws Exception { // b+ on RHS drains rule b results, at least one required
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b b -> b+;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testSinglePredicateT() throws Exception { // {true}? predicate selects the first rewrite alternative (ID kept)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> {true}? ID -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testSinglePredicateF() throws Exception { // {false}? falls through to the empty default rewrite (tree deleted)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> {false}? ID -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("", found);
+	}
+
+	public void testMultiplePredicate() throws Exception { // chained predicated rewrites: first true predicate wins (INT branch)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> {false}? ID\n" +
+			"           -> {true}? INT\n" +
+			"           -> \n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 2", debug);
+		assertEquals("2\n", found);
+	}
+
+	public void testMultiplePredicateTrees() throws Exception { // predicated rewrites choosing among tree-shaped alternatives
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> {false}? ^(ID INT)\n" +
+			"           -> {true}? ^(INT ID)\n" +
+			"           -> ID\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 2", debug);
+		assertEquals("(2 a)\n", found);
+	}
+
+	public void testSimpleTree() throws Exception { // rule reference (op) can be the root of a rewrite tree
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : op INT -> ^(op INT);\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "-34", debug);
+		assertEquals("(- 34)\n", found);
+	}
+
+	public void testSimpleTree2() throws Exception { // rule reference (op) as a child, token as root
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : op INT -> ^(INT op);\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "+ 34", debug);
+		assertEquals("(34 +)\n", found);
+	}
+
+
+	public void testNestedTrees() throws Exception { // nested tree with inner (...)+: one ^(':' ID type) per declaration
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "var a:int; b:float;", debug);
+		assertEquals("(var (: a int) (: b float))\n", found);
+	}
+
+	public void testImaginaryTokenCopy() throws Exception { // imaginary VAR root is duplicated for each (...)+ iteration
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n" +
+			"a : ID (',' ID)*-> ^(VAR ID)+ ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a,b,c", debug);
+		assertEquals("(VAR a) (VAR b) (VAR c)\n", found);
+	}
+
+	public void testTokenUnreferencedOnLeftButDefined() throws Exception { // ID on RHS only: a fresh imaginary ID node is created
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n" +
+			"a : b -> ID ;\n" +
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("ID\n", found);
+	}
+
+	public void testImaginaryTokenCopySetText() throws Exception { // VAR["var"] roots carry the overridden text in every copy
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n" +
+			"a : ID (',' ID)*-> ^(VAR[\"var\"] ID)+ ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a,b,c", debug);
+		assertEquals("(var a) (var b) (var c)\n", found);
+	}
+
+	public void testImaginaryTokenNoCopyFromToken() throws Exception { // BLOCK[$lc] built from labeled '{' token keeps that token's text
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "{a b c}", debug);
+		assertEquals("({ a b c)\n", found);
+	}
+
+	public void testImaginaryTokenNoCopyFromTokenSetText() throws Exception { // BLOCK[$lc,"block"]: token position from $lc, text overridden
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : lc='{' ID+ '}' -> ^(BLOCK[$lc,\"block\"] ID+) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "{a b c}", debug);
+		assertEquals("(block a b c)\n", found);
+	}
+
+	public void testMixedRewriteAndAutoAST() throws Exception { // rewrite in one alt of b, auto-AST (with ^ root op) elsewhere
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : b b^ ;\n" + // 2nd b matches only an INT; can make it root
+			"b : ID INT -> INT ID\n" +
+			"  | INT\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 1 2", debug);
+		assertEquals("(2 1 a)\n", found);
+	}
+
+	public void testSubruleWithRewrite() throws Exception { // rewrites attached to alternatives inside a subrule block
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : b b ;\n" +
+			"b : (ID INT -> INT ID | INT INT -> INT+ )\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 1 2 3", debug);
+		assertEquals("1 a 2 3\n", found);
+	}
+
+	public void testSubruleWithRewrite2() throws Exception { // subrule rewrites referencing tokens matched before the subrule ('int')
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {TYPE;}\n" +
+			"a : b b ;\n" +
+			"b : 'int'\n" +
+			"    ( ID -> ^(TYPE 'int' ID)\n" +
+			"    | ID '=' INT -> ^(TYPE 'int' ID INT)\n" +
+			"    )\n" +
+			"    ';'\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a; int b=3;", debug);
+		assertEquals("(TYPE int a) (TYPE int b 3)\n", found);
+	}
+
+	public void testNestedRewriteShutsOffAutoAST() throws Exception { // a rewrite inside a loop disables auto-AST for the whole alt
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : b b ;\n" +
+			"b : ID ( ID (last=ID -> $last)+ ) ';'\n" + // get last ID
+			"  | INT\n" + // should still get auto AST construction
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b c d; 42", debug);
+		assertEquals("d 42\n", found);
+	}
+
+	public void testRewriteActions() throws Exception { // embedded {action} producing a node used as a tree root in the rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : atom -> ^({adaptor.create(INT,\"9\")} atom) ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3", debug);
+		assertEquals("(9 3)\n", found);
+	}
+
+	public void testRewriteActions2() throws Exception { // embedded {action} node as a flat sibling rather than a root
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : atom -> {adaptor.create(INT,\"9\")} atom ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3", debug);
+		assertEquals("9 3\n", found);
+	}
+
+	public void testRefToOldValue() throws Exception { // $a refers to the rule's previously-built tree, yielding left-assoc nesting
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3+4+5", debug);
+		assertEquals("(+ (+ 3 4) 5)\n", found);
+	}
+
+	public void testCopySemanticsForRules() throws Exception { // same rule ref used twice in one rewrite must be duplicated, not shared
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : atom -> ^(atom atom) ;\n" + // NOT CYCLE! (dup atom)
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3", debug);
+		assertEquals("(3 3)\n", found);
+	}
+
+	public void testCopySemanticsForRules2() throws Exception { // single `type` subtree copied as root for each (...)+ iteration
+		// copy type as a root for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : type ID (',' ID)* ';' -> ^(type ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a,b,c;", debug);
+		assertEquals("(int a) (int b) (int c)\n", found);
+	}
+
+	public void testCopySemanticsForRules3() throws Exception { // both type and the optional modifier get copied into every tree
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"modifier : 'public' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "public int a,b,c;", debug);
+		assertEquals("(int public a) (int public b) (int public c)\n", found);
+	}
+
+	public void testCopySemanticsForRules3Double() throws Exception { // two (...)+ rewrites in sequence each replay the full element lists
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"modifier : 'public' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "public int a,b,c;", debug);
+		assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)\n", found);
+	}
+
+	public void testCopySemanticsForRules4() throws Exception { // optional nested tree ^(MOD modifier)? duplicated under each copied root
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {MOD;}\n" +
+			"a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"modifier : 'public' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "public int a,b,c;", debug);
+		assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)\n", found);
+	}
+
+	public void testCopySemanticsLists() throws Exception { // "ID+ ID+" replays the whole token list twice — lists are copied, not consumed once
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {MOD;}\n" +
+			"a : ID (',' ID)* ';' -> ID+ ID+ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a,b,c;", debug);
+		assertEquals("a b c a b c\n", found);
+	}
+
+	public void testCopyRuleLabel() throws Exception { // referencing a rule label ($x) twice duplicates its subtree
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b -> $x $x;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a a\n", found);
+	}
+
+	public void testCopyRuleLabel2() throws Exception { // duplicated rule label where one copy is the tree root: ^($x $x)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b -> ^($x $x);\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("(a a)\n", found);
+	}
+
+	public void testQueueingOfTokens() throws Exception { // all IDs matched in the loop are queued and emitted under one 'int' root
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a,b,c;", debug);
+		assertEquals("(int a b c)\n", found);
+	}
+
+	public void testCopyOfTokens() throws Exception { // tokens matched once may appear multiple times on the RHS (duplicated nodes)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID ';' -> 'int' ID 'int' ID ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a;", debug);
+		assertEquals("int a int a\n", found);
+	}
+
+	public void testTokenCopyInLoop() throws Exception { // single 'int' token is copied as root for each ID in the (...)+ rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a,b,c;", debug);
+		assertEquals("(int a) (int b) (int c)\n", found);
+	}
+
+	public void testTokenCopyInLoopAgainstTwoOthers() throws Exception { // copied 'int' root paired with two iterated children (ID and INT)
+		// must smear 'int' copies across as root of multiple trees
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a:1,b:2,c:3;", debug);
+		assertEquals("(int a 1) (int b 2) (int c 3)\n", found);
+	}
+
+	public void testListRefdOneAtATime() throws Exception { // repeated bare ID refs each pull the next element from the ID list
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID+ -> ID ID ID ;\n" + // works if 3 input IDs
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b c", debug);
+		assertEquals("a b c\n", found);
+	}
+
+	public void testSplitListWithLabels() throws Exception { // first token labeled separately; remainder collected with += and replayed via $others+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n"+
+			"a : first=ID others+=ID* -> $first VAR $others+ ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b c", debug);
+		assertEquals("a VAR b c\n", found);
+	}
+
+	public void testComplicatedMelange() throws Exception { // mix of plain refs, relabeled tokens, += lists, and an action in one rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : A A b=B B b=B c+=C C c+=C D {$D.text;} -> A+ B+ C+ D ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"A : 'a' ;\n" +
+			"B : 'b' ;\n" +
+			"C : 'c' ;\n" +
+			"D : 'd' ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a a b b b c c c d", debug);
+		assertEquals("a a b b b c c c d\n", found);
+	}
+
+	public void testRuleLabel() throws Exception { // $x on RHS emits the tree of the labeled rule reference
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b -> $x;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	public void testRuleListLabel() throws Exception { // $x+ drains the rule list label built with x+=
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=b x+=b -> $x+;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testRuleListLabel2() throws Exception { // "$x $x*": first element explicitly, then the remainder of the list
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=b x+=b -> $x $x*;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testOptional() throws Exception { // $y? emits nothing when the optional (y=b)? did not match
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b (y=b)? -> $x $y?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	public void testOptional2() throws Exception { // token label plus optional rule label; both present on this input
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=ID (y=b)? -> $x $y?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testOptional3() throws Exception { // whole subtemplate ($x $y)? is optional as a unit
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=ID (y=b)? -> ($x $y)?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testOptional4() throws Exception { // same optional subtemplate but $x is a list label (x+=ID)
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=ID (y=b)? -> ($x $y)?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	public void testOptional5() throws Exception { // unlabeled token under ? on the RHS, matched once on the left
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : ID -> ID? ;\n"+ // match an ID to optional ID
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	public void testArbitraryExprType() throws Exception { // rewrite RHS is an arbitrary {expression}; empty CommonTree prints "nil"
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=b x+=b -> {new CommonTree()};\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		//assertEquals("[not sure what this should be!]\n", found);
+//ATTENTION: I changed this one's behavior from the above.  Is it right?
+		assertEquals("nil\n", found);
+	}
+
+	public void testSet() throws Exception { // tokens matched via a set alternative (INT|ID)+ still land in their per-type lists
+		String grammar =
+			"grammar T;\n" +
+			"options { output = AST; } \n" +
+			"a: (INT|ID)+ -> INT+ ID+ ;\n" +
+			"INT: '0'..'9'+;\n" +
+			"ID : 'a'..'z'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "2 a 34 de", debug);
+		assertEquals("2 34 a de\n", found);
+	}
+
+	public void testRewriteAction() throws Exception { // action builds a brand-new FLOAT node from $INT.text
+		String grammar =
+			"grammar T; \n" +
+			"options { output = AST; }\n" +
+			"tokens { FLOAT; }\n" +
+			"r\n" +
+			"    : INT -> {new CommonTree(new CommonToken(FLOAT,$INT.text+\".0\"))} \n" +
+			"    ; \n" +
+			"INT : '0'..'9'+; \n" +
+			"WS: (' ' | '\\n' | '\\t')+ {$channel = HIDDEN;}; \n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "r", "25", debug);
+		assertEquals("25.0\n", found);
+	}
+
+	public void testOptionalSubruleWithoutRealElements() throws Exception {
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;} \n" +
+			"tokens {PARMS;} \n" +
+			"\n" +
+			"modulo \n" +
+			" : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?) \n" +
+			" ; \n" +
+			"parms : '#'|ID; \n" +
+			"ID : ('a'..'z' | 'A'..'Z')+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "modulo", "modulo abc (x y #)", debug);
+		assertEquals("(modulo abc (PARMS x y #))\n", found);
+	}
+
+	// C A R D I N A L I T Y  I S S U E S
+
+	public void testCardinality() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : ID ID INT INT INT -> (ID INT)+;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+; \n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b 3 4 5", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteCardinalityException: token ID";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	public void testCardinality2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID+ -> ID ID ID ;\n" + // only 2 input IDs
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "a b", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteCardinalityException: token ID";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	public void testCardinality3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID? INT -> ID INT ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "3", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteEmptyStreamException: token ID";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	public void testLoopCardinality() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID? INT -> ID+ INT ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "3", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteEarlyExitException";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	// E R R O R S
+
+	public void testUnknownRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> ugh ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
+		Object expectedArg = "ugh";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testKnownRuleButNotInLHS() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> b ;\n" +
+			"b : 'b' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS;
+		Object expectedArg = "b";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testUnknownToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> ICK ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
+		Object expectedArg = "ICK";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testUnknownLabel() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> $foo ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE;
+		Object expectedArg = "foo";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testUnknownCharLiteralToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> 'a' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
+		Object expectedArg = "'a'";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testUnknownStringLiteralToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> 'foo' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
+		Object expectedArg = "'foo'";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	// S U P P O R T
+
+	protected void checkError(ErrorQueue equeue,
+							  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		//System.out.println("errors="+equeue);
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = (Message)equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
+		assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
+		assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		assertEquals(expectedMessage.arg2, foundMsg.arg2);
+		ErrorManager.resetErrorState(); // wack errors for next test
+	}
+
+}
diff --git a/src/org/antlr/test/TestRewriteTemplates.java b/src/org/antlr/test/TestRewriteTemplates.java
new file mode 100644
index 0000000..fbe7498
--- /dev/null
+++ b/src/org/antlr/test/TestRewriteTemplates.java
@@ -0,0 +1,319 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+
+public class TestRewriteTemplates extends BaseTest {
+	protected boolean debug = false;
+
+	public void testDelete() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("", found);
+	}
+
+	public void testAction() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> {new StringTemplate($ID.text)} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testEmbeddedLiteralConstructor() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> {%{$ID.text}} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc\n", found);
+	}
+
+	public void testInlineTemplate() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> template(x={$ID},y={$INT}) <<x:<x.text>, y:<y.text>;>> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("x:abc, y:34;\n", found);
+	}
+
+	public void testNamedTemplate() throws Exception {
+		// the support code adds template group in its output Test.java
+		// that defines template foo.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> foo(x={$ID.text},y={$INT.text}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	public void testIndirectTemplate() throws Exception {
+		// the support code adds template group in its output Test.java
+		// that defines template foo.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> ({\"foo\"})(x={$ID.text},y={$INT.text}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	public void testInlineTemplateInvokingLib() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> template(x={$ID.text},y={$INT.text}) \"<foo(...)>\" ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	public void testPredicatedAlts() throws Exception {
+		// the support code adds template group in its output Test.java
+		// that defines template foo.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> {false}? foo(x={$ID.text},y={$INT.text})\n" +
+			"           -> foo(x={\"hi\"}, y={$ID.text})\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("hi abc\n", found);
+	}
+
+	public void testTemplateReturn() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : b {System.out.println($b.st);} ;\n" +
+			"b : ID INT -> foo(x={$ID.text},y={$INT.text}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	public void testReturnValueWithTemplate() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : b {System.out.println($b.i);} ;\n" +
+			"b returns [int i] : ID INT {$i=8;} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("8\n", found);
+	}
+
+	public void testTemplateRefToDynamicAttributes() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a scope {String id;} : ID {$a::id=$ID.text;} b\n" +
+			"	{System.out.println($b.st.toString());}\n" +
+			"   ;\n" +
+			"b : INT -> foo(x={$a::id}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc \n", found);
+	}
+
+	// tests for rewriting templates in tree parsers
+
+	public void testSingleNode() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template;}\n" +
+			"s : a {System.out.println($a.st);} ;\n" +
+			"a : ID -> template(x={$ID.text}) <<|<x>|>> ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc");
+		assertEquals("|abc|\n", found);
+	}
+
+	/** tree parsing with output=template and rewrite=true */
+	public void testSingleNodeRewriteMode() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"s : a {System.out.println(input.getTokenStream().toString(0,0));} ;\n" +
+			"a : ID -> template(x={$ID.text}) <<|<x>|>> ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc");
+		assertEquals("|abc|\n", found);
+	}
+
+	public void testRewriteRuleAndRewriteModeOnSimpleElements() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a: ^(A B) -> {ick}\n" +
+			" | y+=INT -> {ick}\n" +
+			" | x=ID -> {ick}\n" +
+			" | BLORT -> {ick}\n" +
+			" ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+	}
+
+	public void testRewriteRuleAndRewriteModeIgnoreActionsPredicates() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a: {action} {action2} x=A -> {ick}\n" +
+			" | {pred1}? y+=B -> {ick}\n" +
+			" | C {action} -> {ick}\n" +
+			" | {pred2}?=> z+=D -> {ick}\n" +
+			" | (E)=> ^(F G) -> {ick}\n" +
+			" ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+	}
+
+	public void testRewriteRuleAndRewriteModeNotSimple() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a  : ID+ -> {ick}\n" +
+			"   | INT INT -> {ick}\n" +
+			"   ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 2, equeue.warnings.size());
+	}
+
+	public void testRewriteRuleAndRewriteModeRefRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a  : b+ -> {ick}\n" +
+			"   | b b A -> {ick}\n" +
+			"   ;\n" +
+			"b  : B ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 2, equeue.warnings.size());
+	}
+
+}
diff --git a/src/org/antlr/test/TestSemanticPredicateEvaluation.java b/src/org/antlr/test/TestSemanticPredicateEvaluation.java
new file mode 100644
index 0000000..c2eacc9
--- /dev/null
+++ b/src/org/antlr/test/TestSemanticPredicateEvaluation.java
@@ -0,0 +1,237 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+public class TestSemanticPredicateEvaluation extends BaseTest {
+	public void testSimpleCyclicDFAWithPredicate() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
+			"  | {true}?  'x'* 'y' {System.out.println(\"alt2\");}\n" +
+			"  ;\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "xxxy", false);
+		assertEquals("alt2\n", found);
+	}
+
+	public void testSimpleCyclicDFAWithInstanceVarPredicate() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"@members {boolean v=true;}\n" +
+			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
+			"  | {v}?     'x'* 'y' {System.out.println(\"alt2\");}\n" +
+			"  ;\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "xxxy", false);
+		assertEquals("alt2\n", found);
+	}
+
+	public void testPredicateValidation() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"@members {\n" +
+			"public void reportError(RecognitionException e) {\n" +
+			"    System.out.println(\"error: \"+e.toString());\n" +
+			"}\n" +
+			"}\n" +
+			"\n" +
+			"a : {false}? 'x'\n" +
+			"  ;\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "x", false);
+		assertEquals("error: FailedPredicateException(a,{false}?)\n", found);
+	}
+
+	public void testLexerPreds() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=false;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? 'a'  {System.out.println(\"token 1\");} ;\n" +
+			"B : {!p}? 'a' {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "a", false);
+		// "a" is ambig; can match both A, B.  Pred says match 2
+		assertEquals("token 2\n", found);
+	}
+
+	public void testLexerPreds2() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? 'a' {System.out.println(\"token 1\");} ;\n" +
+			"B : ('a'|'b')+ {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "a", false);
+		// "a" is ambig; can match both A, B.  Pred says match 1
+		assertEquals("token 1\n", found);
+	}
+
+	public void testLexerPredInExitBranch() throws Exception {
+		// p says it's ok to exit; it has precedence over the !p loopback branch
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : ('a' {System.out.print(\"1\");})*\n" +
+			"    {p}?\n" +
+			"    ('a' {System.out.print(\"2\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaa", false);
+		assertEquals("222\n", found);
+	}
+
+	public void testLexerPredInExitBranch2() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : ({p}? 'a' {System.out.print(\"1\");})*\n" +
+			"    ('a' {System.out.print(\"2\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaa", false);
+		assertEquals("111\n", found);
+	}
+
+	public void testLexerPredInExitBranch3() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : ({p}? 'a' {System.out.print(\"1\");} | )\n" +
+			"    ('a' {System.out.print(\"2\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaa", false);
+		assertEquals("122\n", found);
+	}
+
+	public void testLexerPredInExitBranch4() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"a : (A|B)+ ;\n" +
+			"A @init {int n=0;} : ({n<2}? 'a' {System.out.print(n++);})+\n" +
+			"    ('a' {System.out.print(\"x\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaaaa", false);
+		assertEquals("01xxx\n", found);
+	}
+
+	public void testLexerPredsInCyclicDFA() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=false;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? ('a')+ 'x'  {System.out.println(\"token 1\");} ;\n" +
+			"B :      ('a')+ 'x' {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aax", false);
+		assertEquals("token 2\n", found);
+	}
+
+	public void testLexerPredsInCyclicDFA2() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=false;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? ('a')+ 'x' ('y')? {System.out.println(\"token 1\");} ;\n" +
+			"B :      ('a')+ 'x' {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aax", false);
+		assertEquals("token 2\n", found);
+	}
+
+	public void testGatedPred() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"a : (A|B)+ ;\n" +
+			"A : {true}?=> 'a' {System.out.println(\"token 1\");} ;\n" +
+			"B : {false}?=>('a'|'b')+ {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aa", false);
+		// "a" is ambig; can match both A, B.  Pred says match A twice
+		assertEquals("token 1\ntoken 1\n", found);
+	}
+
+	public void testGatedPred2() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"@lexer::members {boolean sig=false;}\n"+
+			"a : (A|B)+ ;\n" +
+			"A : 'a' {System.out.print(\"A\"); sig=true;} ;\n" +
+			"B : 'b' ;\n" +
+			"C : {sig}?=> ('a'|'b') {System.out.print(\"C\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aa", false);
+		assertEquals("AC\n", found);
+	}
+
+	public void testPredWithActionTranslation() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"a : b[2] ;\n" +
+			"b[int i]\n" +
+			"  : {$i==1}?   'a' {System.out.println(\"alt 1\");}\n" +
+			"  | {$b.i==2}? 'a' {System.out.println(\"alt 2\");}\n" +
+			"  ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aa", false);
+		assertEquals("alt 2\n", found);
+	}
+
+	public void testPredicatesOnEOTTarget() throws Exception {
+		String grammar =
+			"grammar foo; \n" +
+			"@lexer::members {boolean p=true, q=false;}" +
+			"a : B ;\n" +
+			"A: '</'; \n" +
+			"B: {p}? '<!' {System.out.println(\"B\");};\n" +
+			"C: {q}? '<' {System.out.println(\"C\");}; \n" +
+			"D: '<';\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "<!", false);
+		assertEquals("B\n", found);
+	}
+
+
+	// S U P P O R T
+
+	public void _test() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a :  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {channel=99;} ;\n";
+		String found = execParser("t.g", grammar, "T", "TLexer",
+				    "a", "abc 34", false);
+		assertEquals("\n", found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestSemanticPredicates.java b/src/org/antlr/test/TestSemanticPredicates.java
new file mode 100644
index 0000000..4568808
--- /dev/null
+++ b/src/org/antlr/test/TestSemanticPredicates.java
@@ -0,0 +1,707 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.BitSet;
+import org.antlr.tool.*;
+
+import java.util.List;
+
+public class TestSemanticPredicates extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestSemanticPredicates() {
+	}
+
+	public void testPredsButSyntaxResolves() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {p2}? B ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testLL_1_Pred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {p2}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testLL_2_Pred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A B | {p2}? A B ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->.s2\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testPredicatedLoop() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( {p1}? A | {p2}? A )+;");
+		String expecting =                   // loop back
+			".s0-A->.s2\n" +
+			".s0-EOF->:s1=>3\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testPredicatedToStayInLoop() throws Exception {
+		// Fix: 'expecting' was built but never asserted — the checkDecision
+		// call was missing, so this test silently verified nothing.
+		Grammar g = new Grammar(
+			"parser grammar P;\n" + "a : ( {p1}? A )+ (A)+;");
+		String expecting =
+			".s0-A->.s1\n" + ".s1-{!(p1)}?->:s2=>1\n" + ".s1-{p1}?->:s3=>2\n";  // loop back
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testAndPredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? {p1a}? A | {p2}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{(p1&&p1a)}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testOrPredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | {p2}? A ;\n" +
+			"b : {p1}? A | {p1a}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{(p1||p1a)}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testIgnoresHoistingDepthGreaterThanZero() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A {p1}? | A {p2}?;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A", null, null, 2);
+	}
+
+	public void testHoist2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | c ;\n" +
+			"b : {p1}? A ;\n" +
+			"c : {p2}? A ;\n");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testHoistCorrectContext() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | {p2}? ID ;\n" +
+			"b : {p1}? ID | INT ;\n");
+		String expecting =  // only tests after ID, not INT :)
+			".s0-ID->.s1\n" +
+			".s0-INT->:s2=>1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testDefaultPredNakedAltIsLast() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | ID ;\n" +
+			"b : {p1}? ID | INT ;\n");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT->:s2=>1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testDefaultPredNakedAltNotLast() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ID | b ;\n" +
+			"b : {p1}? ID | INT ;\n");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT->:s3=>2\n" +
+			".s1-{!(p1)}?->:s2=>1\n" +
+			".s1-{p1}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testLeftRecursivePred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"s : a ;\n" +
+			"a : {p1}? a | ID ;\n");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.createNFAs();
+			g.createLookaheadDFAs();
+		}
+
+		DFA dfa = g.getLookaheadDFA(1);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		assertEquals(expecting, result);
+
+		assertEquals("unexpected number of expected problems", 1, equeue.size());
+		Message msg = (Message)equeue.warnings.get(0);
+		assertTrue("warning must be a recursion overflow msg",
+				    msg instanceof RecursionOverflowMessage);
+	}
+
+	public void testIgnorePredFromLL2AltLastAltIsDefaultTrue() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A B | A C | {p2}? A | {p3}? A | A ;\n");
+		// two situations of note:
+		// 1. A B syntax is enough to predict that alt, so p1 is not used
+		//    to distinguish it from alts 2..5
+		// 2. Alts 3, 4, 5 are nondeterministic with upon A.  p2, p3 and the
+		//    complement of p2||p3 is sufficient to resolve the conflict. Do
+		//    not include alt 1's p1 pred in the "complement of other alts"
+		//    because it is not considered nondeterministic with alts 3..5
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n" +
+			".s1-{p2}?->:s4=>3\n" +
+			".s1-{p3}?->:s5=>4\n" +
+			".s1-{true}?->:s6=>5\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testIgnorePredFromLL2AltPredUnionNeeded() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A B | A C | {p2}? A | A | {p3}? A ;\n");
+		// two situations of note:
+		// 1. A B syntax is enough to predict that alt, so p1 is not used
+		//    to distinguish it from alts 2..5
+		// 2. Alts 3, 4, 5 are nondeterministic with upon A.  p2, p3 and the
+		//    complement of p2||p3 is sufficient to resolve the conflict. Do
+		//    not include alt 1's p1 pred in the "complement of other alts"
+		//    because it is not considered nondeterministic with alts 3..5
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n" +
+			".s1-{!((p3||p2))}?->:s5=>4\n" +
+			".s1-{p2}?->:s4=>3\n" +
+			".s1-{p3}?->:s6=>5\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testPredGets2SymbolSyntacticContext() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | A B | C ;\n" +
+			"b : {p1}? A B ;\n");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-C->:s5=>3\n" +
+			".s1-B->.s2\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testMatchesLongestThenTestPred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | c ;\n" +
+			"b : {p}? A ;\n" +
+			"c : {q}? (A|B)+ ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-B->:s3=>2\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{q}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testPredsUsedAfterRecursionOverflow() throws Exception {
+		Grammar g = new Grammar(
+			"grammar P;\n"+
+			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
+			"e : '(' e ')' | INT ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-INT->.s7\n" +
+			".s1-'('->.s2\n" +
+			".s1-INT->.s5\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n" +
+			".s5-')'->.s6\n" +
+			".s6-'.'->:s3=>1\n" +
+			".s6-':'->:s4=>2\n" +
+			".s7-'.'->:s3=>1\n" +
+			".s7-':'->:s4=>2\n";
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.createNFAs();
+			g.createLookaheadDFAs();
+		}
+
+		assertEquals("unexpected number of expected problems", 0, equeue.size());
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testLexerMatchesLongestThenTestPred() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"B : {p}? 'a' ;\n" +
+			"C : {q}? ('a'|'b')+ ;");
+		String expecting =
+			".s0-'a'->.s1\n" +
+			".s0-'b'->:s4=>2\n" +
+			".s1-'a'..'b'->:s4=>2\n" +
+			".s1-<EOT>->.s2\n" +
+			".s2-{p}?->:s3=>1\n" +
+			".s2-{q}?->:s4=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testGatedPred() throws Exception {
+		// gated preds are present on all arcs in predictor
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"B : {p}? => 'a' ;\n" +
+			"C : {q}? => ('a'|'b')+ ;");
+		String expecting =
+			".s0-'a'&&{(p||q)}?->.s1\n" +
+			".s0-'b'&&{q}?->:s4=>2\n" +
+			".s1-'a'..'b'&&{q}?->:s4=>2\n" +
+			".s1-<EOT>&&{(p||q)}?->.s2\n" +
+			".s2-{p}?->:s3=>1\n" +
+			".s2-{q}?->:s4=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testGatedPredHoistsAndCanBeInStopState() throws Exception {
+		// I found a bug where merging stop states made us throw away
+		// a stop state with a gated pred!
+		Grammar g = new Grammar(
+			"grammar u;\n" +
+			"a : b+ ;\n" +
+			"b : 'x' | {p}?=> 'y' ;");
+		String expecting =
+			".s0-'x'->:s2=>1\n" +
+			".s0-'y'&&{p}?->:s3=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testGatedPredInCyclicDFA() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : {p}?=> ('a')+ 'x' ;\n" +
+			"B : {q}?=> ('a'|'b')+ 'x' ;");
+		String expecting =
+			".s0-'a'&&{(p||q)}?->.s1\n" +
+			".s0-'b'&&{q}?->:s5=>2\n" +
+			".s1-'a'&&{(p||q)}?->.s1\n" +
+			".s1-'b'&&{q}?->:s5=>2\n" +
+			".s1-'x'&&{(p||q)}?->.s2\n" +
+			".s2-<EOT>&&{(p||q)}?->.s3\n" +
+			".s3-{p}?->:s4=>1\n" +
+			".s3-{q}?->:s5=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testGatedPredNotActuallyUsedOnEdges() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ('a' | {p}?=> 'a')\n" +
+			"  | 'a' 'b'\n" +
+			"  ;");
+		String expecting1 =
+			".s0-'a'->.s1\n" +
+			".s1-{!(p)}?->:s2=>1\n" +  	// Used to disambig subrule
+			".s1-{p}?->:s3=>2\n";
+		// rule A decision can't test p from s0->1 because 'a' is valid
+		// for alt1 *and* alt2 w/o p.  Can't test p from s1 to s3 because
+		// we might have passed the first alt of subrule.  The same state
+		// is listed in s2 in 2 different configurations: one with and one
+		// w/o p.  Can't test therefore.  p||true == true.
+		String expecting2 =
+			".s0-'a'->.s1\n" +
+			".s1-'b'->:s2=>2\n" +
+			".s1-<EOT>->:s3=>1\n";
+		checkDecision(g, 1, expecting1, null, null, null, null, null, 0);
+		checkDecision(g, 2, expecting2, null, null, null, null, null, 0);
+	}
+
+	public void testGatedPredDoesNotForceAllToBeGated() throws Exception {
+		Grammar g = new Grammar(
+			"grammar w;\n" +
+			"a : b | c ;\n" +
+			"b : {p}? B ;\n" +
+			"c : {q}?=> d ;\n" +
+			"d : {r}? C ;\n");
+		String expecting =
+			".s0-B->:s1=>1\n" +
+			".s0-C&&{q}?->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testGatedPredDoesNotForceAllToBeGated2() throws Exception {
+		Grammar g = new Grammar(
+			"grammar w;\n" +
+			"a : b | c ;\n" +
+			"b : {p}? B ;\n" +
+			"c : {q}?=> d ;\n" +
+			"d : {r}?=> C\n" +
+			"  | B\n" +
+			"  ;\n");
+		String expecting =
+			".s0-B->.s1\n" +
+			".s0-C&&{(q&&r)}?->:s3=>2\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{q}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	public void testORGatedPred() throws Exception {
+		Grammar g = new Grammar(
+			"grammar w;\n" +
+			"a : b | c ;\n" +
+			"b : {p}? B ;\n" +
+			"c : {q}?=> d ;\n" +
+			"d : {r}?=> C\n" +
+			"  | {s}?=> B\n" +
+			"  ;\n");
+		String expecting =
+			".s0-B->.s1\n" +
+			".s0-C&&{(q&&r)}?->:s3=>2\n" +
+			".s1-{(q&&s)}?->:s3=>2\n" +
+			".s1-{p}?->:s2=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+	}
+
+	/** The following grammar should yield an error that rule 'a' has
+	 *  insufficient semantic info pulled from 'b'.
+	 */
+	public void testIncompleteSemanticHoistedContext() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | B;\n" +
+			"b : {p1}? B | B ;");
+		String expecting =
+			".s0-B->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "B", new int[] {1}, null, 3);
+	}
+
+	/** The following grammar should yield an error that rule 'a' has
+	 *  insufficient semantic info pulled from 'b'.  This is the same
+	 *  as the previous case except that the D prevents the B path from
+	 *  "pinching" together into a single NFA state.
+	 *
+	 *  This test also demonstrates that just because B D could predict
+	 *  alt 1 in rule 'a', it is unnecessary to continue NFA->DFA
+	 *  conversion to include an edge for D.  Alt 1 is the only possible
+	 *  prediction because we resolve the ambiguity by choosing alt 1.
+	 */
+	public void testIncompleteSemanticHoistedContext2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | B;\n" +
+			"b : {p1}? B | B D ;");
+		String expecting =
+			".s0-B->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "B", new int[] {1},
+					  null, 3);
+	}
+
+	public void testTooFewSemanticPredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : {p1}? A | A | A ;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2,3},
+					  new int[] {1,2,3}, "A",
+					  null, null, 2);
+	}
+
+	public void testPredWithK1() throws Exception {
+		Grammar g = new Grammar(
+			"\tlexer grammar TLexer;\n" +
+			"A\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : {p1}? ('x')+ '.'\n" +
+			"  | {p2}? ('x')+ '.'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] insufficientPredAlts = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings);
+	}
+
+	public void testPredWithArbitraryLookahead() throws Exception {
+		Grammar g = new Grammar(
+			"\tlexer grammar TLexer;\n" +
+			"A : {p1}? ('x')+ '.'\n" +
+			"  | {p2}? ('x')+ '.'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s1-'.'->.s2\n" +
+			".s1-'x'->.s1\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] insufficientPredAlts = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings);
+	}
+
+
+	/** For a DFA state with lots of configurations that have the same
+	 *  predicate, don't just OR them all together as it's a waste to
+	 *  test a||a||b||a||a etc...  ANTLR makes a unique set and THEN
+	 *  OR's them together.
+	 */
+	public void testUniquePredicateOR() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar v;\n" +
+			"\n" +
+			"a : {a}? b\n" +
+			"  | {b}? b\n" +
+			"  ;\n" +
+			"\n" +
+			"b : {c}? (X)+ ;\n" +
+			"\n" +
+			"c : a\n" +
+			"  | b\n" +
+			"  ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s1-{((b&&c)||(a&&c))}?->:s2=>1\n" +
+			".s1-{c}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] insufficientPredAlts = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings);
+	}
+
+	// S U P P O R T
+
+	public void _template() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A | B;");
+		String expecting =
+			"\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "L ID R";
+		int[] insufficientPredAlts = new int[] {1};
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings);
+	}
+
+	protected void checkDecision(Grammar g,  // central helper: build the lookahead DFA for 'decision' and verify its serialized form plus expected diagnostics
+								 int decision,
+								 String expecting,
+								 int[] expectingUnreachableAlts,
+								 int[] expectingNonDetAlts,
+								 String expectingAmbigInput,
+								 int[] expectingInsufficientPredAlts,  // NOTE(review): accepted but never checked below — confirm intent
+								 int[] expectingDanglingAlts,  // NOTE(review): accepted but never checked below — confirm intent
+								 int expectingNumWarnings)
+		throws Exception
+	{
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.createNFAs();
+			g.createLookaheadDFAs();
+		}
+
+		if ( equeue.size()!=expectingNumWarnings ) {
+			System.err.println("Warnings issued: "+equeue);  // dump diagnostics before the assert fails, to ease debugging
+		}
+
+		assertEquals("unexpected number of expected problems",
+				   expectingNumWarnings, equeue.size());
+
+		DFA dfa = g.getLookaheadDFA(decision);  // DFA was created above (or by an earlier call on the same grammar)
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		//System.out.print(result);
+		List unreachableAlts = dfa.getUnreachableAlts();
+
+		// make sure unreachable alts are as expected
+		if ( expectingUnreachableAlts!=null ) {
+			BitSet s = new BitSet();
+			s.addAll(expectingUnreachableAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(unreachableAlts);
+			assertEquals("unreachable alts mismatch", s, s2);  // compare as sets: order does not matter
+			}
+		else {
+			assertEquals("unreachable alts mismatch", 0, unreachableAlts.size());
+		}
+
+		// check conflicting input
+		if ( expectingAmbigInput!=null ) {
+			// first, find nondet message
+			Message msg = (Message)equeue.warnings.get(0);
+			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
+			msg instanceof GrammarNonDeterminismMessage);
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			List labels =
+				nondetMsg.probe.getSampleNonDeterministicInputSequence(nondetMsg.problemState);
+			String input = nondetMsg.probe.getInputSequenceDisplay(labels);
+			assertEquals(expectingAmbigInput, input);
+		}
+
+		// check nondet alts
+		if ( expectingNonDetAlts!=null ) {
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			assertNotNull("found no nondet alts; expecting: "+
+										str(expectingNonDetAlts), nondetMsg);
+			List nonDetAlts =
+				nondetMsg.probe.getNonDeterministicAltsForState(nondetMsg.problemState);
+			// compare nonDetAlts with expectingNonDetAlts
+			BitSet s = new BitSet();
+			s.addAll(expectingNonDetAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(nonDetAlts);
+			assertEquals("nondet alts mismatch", s, s2);
+		}
+		else {
+			// not expecting any nondet alts, make sure there are none
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			assertNull("found nondet alts, but expecting none", nondetMsg);
+		}
+
+		assertEquals(expecting, result);  // finally, serialized DFA must match exactly
+	}
+
+	protected GrammarNonDeterminismMessage getNonDeterminismMessage(List warnings) {
+		// Scan the warning list and return the first nondeterminism message, or null.
+		for (Object warning : warnings) {
+			if ( warning instanceof GrammarNonDeterminismMessage ) {
+				return (GrammarNonDeterminismMessage)warning;
+			}
+		}
+		return null;
+	}
+
+	/** Render an int array as "1, 2, 3" for assertion failure messages. */
+	protected String str(int[] elements) {
+		StringBuilder buf = new StringBuilder(); // method-local buffer: StringBuilder, not synchronized StringBuffer
+		for (int i = 0; i < elements.length; i++) {
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			buf.append(elements[i]);
+		}
+		return buf.toString();
+	}
+}
diff --git a/src/org/antlr/test/TestSets.java b/src/org/antlr/test/TestSets.java
new file mode 100644
index 0000000..614fcc0
--- /dev/null
+++ b/src/org/antlr/test/TestSets.java
@@ -0,0 +1,260 @@
+package org.antlr.test;
+
+/** Test the set stuff in lexer and parser */
+public class TestSets extends BaseTest {
+	protected boolean debug = false;
+
+	/** Public default constructor used by TestRig */
+	public TestSets() {
+	}
+
+	public void testSeqDoesNotBecomeSet() throws Exception {
+		// The sequence '3' '4' in fragment B must not be collapsed into a set;
+		// the token handed to the parser stays C, covering all of input "34".
+		String grammar =
+			"grammar P;\n" +
+			"a : C {System.out.println(input);} ;\n" +
+			"fragment A : '1' | '2';\n" +
+			"fragment B : '3' '4';\n" +
+			"C : A | B;\n";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+								  "a", "34", debug);
+		assertEquals("34\n", found);
+	}
+
+	public void testParserSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : t=('x'|'y') {System.out.println($t.text);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testParserNotSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "zz", debug);
+		assertEquals("z\n", found);
+	}
+
+	public void testParserNotToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ~'x' 'z' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "zz", debug);
+		assertEquals("zz\n", found);
+	}
+
+	public void testParserNotTokenWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : t=~'x' 'z' {System.out.println($t.text);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "zz", debug);
+		assertEquals("z\n", found);
+	}
+
+	public void testRuleAsSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a @after {System.out.println(input);} : 'a' | 'b' |'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "b", debug);
+		assertEquals("b\n", found);
+	}
+
+	public void testRuleAsSetAST() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'a' | 'b' |'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "b", debug);
+		assertEquals("b\n", found);
+	}
+
+	public void testNotChar() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~'b' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testOptionalSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A? 'c' {System.out.println(input);} ;\n" +
+			"A : 'b' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bc", debug);
+		assertEquals("bc\n", found);
+	}
+
+	public void testOptionalLexerSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : 'b'? 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bc", debug);
+		assertEquals("bc\n", found);
+	}
+
+	public void testStarLexerSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : 'b'* 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bbbbc", debug);
+		assertEquals("bbbbc\n", found);
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "c", debug);
+		assertEquals("c\n", found);
+	}
+
+	public void testPlusLexerSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : 'b'+ 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bbbbc", debug);
+		assertEquals("bbbbc\n", found);
+	}
+
+	public void testOptionalSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ('a'|'b')? 'c' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "ac", debug);
+		assertEquals("ac\n", found);
+	}
+
+	public void testStarSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ('a'|'b')* 'c' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	public void testPlusSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ('a'|'b')+ 'c' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	public void testLexerOptionalSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : ('a'|'b')? 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "ac", debug);
+		assertEquals("ac\n", found);
+	}
+
+	public void testLexerStarSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : ('a'|'b')* 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	public void testLexerPlusSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : ('a'|'b')+ 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	public void testNotCharSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~('b'|'c') ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testNotCharSetWithLabel() throws Exception {
+		// NOTE(review): the original comment claimed labels on ~sets don't work in
+		// the lexer (h=input.LA(1) typed as Token), yet the test asserts success — confirm.
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : h=~('b'|'c') ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testNotCharSetWithRuleRef() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~('a'|B) ;\n" +
+			"B : 'b' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testNotCharSetWithRuleRef2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~('a'|B) ;\n" +
+			"B : 'b'|'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testNotCharSetWithRuleRef3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ('a'|B) ;\n" +
+			"fragment\n" +
+			"B : ~('a'|'c') ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	public void testNotCharSetWithRuleRef4() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ('a'|B) ;\n" +
+			"fragment\n" +
+			"B : ~('a'|C) ;\n" +
+			"fragment\n" +
+			"C : 'c'|'d' ;\n ";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestSymbolDefinitions.java b/src/org/antlr/test/TestSymbolDefinitions.java
new file mode 100644
index 0000000..6160972
--- /dev/null
+++ b/src/org/antlr/test/TestSymbolDefinitions.java
@@ -0,0 +1,892 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.*;
+
+import java.io.StringReader;
+import java.util.*;
+
+public class TestSymbolDefinitions extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestSymbolDefinitions() {
+    }
+
+	public void testParserSimpleTokens() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : A | B;\n" +
+				"b : C ;");
+		String rules = "a, b";
+		String tokenNames = "A, B, C";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testParserTokensSection() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar t;\n" +
+				"tokens {\n" +
+				"  C;\n" +
+				"  D;" +
+				"}\n"+
+				"a : A | B;\n" +
+				"b : C ;");
+		String rules = "a, b";
+		String tokenNames = "A, B, C, D";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testLexerTokensSection() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar t;\n" +
+				"tokens {\n" +
+				"  C;\n" +
+				"  D;" +
+				"}\n"+
+				"A : 'a';\n" +
+				"C : 'c' ;");
+		String rules = "A, C, Tokens";
+		String tokenNames = "A, C, D";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testTokensSectionWithAssignmentSection() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"tokens {\n" +
+				"  C='c';\n" +
+				"  D;" +
+				"}\n"+
+				"a : A | B;\n" +
+				"b : C ;");
+		String rules = "a, b";
+		String tokenNames = "A, B, C, D, 'c'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testCombinedGrammarLiterals() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : 'begin' b 'end';\n" +
+				"b : C ';' ;\n" +
+				"ID : 'a' ;\n" +
+				"FOO : 'foo' ;\n" +  // "foo" is not a token name
+				"C : 'c' ;\n");        // nor is 'c'
+		String rules = "a, b";
+		String tokenNames = "C, FOO, ID, 'begin', 'end', ';'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testLiteralInParserAndLexer() throws Exception {
+		// 'x' is token and char in lexer rule
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"a : 'x' E ; \n" +
+				"E: 'x' '0' ;\n");        // 'x' is also a char inside rule E
+		String literals = "['x']";
+		String foundLiterals = g.getStringLiterals().toString();
+		assertEquals(literals, foundLiterals);
+
+		String implicitLexer =
+			"lexer grammar t;\n" +
+			"\n" +
+			"T5 : 'x' ;\n" +
+			"\n" +
+			"// $ANTLR src \"<string>\" 3\n" +
+			"E: 'x' '0' ;\n";
+		assertEquals(implicitLexer, g.getLexerGrammar());
+	}
+
+	public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : 'a' ;\n" +
+				"A : 'a' ;\n");
+		String rules = "a";
+		String tokenNames = "A, 'a'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testSetDoesNotMissTokenAliases() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : 'a'|'b' ;\n" +
+				"A : 'a' ;\n" +
+				"B : 'b' ;\n");
+		String rules = "a";
+		String tokenNames = "A, 'a', B, 'b'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	public void testSimplePlusEqualLabel() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : ids+=ID ( COMMA ids+=ID )* ;\n");
+		String rule = "a";
+		String tokenLabels = "ids";
+		String ruleLabels = null;
+		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
+	}
+
+	public void testMixedPlusEqualLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"options {output=AST;}\n" +
+				"a : id+=ID ( ',' e+=expr )* ;\n" +
+				"expr : 'e';\n" +
+				"ID : 'a';\n");
+		String rule = "a";
+		String tokenLabels = "id";
+		String ruleLabels = "e";
+		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
+	}
+
+	// T E S T  L I T E R A L  E S C A P E S
+
+	public void testParserCharLiteralWithEscape() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : '\\n';\n");
+		Set literals = g.getStringLiterals();
+		// must store literals how they appear in the antlr grammar
+		assertEquals("'\\n'", literals.toArray()[0]);
+	}
+
+	public void testTokenInTokensSectionAndTokenRuleDef() throws Exception {
+		// B is declared in the tokens section (B='}') and also defined as a
+		// lexer rule; the '}' input must still come back to the parser as B.
+		String grammar =
+			"grammar P;\n" +
+			"tokens { B='}'; }\n"+
+			"a : A B {System.out.println(input);} ;\n"+
+			"A : 'a' ;\n" +
+			"B : '}' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+								  "a", "a}", false);
+		assertEquals("a}\n", found);
+	}
+
+	public void testTokenInTokensSectionAndTokenRuleDef2() throws Exception {
+		// B='}' appears in the tokens section, as a lexer rule with an action,
+		// and as the literal '}' in the parser; all must resolve to token B.
+		String grammar =
+			"grammar P;\n" +
+			"tokens { B='}'; }\n"+
+			"a : A '}' {System.out.println(input);} ;\n"+
+			"A : 'a' ;\n" +
+			"B : '}' {/* */} ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+								  "a", "a}", false);
+		assertEquals("a}\n", found);
+	}
+
+
+	public void testRefToRuleWithNoReturnValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammarStr =
+			"grammar P;\n" +
+			"a : x=b ;\n" +
+			"b : B ;\n" +
+			"B : 'b' ;\n";
+		Grammar g = new Grammar(grammarStr);
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		StringTemplate recogST = generator.genRecognizer();
+		String code = recogST.toString();
+		assertTrue("not expecting label", code.indexOf("x=b();")<0);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// T E S T  E R R O R S
+
+	public void testParserStringLiterals() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : 'begin' b ;\n" +
+				"b : C ;");
+		Object expectedArg = "'begin'";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testParserCharLiterals() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : '(' b ;\n" +
+				"b : C ;");
+		Object expectedArg = "'('";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testEmptyNotChar() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar foo;\n" +
+				"a : (~'x')+ ;\n");
+		g.createNFAs();
+		Object expectedArg = "'x'";
+		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testEmptyNotToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar foo;\n" +
+				"a : (~A)+ ;\n");
+		g.createNFAs();
+		Object expectedArg = "A";
+		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testEmptyNotSet() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar foo;\n" +
+				"a : (~(A|B))+ ;\n");
+		g.createNFAs();
+		Object expectedArg = null;
+		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testStringLiteralInParserTokensSection() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n" +
+				"tokens {\n" +
+				"  B='begin';\n" +
+				"}\n"+
+				"a : A B;\n" +
+				"b : C ;");
+		Object expectedArg = "'begin'";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testCharLiteralInParserTokensSection() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n" +
+				"tokens {\n" +
+				"  B='(';\n" +
+				"}\n"+
+				"a : A B;\n" +
+				"b : C ;");
+		Object expectedArg = "'('";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testCharLiteralInLexerTokensSection() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"lexer grammar t;\n" +
+				"tokens {\n" +
+				"  B='(';\n" +
+				"}\n"+
+				"ID : 'a';\n");
+		Object expectedArg = "'('";
+		int expectedMsgID = ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRuleRedefinition() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : A | B;\n" +
+				"a : C ;");
+
+		Object expectedArg = "a";
+		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLexerRuleRedefinition() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"ID : 'a' ;\n" +
+				"ID : 'd' ;");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testCombinedRuleRedefinition() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"x : ID ;\n" +
+				"ID : 'a' ;\n" +
+				"x : ID ID ;");
+
+		Object expectedArg = "x";
+		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testUndefinedToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"x : ID ;");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_NO_TOKEN_DEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkWarning(equeue, expectedMessage);
+	}
+
+	public void testUndefinedTokenOkInParser() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"x : ID ;");
+        assertEquals("should not be an error", 0, equeue.errors.size());
+	}
+
+	public void testUndefinedRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"x : r ;");
+
+		Object expectedArg = "r";
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLexerRuleInParser() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"X : ;");
+
+		Object expectedArg = "X";
+		int expectedMsgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testParserRuleInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"a : ;");
+
+		Object expectedArg = "a";
+		int expectedMsgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRuleScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"scope a {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : \n" +
+			"  ;\n");
+
+		Object expectedArg = "a";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testTokenRuleScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"scope ID {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"ID : 'a'\n" +
+			"  ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testTokenScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"tokens { ID; }\n"+
+			"scope ID {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : \n" +
+			"  ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testTokenRuleScopeConflictInLexerGrammar() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"scope ID {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"ID : 'a'\n" +
+			"  ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testTokenLabelScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"scope s {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : s=ID \n" +
+			"  ;\n");
+
+		Object expectedArg = "s";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRuleLabelScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"scope s {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : s=b \n" +
+			"  ;\n" +
+			"b : ;\n");
+
+		Object expectedArg = "s";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLabelAndRuleNameConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : c=b \n" +
+			"  ;\n" +
+			"b : ;\n" +
+			"c : ;\n");
+
+		Object expectedArg = "c";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLabelAndTokenNameConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ID=b \n" +
+			"  ;\n" +
+			"b : ID ;\n" +
+			"c : ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLabelAndArgConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[int i] returns [int x]: i=ID \n" +
+			"  ;\n");
+
+		Object expectedArg = "i";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLabelAndParameterConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[int i] returns [int x]: x=ID \n" +
+			"  ;\n");
+
+		Object expectedArg = "x";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testLabelRuleScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a\n" +
+			"scope {" +
+			"  int n;" +
+			"}\n" +
+			"  : n=ID\n" +
+			"  ;\n");
+
+		Object expectedArg = "n";
+		Object expectedArg2 = "a";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRuleScopeArgConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[int n]\n" +
+			"scope {" +
+			"  int n;" +
+			"}\n" +
+			"  : \n" +
+			"  ;\n");
+
+		Object expectedArg = "n";
+		Object expectedArg2 = "a";
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRuleScopeReturnValueConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a returns [int n]\n" +
+			"scope {" +
+			"  int n;" +
+			"}\n" +
+			"  : \n" +
+			"  ;\n");
+
+		Object expectedArg = "n";
+		Object expectedArg2 = "a";
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testRuleScopeRuleNameConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a\n" +
+			"scope {" +
+			"  int a;" +
+			"}\n" +
+			"  : \n" +
+			"  ;\n");
+
+		Object expectedArg = "a";
+		Object expectedArg2 = null;
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testBadGrammarOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Tool antlr = newTool();
+		Grammar g = new Grammar(antlr,
+								"t",
+								new StringReader(
+									"grammar t;\n"+
+									"options {foo=3; language=Java;}\n" +
+									"a : 'a';\n"));
+
+		Object expectedArg = "foo";
+		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testBadRuleOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a\n"+
+				"options {k=3; tokenVocab=blort;}\n" +
+				"  : 'a';\n");
+
+		Object expectedArg = "tokenVocab";
+		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	public void testBadSubRuleOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : ( options {k=3; language=Java;}\n" +
+				"    : 'a'\n" +
+				"    | 'b'\n" +
+				"    )\n" +
+				"  ;\n");
+		Object expectedArg = "language";
+		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	protected void checkError(ErrorQueue equeue,
+							  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+		System.out.println(equeue.infos);
+		System.out.println(equeue.warnings);
+		System.out.println(equeue.errors);
+		assertTrue("number of errors mismatch", n, equeue.errors.size());
+				   */
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = (Message)equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+	}
+
+	protected void checkWarning(ErrorQueue equeue,
+								GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.warnings.size(); i++) {
+			Message m = (Message)equeue.warnings.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+	}
+
+	protected void checkPlusEqualsLabels(Grammar g,
+										 String ruleName,
+										 String tokenLabelsStr,
+										 String ruleLabelsStr)
+		throws Exception
+	{
+		// make sure expected += labels are there
+		Rule r = g.getRule(ruleName);
+		StringTokenizer st = new StringTokenizer(tokenLabelsStr, ", ");
+		Set tokenLabels = null;
+		while ( st.hasMoreTokens() ) {
+			if ( tokenLabels==null ) {
+				tokenLabels = new HashSet();
+			}
+			String labelName = st.nextToken();
+			tokenLabels.add(labelName);
+		}
+		Set ruleLabels = null;
+		if ( ruleLabelsStr!=null ) {
+			st = new StringTokenizer(ruleLabelsStr, ", ");
+			ruleLabels = new HashSet();
+			while ( st.hasMoreTokens() ) {
+				String labelName = st.nextToken();
+				ruleLabels.add(labelName);
+			}
+		}
+		assertTrue("token += labels mismatch; "+tokenLabels+"!="+r.tokenListLabels,
+				   (tokenLabels!=null && r.tokenListLabels!=null) ||
+				   (tokenLabels==null && r.tokenListLabels==null));
+		assertTrue("rule += labels mismatch; "+ruleLabels+"!="+r.ruleListLabels,
+				   (ruleLabels!=null && r.ruleListLabels!=null) ||
+				   (ruleLabels==null && r.ruleListLabels==null));
+		if ( tokenLabels!=null ) {
+			assertEquals(tokenLabels, r.tokenListLabels.keySet());
+		}
+		if ( ruleLabels!=null ) {
+			assertEquals(ruleLabels, r.ruleListLabels.keySet());
+		}
+	}
+
+	protected void checkSymbols(Grammar g,
+								String rulesStr,
+								String tokensStr)
+		throws Exception
+	{
+		Set tokens = g.getTokenDisplayNames();
+
+		// make sure expected tokens are there
+		StringTokenizer st = new StringTokenizer(tokensStr, ", ");
+		while ( st.hasMoreTokens() ) {
+			String tokenName = st.nextToken();
+			assertTrue("token "+tokenName+" expected",
+					   g.getTokenType(tokenName)!=Label.INVALID);
+			tokens.remove(tokenName);
+		}
+		// make sure there are not any others (other than <EOF> etc...)
+        for (Iterator iter = tokens.iterator(); iter.hasNext();) {
+			String tokenName = (String) iter.next();
+			assertTrue("unexpected token name "+tokenName,
+					    g.getTokenType(tokenName)<Label.MIN_TOKEN_TYPE);
+		}
+
+		// make sure all expected rules are there
+		st = new StringTokenizer(rulesStr, ", ");
+		int n = 0;
+		while ( st.hasMoreTokens() ) {
+			String ruleName = st.nextToken();
+			assertNotNull("rule "+ruleName+" expected", g.getRule(ruleName));
+			n++;
+		}
+		Collection rules = g.getRules();
+		//System.out.println("rules="+rules);
+		// make sure there are no extra rules
+		assertEquals("number of rules mismatch; expecting "+n+"; found "+rules.size(), n, rules.size());
+
+	}
+
+}
diff --git a/src/org/antlr/test/TestSyntacticPredicateEvaluation.java b/src/org/antlr/test/TestSyntacticPredicateEvaluation.java
new file mode 100644
index 0000000..2944974
--- /dev/null
+++ b/src/org/antlr/test/TestSyntacticPredicateEvaluation.java
@@ -0,0 +1,414 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+/** Functional tests that syntactic predicates ("(...)=>") are actually
+ *  evaluated at recognition time.  Each test builds a recognizer from an
+ *  inline grammar, runs it on sample input via BaseTest.execParser /
+ *  execTreeParser, and compares the captured System.out trace (produced by
+ *  embedded println/print actions) against an expected string.
+ */
+public class TestSyntacticPredicateEvaluation extends BaseTest {
+	// k=1 forces pred evaluation on rule a; the naked alt 3 is last.
+	// "(x) ;" -> alt 2, "(x). ;" -> alt 1, "((x)) ;" -> alt 3.
+	public void testTwoPredsWithNakedAlt() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : (a ';')+ ;\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : (b '.')=> b '.' {System.out.println(\"alt 1\");}\n" +
+			"  | (b)=> b {System.out.println(\"alt 2\");}\n" +
+			"  | c       {System.out.println(\"alt 3\");}\n" +
+			"  ;\n" +
+			"b\n" +
+			"@init {System.out.println(\"enter b\");}\n" +
+			"   : '(' 'x' ')' ;\n" +
+			"c\n" +
+			"@init {System.out.println(\"enter c\");}\n" +
+			"   : '(' c ')' | 'x' ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "(x) ;", false);
+		// "enter b" repeats because rule b is entered during pred
+		// evaluation as well as during the winning-alt parse.
+		String expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"enter b\n" +
+			"alt 2\n";
+		assertEquals(expecting, found);
+
+		found = execParser("t.g", grammar, "tParser", "tLexer",
+			    "a", "(x). ;", false);
+		expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"alt 1\n";
+		assertEquals(expecting, found);
+
+		found = execParser("t.g", grammar, "tParser", "tLexer",
+			    "a", "((x)) ;", false);
+		expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"alt 3\n";
+		assertEquals(expecting, found);
+	}
+
+	// Same grammar shape but the naked alt (c) sits between the two
+	// predicated alts, so it wins as soon as alt 1's pred fails.
+	public void testTwoPredsWithNakedAltNotLast() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : (a ';')+ ;\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : (b '.')=> b '.' {System.out.println(\"alt 1\");}\n" +
+			"  | c       {System.out.println(\"alt 2\");}\n" +
+			"  | (b)=> b {System.out.println(\"alt 3\");}\n" +
+			"  ;\n" +
+			"b\n" +
+			"@init {System.out.println(\"enter b\");}\n" +
+			"   : '(' 'x' ')' ;\n" +
+			"c\n" +
+			"@init {System.out.println(\"enter c\");}\n" +
+			"   : '(' c ')' | 'x' ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "(x) ;", false);
+		String expecting =
+			"enter b\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"alt 2\n";
+		assertEquals(expecting, found);
+
+		found = execParser("t.g", grammar, "tParser", "tLexer",
+			    "a", "(x). ;", false);
+		expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"alt 1\n";
+		assertEquals(expecting, found);
+
+		found = execParser("t.g", grammar, "tParser", "tLexer",
+			    "a", "((x)) ;", false);
+		expecting =
+			"enter b\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"alt 2\n";
+		assertEquals(expecting, found);
+	}
+
+	// Syn preds inside a lexer rule: "xxx" -> alt2, "xxx." -> alt1.
+	public void testLexerPred() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : A ;\n" +
+			"A options {k=1;}\n" + // force backtracking
+			"  : (B '.')=>B '.' {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}" +
+			"  ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "s", "xxx", false);
+
+		assertEquals("alt2\n", found);
+
+		found = execParser("t.g", grammar, "tParser", "tLexer",
+			    "s", "xxx.", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+	// Pred looks past the alt it selects: alt1 matches only B, so the
+	// trailing '.' is left for rule D to consume.
+	public void testLexerWithPredLongerThanAlt() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : A ;\n" +
+			"A options {k=1;}\n" + // force backtracking
+			"  : (B '.')=>B {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}" +
+			"  ;\n" +
+			"D : '.' {System.out.println(\"D\");} ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "s", "xxx", false);
+
+		assertEquals("alt2\n", found);
+
+		found = execParser("t.g", grammar, "tParser", "tLexer",
+			    "s", "xxx.", false);
+
+		assertEquals("alt1\nD\n", found);
+	}
+
+	// Pred (B) succeeds on "xxx", so the predicated alt1 wins even though
+	// plain B (alt2) would also match.
+	public void testLexerPredCyclicPrediction() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : A ;\n" +
+			"A : (B)=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}\n" +
+			"  | 'y'+ ';'" +
+			"  ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "s", "xxx", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+	// Pred (B '.') fails on "xxx" (no dot), so control falls to alt2.
+	public void testLexerPredCyclicPrediction2() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : A ;\n" +
+			"A : (B '.')=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}\n" +
+			"  | 'y'+ ';'" +
+			"  ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "s", "xxx", false);
+		assertEquals("alt2\n", found);
+	}
+
+	// Pred on expr recursively invokes atom/expr; the expected trace shows
+	// rules re-entered during both pred evaluation and the real parse.
+	public void testSimpleNestedPred() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : (expr ';')+ ;\n" +
+			"expr\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"@init {System.out.println(\"enter expr \"+input.LT(1).getText());}\n" +
+			"  : (atom 'x') => atom 'x'\n" +
+			"  | atom\n" +
+			";\n" +
+			"atom\n" +
+			"@init {System.out.println(\"enter atom \"+input.LT(1).getText());}\n" +
+			"   : '(' expr ')'\n" +
+			"   | INT\n" +
+			"   ;\n" +
+			"INT: '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "s", "(34)x;", false);
+		String expecting =
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr 34\n" +
+			"enter atom 34\n" +
+			"enter atom 34\n" +
+			"enter atom (\n" +
+			"enter expr 34\n" +
+			"enter atom 34\n" +
+			"enter atom 34\n";
+		assertEquals(expecting, found);
+	}
+
+	// Deeply nested pred evaluation in the lexer on "((34)x)x"; the long
+	// trace reflects repeated rule entry (no memoization here, per comment).
+	public void testTripleNestedPredInLexer() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+			"s : (.)+ {System.out.println(\"done\");} ;\n" +
+			"EXPR\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"@init {System.out.println(\"enter expr \"+(char)input.LT(1));}\n" +
+			"  : (ATOM 'x') => ATOM 'x' {System.out.println(\"ATOM x\");}\n" +
+			"  | ATOM {System.out.println(\"ATOM \"+$ATOM.text);}\n" +
+			";\n" +
+			"fragment ATOM\n" +
+			"@init {System.out.println(\"enter atom \"+(char)input.LT(1));}\n" +
+			"   : '(' EXPR ')'\n" +
+			"   | INT\n" +
+			"   ;\n" +
+			"fragment INT: '0'..'9'+ ;\n" +
+			"fragment WS : (' '|'\\n')+ \n" +
+			"   ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "s", "((34)x)x", false);
+		String expecting = // has no memoization
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"enter atom (\n" +
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"ATOM 34\n" +
+			"ATOM x\n" +
+			"ATOM x\n" +
+			"done\n";
+		assertEquals(expecting, found);
+	}
+
+	// backtrack=true tree parser: input ends with ';' so alt 2 matches.
+	public void testTreeParserWithSynPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT+ (PERIOD|SEMI);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"PERIOD : '.' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n" +
+			"options {k=1; backtrack=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID INT+ PERIOD {System.out.print(\"alt 1\");}"+
+			"  | ID INT+ SEMI   {System.out.print(\"alt 2\");}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3;");
+		assertEquals("alt 2\n", found);
+	}
+
+	// Nested backtracking: a backtracks into b, which itself backtracks;
+	// "b:alt 2" prints before " a:alt 1" because b's action fires first.
+	public void testTreeParserWithNestedSynPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT+ (PERIOD|SEMI);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"PERIOD : '.' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		// backtracks in a and b due to k=1
+		String treeGrammar =
+			"tree grammar TP;\n" +
+			"options {k=1; backtrack=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID b {System.out.print(\" a:alt 1\");}"+
+			"  | ID INT+ SEMI   {System.out.print(\" a:alt 2\");}\n" +
+			"  ;\n" +
+			"b : INT PERIOD  {System.out.print(\"b:alt 1\");}" + // choose this alt for just one INT
+			"  | INT+ PERIOD {System.out.print(\"b:alt 2\");}" +
+			"  ;";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3.");
+		assertEquals("b:alt 2 a:alt 1\n", found);
+	}
+
+	public void testSynPredWithOutputTemplate() throws Exception {
+		// really just seeing if it will compile
+		String grammar =
+			"grammar t;\n" +
+			"options {output=template;}\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : ('x'+ 'y')=> 'x'+ 'y' -> template(a={$text}) <<1:<a>;>>\n" +
+			"  | 'x'+ 'z' -> template(a={$text}) <<2:<a>;>>\n"+
+			"  ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "xxxy", false);
+
+		assertEquals("1:xxxy;\n", found);
+	}
+
+	public void testSynPredWithOutputAST() throws Exception {
+		// really just seeing if it will compile
+		String grammar =
+			"grammar t;\n" +
+			"options {output=AST;}\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : ('x'+ 'y')=> 'x'+ 'y'\n" +
+			"  | 'x'+ 'z'\n"+
+			"  ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "xxxy", false);
+
+		assertEquals("x x x y\n", found);
+	}
+
+	// Pred on an optional (...)? subrule: with input "xx" the optional
+	// branch is taken once, so "b" prints exactly once.
+	public void testOptionalBlockWithSynPred() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+				"\n" +
+				"a : ( (b)=> b {System.out.println(\"b\");})? b ;\n" +
+				"b : 'x' ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "xx", false);
+
+		assertEquals("b\n", found);
+	}
+
+	// Fixed-lookahead (k=2 equivalent) pred: 'a' 'b' selects alt1.
+	public void testSynPredK2() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+				"\n" +
+				"a : (b)=> b {System.out.println(\"alt1\");} | 'a' 'c' ;\n" +
+				"b : 'a' 'b' ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "ab", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+	// Cyclic (k=*) pred: arbitrarily many 'a's before the deciding 'b'.
+	public void testSynPredKStar() throws Exception {
+		String grammar =
+			"grammar t;\n" +
+				"\n" +
+				"a : (b)=> b {System.out.println(\"alt1\");} | 'a'+ 'c' ;\n" +
+				"b : 'a'+ 'b' ;\n" ;
+		String found = execParser("t.g", grammar, "tParser", "tLexer",
+				    "a", "aaab", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestTemplates.java b/src/org/antlr/test/TestTemplates.java
new file mode 100644
index 0000000..be4500e
--- /dev/null
+++ b/src/org/antlr/test/TestTemplates.java
@@ -0,0 +1,343 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.tool.*;
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.codegen.ActionTranslatorLexer;
+
+/** Test templates in actions; %... shorthands */
+public class TestTemplates extends BaseTest {
+	// Expected output embeds the platform line separator because the code
+	// generator emits platform-dependent newlines.
+	private static final String LINE_SEP = System.getProperty("line.separator");
+
+	// %foo(name={...}) shorthand -> templateLib.getInstanceOf with attr map.
+	// NOTE(review): the translator setup below is duplicated in every test;
+	// a shared helper would shrink this class considerably.
+	public void testTemplateConstructor() throws Exception {
+		String action = "x = %foo(name={$ID.text});";
+		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
+			LINE_SEP + "  new STAttrMap().put(\"name\", ID1.getText()));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+										"a",
+										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	// %foo() with no attributes -> plain getInstanceOf call.
+	public void testTemplateConstructorNoArgs() throws Exception {
+		String action = "x = %foo();";
+		String expecting = "x = templateLib.getInstanceOf(\"foo\");";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+										"a",
+										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	// %({expr})(...) — template name computed at runtime from an expression.
+	public void testIndirectTemplateConstructor() throws Exception {
+		String action = "x = %({\"foo\"})(name={$ID.text});";
+		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
+			LINE_SEP + "  new STAttrMap().put(\"name\", ID1.getText()));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+										"a",
+										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	// %{expr} — literal-string template constructor.
+	public void testStringConstructor() throws Exception {
+		String action = "x = %{$ID.text};";
+		String expecting = "x = new StringTemplate(templateLib,ID1.getText());";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	// %x.y = z; — set attribute y on template referenced by x.
+	public void testSetAttr() throws Exception {
+		String action = "%x.y = z;";
+		String expecting = "(x).setAttribute(\"y\", z);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator =
+			new ActionTranslatorLexer(generator,
+										"a",
+										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	// %{expr}.y = z; — set attribute on a template computed by an expression.
+	public void testSetAttrOfExpr() throws Exception {
+		String action = "%{foo($ID.text).getST()}.y = z;";
+		String expecting = "(foo(ID1.getText()).getST()).setAttribute(\"y\", z);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+																	 "a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	// Negative test: "%x .y" is malformed; expects MSG_INVALID_TEMPLATE_ACTION.
+	public void testCannotHaveSpaceBeforeDot() throws Exception {
+		String action = "%x .y = z;";
+		String expecting = null; // NOTE(review): unused local — kept as upstream wrote it
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_TEMPLATE_ACTION;
+		Object expectedArg = "%x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	// Negative test: "%x. y" is malformed; expects MSG_INVALID_TEMPLATE_ACTION.
+	public void testCannotHaveSpaceAfterDot() throws Exception {
+		String action = "%x. y = z;";
+		String expecting = null; // NOTE(review): unused local — kept as upstream wrote it
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_TEMPLATE_ACTION;
+		Object expectedArg = "%x.";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/**
+	 * Assert that equeue contains exactly one error, that it has
+	 * expectedMessage's msgID, is a GrammarSemanticsMessage, and carries the
+	 * same arg/arg2 values.
+	 */
+	protected void checkError(ErrorQueue equeue,
+							  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+		System.out.println(equeue.infos);
+		System.out.println(equeue.warnings);
+		System.out.println(equeue.errors);
+		*/
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = (Message)equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
+		assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
+		assertTrue("couldn't find expected error: "+expectedMessage.msgID, foundMsg!=null);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		assertEquals(expectedMessage.arg2, foundMsg.arg2);
+	}
+
+	// S U P P O R T
+	// Fail if any errors were collected during grammar analysis/codegen.
+	private void assertNoErrors(ErrorQueue equeue) {
+		assertTrue("unexpected errors: "+equeue, equeue.errors.size()==0);
+	}
+}
\ No newline at end of file
diff --git a/src/org/antlr/test/TestTokenRewriteStream.java b/src/org/antlr/test/TestTokenRewriteStream.java
new file mode 100644
index 0000000..18d1190
--- /dev/null
+++ b/src/org/antlr/test/TestTokenRewriteStream.java
@@ -0,0 +1,462 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.TokenRewriteStream;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+
+/** Unit tests for TokenRewriteStream's insert/replace/delete semantics.
+ *  Every test lexes a short input with a trivial single-character lexer
+ *  grammar, applies rewrite operations addressed by token index, and
+ *  checks the text rendered by toString().
+ */
+public class TestTokenRewriteStream extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestTokenRewriteStream() {
+	}
+
+	/** Lex inputText with a trivial a/b/c lexer grammar and return a
+	 *  TokenRewriteStream whose token buffer has been filled.
+	 *  Factored out of the tests, which previously duplicated this setup.
+	 */
+	protected TokenRewriteStream createRewriteStream(String inputText)
+		throws Exception
+	{
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream(inputText);
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		return tokens;
+	}
+
+	// NOTE: assertEquals takes (expected, actual) in JUnit; the original
+	// calls had the arguments swapped, which made failure messages
+	// read backwards.  Fixed throughout.
+
+	public void testInsertBeforeIndex0() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertBefore(0, "0");
+		assertEquals("0abc", tokens.toString());
+	}
+
+	public void testInsertAfterLastIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertAfter(2, "x");
+		assertEquals("abcx", tokens.toString());
+	}
+
+	public void test2InsertBeforeAfterMiddleIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertBefore(1, "x");
+		tokens.insertAfter(1, "x");
+		assertEquals("axbxc", tokens.toString());
+	}
+
+	public void testReplaceIndex0() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(0, "x");
+		assertEquals("xbc", tokens.toString());
+	}
+
+	public void testReplaceLastIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(2, "x");
+		assertEquals("abx", tokens.toString());
+	}
+
+	public void testReplaceMiddleIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(1, "x");
+		assertEquals("axc", tokens.toString());
+	}
+
+	public void test2ReplaceMiddleIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(1, "x");
+		tokens.replace(1, "y"); // last replace of same index wins
+		assertEquals("ayc", tokens.toString());
+	}
+
+	public void testReplaceThenDeleteMiddleIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(1, "x");
+		tokens.delete(1);
+		assertEquals("ac", tokens.toString());
+	}
+
+	public void testReplaceThenInsertSameIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(0, "x");
+		tokens.insertBefore(0, "0");
+		assertEquals("0xbc", tokens.toString());
+	}
+
+	public void testReplaceThen2InsertSameIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(0, "x");
+		tokens.insertBefore(0, "y");
+		tokens.insertBefore(0, "z"); // inserts at same index stack up
+		assertEquals("zyxbc", tokens.toString());
+	}
+
+	public void testInsertThenReplaceSameIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertBefore(0, "0");
+		tokens.replace(0, "x");
+		assertEquals("0xbc", tokens.toString());
+	}
+
+	public void test2InsertMiddleIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(1, "y");
+		assertEquals("ayxbc", tokens.toString());
+	}
+
+	public void test2InsertThenReplaceIndex0() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertBefore(0, "x");
+		tokens.insertBefore(0, "y");
+		tokens.replace(0, "z");
+		assertEquals("yxzbc", tokens.toString());
+	}
+
+	public void testReplaceThenInsertBeforeLastIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(2, "x");
+		tokens.insertBefore(2, "y");
+		assertEquals("abyx", tokens.toString());
+	}
+
+	public void testInsertThenReplaceLastIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.insertBefore(2, "y");
+		tokens.replace(2, "x");
+		assertEquals("abyx", tokens.toString());
+	}
+
+	public void testReplaceThenInsertAfterLastIndex() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abc");
+		tokens.replace(2, "x");
+		tokens.insertAfter(2, "y");
+		assertEquals("abxy", tokens.toString());
+	}
+
+	public void testReplaceRangeThenInsertInMiddle() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(3, "y"); // no effect; can't insert in middle of replaced region
+		assertEquals("abxba", tokens.toString());
+	}
+
+	public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(2, "y");
+		assertEquals("abyxba", tokens.toString());
+	}
+
+	public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(4, "y"); // no effect; within range of a replace
+		assertEquals("abxba", tokens.toString());
+	}
+
+	public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "x");
+		tokens.insertAfter(4, "y");
+		assertEquals("abxyba", tokens.toString());
+	}
+
+	public void testReplaceAll() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(0, 6, "x");
+		assertEquals("x", tokens.toString());
+	}
+
+	public void testReplaceSubsetThenFetch() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "xyz");
+		// render an explicit index range rather than the whole buffer
+		assertEquals("abxyzba", tokens.toString(0,6));
+	}
+
+	public void testReplaceThenReplaceSuperset() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "xyz");
+		tokens.replace(2, 5, "foo"); // kills previous replace
+		assertEquals("abfooa", tokens.toString());
+	}
+
+	public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcccba");
+		tokens.replace(2, 4, "xyz");
+		tokens.replace(1, 3, "foo"); // executes first since 1<2; then ignores replace at 2 as it skips over 1..3
+		assertEquals("afoocba", tokens.toString());
+	}
+
+	public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
+		TokenRewriteStream tokens = createRewriteStream("abcba");
+		tokens.replace(2, 2, "xyz");
+		tokens.replace(0, 3, "foo");
+		assertEquals("fooa", tokens.toString());
+	}
+
+}
diff --git a/src/org/antlr/test/TestTreeNodeStream.java b/src/org/antlr/test/TestTreeNodeStream.java
new file mode 100644
index 0000000..4e48135
--- /dev/null
+++ b/src/org/antlr/test/TestTreeNodeStream.java
@@ -0,0 +1,339 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.*;
+
+/** Test the tree node stream.  The repeated hand-built test trees are
+ *  factored into helper builders; each test otherwise checks the same
+ *  traversal, toString, LT, mark/rewind, and seek behavior as before.
+ */
+public class TestTreeNodeStream extends BaseTest {
+
+	/** Build new stream; let's us override to test other streams. */
+	public TreeNodeStream newStream(Object t) {
+		return new CommonTreeNodeStream(t);
+	}
+
+	/** Build the tree ^(101 ^(102 103) 104) shared by several tests. */
+	protected Tree create4NodeTree() {
+		Tree t = new CommonTree(new CommonToken(101));
+		t.addChild(new CommonTree(new CommonToken(102)));
+		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
+		t.addChild(new CommonTree(new CommonToken(104)));
+		return t;
+	}
+
+	/** Build ^(101 ^(102 103 ^(106 107) ) 104 105) used by the
+	 *  mark/rewind/seek tests.  Serialized, the stream has 7 real plus
+	 *  6 nav nodes: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF.
+	 */
+	protected Tree createBigTree() {
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+		return r0;
+	}
+
+	public void testSingleNode() throws Exception {
+		Tree t = new CommonTree(new CommonToken(101));
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = " 101";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101";
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void test4Nodes() throws Exception {
+		// ^(101 ^(102 103) 104)
+		Tree t = create4NodeTree();
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = " 101 102 103 104";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		// full serialization includes DOWN (2) / UP (3) navigation nodes
+		expecting = " 101 2 102 2 103 3 104 3";
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void testList() throws Exception {
+		// flat list: nil root holding ^(101 ^(102 103) 104) then 105
+		Tree root = new CommonTree((Token)null);
+
+		Tree t = create4NodeTree();
+		Tree u = new CommonTree(new CommonToken(105));
+
+		root.addChild(t);
+		root.addChild(u);
+
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
+		String expecting = " 101 102 103 104 105";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 2 102 2 103 3 104 3 105";
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void testFlatList() throws Exception {
+		Tree root = new CommonTree((Token)null);
+
+		root.addChild(new CommonTree(new CommonToken(101)));
+		root.addChild(new CommonTree(new CommonToken(102)));
+		root.addChild(new CommonTree(new CommonToken(103)));
+
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
+		String expecting = " 101 102 103";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 102 103";
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void testListWithOneNode() throws Exception {
+		Tree root = new CommonTree((Token)null);
+
+		root.addChild(new CommonTree(new CommonToken(101)));
+
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
+		String expecting = " 101";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101";
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void testAoverB() throws Exception {
+		Tree t = new CommonTree(new CommonToken(101));
+		t.addChild(new CommonTree(new CommonToken(102)));
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = " 101 102";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 2 102 3";
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	public void testLT() throws Exception {
+		// ^(101 ^(102 103) 104)
+		Tree t = create4NodeTree();
+
+		TreeNodeStream stream = newStream(t);
+		assertEquals(101, ((Tree)stream.LT(1)).getType());
+		assertEquals(Token.DOWN, ((Tree)stream.LT(2)).getType());
+		assertEquals(102, ((Tree)stream.LT(3)).getType());
+		assertEquals(Token.DOWN, ((Tree)stream.LT(4)).getType());
+		assertEquals(103, ((Tree)stream.LT(5)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(6)).getType());
+		assertEquals(104, ((Tree)stream.LT(7)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(8)).getType());
+		assertEquals(Token.EOF, ((Tree)stream.LT(9)).getType());
+		// check way ahead
+		assertEquals(Token.EOF, ((Tree)stream.LT(100)).getType());
+	}
+
+	public void testMarkRewindEntire() throws Exception {
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(createBigTree());
+		int m = stream.mark(); // MARK
+		for (int k=1; k<=13; k++) { // consume til end
+			stream.LT(1);
+			stream.consume();
+		}
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
+		stream.rewind(m);      // REWIND
+
+		// consume til end again :)
+		for (int k=1; k<=13; k++) { // consume til end
+			stream.LT(1);
+			stream.consume();
+		}
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
+	}
+
+	public void testMarkRewindInMiddle() throws Exception {
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(createBigTree());
+		for (int k=1; k<=7; k++) { // consume til middle
+			//System.out.println(((Tree)stream.LT(1)).getType());
+			stream.consume();
+		}
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		int m = stream.mark(); // MARK
+		stream.consume(); // consume 107
+		stream.consume(); // consume UP
+		stream.consume(); // consume UP
+		stream.consume(); // consume 104
+		stream.rewind(m);      // REWIND
+
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		// now we're past rewind position
+		assertEquals(105, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
+	}
+
+	public void testMarkRewindNested() throws Exception {
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(createBigTree());
+		int m = stream.mark(); // MARK at start
+		stream.consume(); // consume 101
+		stream.consume(); // consume DN
+		int m2 = stream.mark(); // MARK on 102
+		stream.consume(); // consume 102
+		stream.consume(); // consume DN
+		stream.consume(); // consume 103
+		stream.consume(); // consume 106
+		stream.rewind(m2);      // REWIND to 102
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		// stop at 103 and rewind to start
+		stream.rewind(m); // REWIND to 101
+		assertEquals(101, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+	}
+
+	public void testSeek() throws Exception {
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(createBigTree());
+		stream.consume(); // consume 101
+		stream.consume(); // consume DN
+		stream.consume(); // consume 102
+		stream.seek(7);   // seek to 107
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 107
+		stream.consume(); // consume UP
+		stream.consume(); // consume UP
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+	}
+
+	public void testSeekFromStart() throws Exception {
+		CommonTreeNodeStream stream = new CommonTreeNodeStream(createBigTree());
+		stream.seek(7);   // seek to 107
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 107
+		stream.consume(); // consume UP
+		stream.consume(); // consume UP
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+	}
+
+	/** Render the stream's node types, skipping DOWN/UP navigation nodes;
+	 *  each type is preceded by a single space (so output begins with one).
+	 */
+	public String toNodesOnlyString(TreeNodeStream nodes) {
+		StringBuffer buf = new StringBuffer();
+		for (int i=0; i<nodes.size(); i++) {
+			Object t = nodes.LT(i+1);
+			int type = nodes.getTreeAdaptor().getType(t);
+			if ( !(type==Token.DOWN||type==Token.UP) ) {
+				buf.append(" ");
+				buf.append(type);
+			}
+		}
+		return buf.toString();
+	}
+}
diff --git a/src/org/antlr/test/TestTreeParsing.java b/src/org/antlr/test/TestTreeParsing.java
new file mode 100644
index 0000000..e8d90a1
--- /dev/null
+++ b/src/org/antlr/test/TestTreeParsing.java
@@ -0,0 +1,245 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+public class TestTreeParsing extends BaseTest {
+	public void testFlatList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ID INT\n" +
+			"    {System.out.println($ID+\", \"+$INT);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc, 34\n", found);
+	}
+
+	public void testSimpleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT)\n" +
+			"    {System.out.println($ID+\", \"+$INT);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc, 34\n", found);
+	}
+
+	public void testFlatVsTreeDecision() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID INT -> ^(ID INT);\n" +
+			"c : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : b b ;\n" +
+			"b : ID INT    {System.out.print($ID+\" \"+$INT);}\n" +
+			"  | ^(ID INT) {System.out.print(\"^(\"+$ID+\" \"+$INT+')');}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 b 2");
+		assertEquals("^(a 1)b 2\n", found);
+	}
+
+	public void testFlatVsTreeDecision2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID INT+ -> ^(ID INT+);\n" +
+			"c : ID INT+;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : b b ;\n" +
+			"b : ID INT+    {System.out.print($ID+\" \"+$INT);}\n" +
+			"  | ^(x=ID (y=INT)+) {System.out.print(\"^(\"+$x+' '+$y+')');}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a",
+				    "a 1 2 3 b 4 5");
+		assertEquals("^(a 3)b 5\n", found);
+	}
+
+	public void testCyclicDFALookahead() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT+ PERIOD;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"PERIOD : '.' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ID INT+ PERIOD {System.out.print(\"alt 1\");}"+
+			"  | ID INT+ SEMI   {System.out.print(\"alt 2\");}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3.");
+		assertEquals("alt 1\n", found);
+	}
+
+	public void testTemplateOutput() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n" +
+			"options {output=template; ASTLabelType=CommonTree;}\n" +
+			"s : a {System.out.println($a.st);};\n" +
+			"a : ID INT -> {new StringTemplate($INT.text)}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34\n", found);
+	}
+
+	public void testNullableChildList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT? -> ^(ID INT?);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT?)\n" +
+			"    {System.out.println($ID);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("abc\n", found);
+	}
+
+	public void testNullableChildList2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT? SEMI -> ^(ID INT?) SEMI ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT?) SEMI\n" +
+			"    {System.out.println($ID);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
+		assertEquals("abc\n", found);
+	}
+
+	public void testNullableChildList3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT? b) SEMI\n" +
+			"    {System.out.println($ID+\", \"+$b.text);}\n" +
+			"  ;\n"+
+			"b : ID? ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc def;");
+		assertEquals("abc, def\n", found);
+	}
+
+	public void testActionsAfterRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT? SEMI -> ^($x INT?) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a @init {int x=0;} : ^(ID {x=1;} {x=2;} INT?)\n" +
+			"    {System.out.println($ID+\", \"+x);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
+		assertEquals("abc, 2\n", found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestTreeWizard.java b/src/org/antlr/test/TestTreeWizard.java
new file mode 100644
index 0000000..1113fd5
--- /dev/null
+++ b/src/org/antlr/test/TestTreeWizard.java
@@ -0,0 +1,388 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.tree.*;
+
+import java.util.Map;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.HashMap;
+
+public class TestTreeWizard extends BaseTest {
+	protected static final String[] tokens =
+		new String[] {"", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"};
+	protected static final TreeAdaptor adaptor = new CommonTreeAdaptor();
+
+	public void testSingleNode() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("ID");
+		String found = t.toStringTree();
+		String expecting = "ID";
+		assertEquals(expecting, found);
+	}
+
+	public void testSingleNodeWithArg() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("ID[foo]");
+		String found = t.toStringTree();
+		String expecting = "foo";
+		assertEquals(expecting, found);
+	}
+
+	public void testSingleNodeTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A)");
+		String found = t.toStringTree();
+		String expecting = "A";
+		assertEquals(expecting, found);
+	}
+
+	public void testSingleLevelTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C D)");
+		String found = t.toStringTree();
+		String expecting = "(A B C D)";
+		assertEquals(expecting, found);
+	}
+
+	public void testListTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
+		String found = t.toStringTree();
+		String expecting = "A B C";
+		assertEquals(expecting, found);
+	}
+
+	public void testInvalidListTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("A B C");
+		assertTrue(t==null);
+	}
+
+	public void testDoubleLevelTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A (B C) (B D) E)");
+		String found = t.toStringTree();
+		String expecting = "(A (B C) (B D) E)";
+		assertEquals(expecting, found);
+	}
+
+	public void testSingleNodeIndex() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("ID");
+		Map m = wiz.index(t);
+		String found = m.toString();
+		String expecting = "{10=[ID]}";
+		assertEquals(expecting, found);
+	}
+
+	public void testNoRepeatsIndex() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C D)");
+		Map m = wiz.index(t);
+		String found = m.toString();
+		String expecting = "{8=[D], 6=[B], 7=[C], 5=[A]}";
+		assertEquals(expecting, found);
+	}
+
+	public void testRepeatsIndex() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		Map m = wiz.index(t);
+		String found = m.toString();
+		String expecting = "{8=[D, D], 6=[B, B, B], 7=[C], 5=[A, A]}";
+		assertEquals(expecting, found);
+	}
+
+	public void testNoRepeatsVisit() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, wiz.getTokenType("B"), new TreeWizard.Visitor() {
+			public void visit(Object t) {
+				elements.add(t);
+			}
+		});
+		String found = elements.toString();
+		String expecting = "[B]";
+		assertEquals(expecting, found);
+	}
+
+	public void testNoRepeatsVisit2() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, wiz.getTokenType("C"),
+					   new TreeWizard.Visitor() {
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[C]";
+		assertEquals(expecting, found);
+	}
+
+	public void testRepeatsVisit() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, wiz.getTokenType("B"),
+					   new TreeWizard.Visitor() {
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[B, B, B]";
+		assertEquals(expecting, found);
+	}
+
+	public void testRepeatsVisit2() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, wiz.getTokenType("A"),
+					   new TreeWizard.Visitor() {
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[A, A]";
+		assertEquals(expecting, found);
+	}
+
+	public void testRepeatsVisitWithContext() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, wiz.getTokenType("B"),
+		   new TreeWizard.ContextVisitor() {
+			   public void visit(Object t, Object parent, int childIndex, Map labels) {
+				   elements.add(adaptor.getText(t)+"@"+
+								(parent!=null?adaptor.getText(parent):"nil")+
+								"["+childIndex+"]");
+			   }
+		   });
+		String found = elements.toString();
+		String expecting = "[B at A[0], B at A[1], B at A[2]]";
+		assertEquals(expecting, found);
+	}
+
+	public void testRepeatsVisitWithNullParentAndContext() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, wiz.getTokenType("A"),
+		   new TreeWizard.ContextVisitor() {
+			   public void visit(Object t, Object parent, int childIndex, Map labels) {
+				   elements.add(adaptor.getText(t)+"@"+
+								(parent!=null?adaptor.getText(parent):"nil")+
+								"["+childIndex+"]");
+			   }
+		   });
+		String found = elements.toString();
+		String expecting = "[A at nil[0], A at A[1]]";
+		assertEquals(expecting, found);
+	}
+
+	public void testVisitPattern() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A B) D)");
+		final List elements = new ArrayList();
+		wiz.visit(t, "(A B)",
+					   new TreeWizard.Visitor() {
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[A]"; // shouldn't match overall root, just (A B)
+		assertEquals(expecting, found);
+	}
+
+	public void testVisitPatternMultiple() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A B) (D (A B)))");
+		final List elements = new ArrayList();
+		wiz.visit(t, "(A B)",
+					   new TreeWizard.ContextVisitor() {
+						   public void visit(Object t, Object parent, int childIndex, Map labels) {
+							   elements.add(adaptor.getText(t)+"@"+
+											(parent!=null?adaptor.getText(parent):"nil")+
+											"["+childIndex+"]");
+						   }
+					   });
+		String found = elements.toString();
+		String expecting = "[A at A[2], A at D[0]]"; // shouldn't match overall root, just (A B)
+		assertEquals(expecting, found);
+	}
+
+	public void testVisitPatternMultipleWithLabels() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
+		final List elements = new ArrayList();
+		wiz.visit(t, "(%a:A %b:B)",
+					   new TreeWizard.ContextVisitor() {
+						   public void visit(Object t, Object parent, int childIndex, Map labels) {
+							   elements.add(adaptor.getText(t)+"@"+
+											(parent!=null?adaptor.getText(parent):"nil")+
+											"["+childIndex+"]"+labels.get("a")+"&"+labels.get("b"));
+						   }
+					   });
+		String found = elements.toString();
+		String expecting = "[foo at A[2]foo&bar, big at D[0]big&dog]";
+		assertEquals(expecting, found);
+	}
+
+	public void testParse() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		boolean valid = wiz.parse(t, "(A B C)");
+		assertTrue(valid);
+	}
+
+	public void testParseSingleNode() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("A");
+		boolean valid = wiz.parse(t, "A");
+		assertTrue(valid);
+	}
+
+	public void testParseFlatTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
+		boolean valid = wiz.parse(t, "(nil A B C)");
+		assertTrue(valid);
+	}
+
+	public void testWildcard() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		boolean valid = wiz.parse(t, "(A . .)");
+		assertTrue(valid);
+	}
+
+	public void testParseWithText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B[foo] C[bar])");
+		// C pattern has no text arg so despite [bar] in t, no need
+		// to match text--check structure only.
+		boolean valid = wiz.parse(t, "(A B[foo] C)");
+		assertTrue(valid);
+	}
+
+	public void testParseWithTextFails() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		boolean valid = wiz.parse(t, "(A[foo] B C)");
+		assertTrue(!valid); // fails
+	}
+
+	public void testParseLabels() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		Map labels = new HashMap();
+		boolean valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels);
+		assertTrue(valid);
+		assertEquals("A", labels.get("a").toString());
+		assertEquals("B", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+	}
+
+	public void testParseWithWildcardLabels() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		Map labels = new HashMap();
+		boolean valid = wiz.parse(t, "(A %b:. %c:.)", labels);
+		assertTrue(valid);
+		assertEquals("B", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+	}
+
+	public void testParseLabelsAndTestText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B[foo] C)");
+		Map labels = new HashMap();
+		boolean valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels);
+		assertTrue(valid);
+		assertEquals("A", labels.get("a").toString());
+		assertEquals("foo", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+	}
+
+	public void testParseLabelsInNestedTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A (B C) (D E))");
+		Map labels = new HashMap();
+		boolean valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels);
+		assertTrue(valid);
+		assertEquals("A", labels.get("a").toString());
+		assertEquals("B", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+		assertEquals("D", labels.get("d").toString());
+		assertEquals("E", labels.get("e").toString());
+	}
+
+	public void testEquals() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t1 = (CommonTree)wiz.create("(A B C)");
+		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
+		boolean same = TreeWizard.equals(t1, t2, adaptor);
+		assertTrue(same);
+	}
+
+	public void testEqualsWithText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
+		CommonTree t2 = (CommonTree)wiz.create("(A B[foo] C)");
+		boolean same = TreeWizard.equals(t1, t2, adaptor);
+		assertTrue(same);
+	}
+	
+	public void testEqualsWithMismatchedText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
+		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
+		boolean same = TreeWizard.equals(t1, t2, adaptor);
+		assertTrue(!same);
+	}
+
+	public void testFindPattern() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
+		final List subtrees = wiz.find(t, "(A B)");
+		List elements = subtrees;
+		String found = elements.toString();
+		String expecting = "[foo, big]";
+		assertEquals(expecting, found);
+	}
+	
+}
\ No newline at end of file
diff --git a/src/org/antlr/test/TestUnBufferedTreeNodeStream.java b/src/org/antlr/test/TestUnBufferedTreeNodeStream.java
new file mode 100644
index 0000000..8baaeee
--- /dev/null
+++ b/src/org/antlr/test/TestUnBufferedTreeNodeStream.java
@@ -0,0 +1,111 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.tree.*;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+
+/**
+ * Created by IntelliJ IDEA.
+ * User: parrt
+ * Date: Dec 22, 2006
+ * Time: 11:47:55 AM
+ * To change this template use File | Settings | File Templates.
+ */
+public class TestUnBufferedTreeNodeStream extends TestTreeNodeStream {
+
+	public TreeNodeStream newStream(Object t) {
+		return new UnBufferedTreeNodeStream(t);
+	}
+
+	public void testBufferOverflow() throws Exception {
+		StringBuffer buf = new StringBuffer();
+		StringBuffer buf2 = new StringBuffer();
+		// make ^(101 102 ... n)
+		Tree t = new CommonTree(new CommonToken(101));
+		buf.append(" 101");
+		buf2.append(" 101");
+		buf2.append(" ");
+		buf2.append(Token.DOWN);
+		for (int i=0; i<= UnBufferedTreeNodeStream.INITIAL_LOOKAHEAD_BUFFER_SIZE+10; i++) {
+			t.addChild(new CommonTree(new CommonToken(102+i)));
+			buf.append(" ");
+			buf.append(102+i);
+			buf2.append(" ");
+			buf2.append(102+i);
+		}
+		buf2.append(" ");
+		buf2.append(Token.UP);
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = buf.toString();
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = buf2.toString();
+		found = stream.toString();
+		assertEquals(expecting, found);
+	}
+
+	/** Test what happens when tail hits the end of the buffer, but there
+	 *  is more room left.  Specifically that would mean that head is not
+	 *  at 0 but has advanced somewhere to the middle of the lookahead
+	 *  buffer.
+	 *
+	 *  Use consume() to advance N nodes into lookahead.  Then use LT()
+	 *  to load at least INITIAL_LOOKAHEAD_BUFFER_SIZE-N nodes so the
+	 *  buffer has to wrap.
+	 */
+	public void testBufferWrap() throws Exception {
+		int N = 10;
+		// make tree with types: 1 2 ... INITIAL_LOOKAHEAD_BUFFER_SIZE+N
+		Tree t = new CommonTree((Token)null);
+		for (int i=0; i<UnBufferedTreeNodeStream.INITIAL_LOOKAHEAD_BUFFER_SIZE+N; i++) {
+			t.addChild(new CommonTree(new CommonToken(i+1)));
+		}
+
+		// move head to index N
+		TreeNodeStream stream = newStream(t);
+		for (int i=1; i<=N; i++) { // consume N
+			Tree node = (Tree)stream.LT(1);
+			assertEquals(i, node.getType());
+			stream.consume();
+		}
+
+		// now use LT to lookahead past end of buffer
+		int remaining = UnBufferedTreeNodeStream.INITIAL_LOOKAHEAD_BUFFER_SIZE-N;
+		int wrapBy = 4; // wrap around by 4 nodes
+		assertTrue("bad test code; wrapBy must be less than N", wrapBy<N);
+		for (int i=1; i<=remaining+wrapBy; i++) { // wrap past end of buffer
+			Tree node = (Tree)stream.LT(i); // look ahead to ith token
+			assertEquals(N + i, node.getType());
+		}
+	}
+
+}
diff --git a/src/org/antlr/tool/ANTLRErrorListener.java b/src/org/antlr/tool/ANTLRErrorListener.java
new file mode 100644
index 0000000..32237a3
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRErrorListener.java
@@ -0,0 +1,42 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+/** Defines behavior of object able to handle error messages from ANTLR including
+ *  both tool errors like "can't write file" and grammar ambiguity warnings.
+ *  To avoid having to change tools that use ANTLR (like GUIs), I am
+ *  wrapping error data in Message objects and passing them to the listener.
+ *  In this way, users of this interface are less sensitive to changes in
+ *  the info I need for error messages.
+ */
+public interface ANTLRErrorListener {
+	public void info(String msg);
+	public void error(Message msg);
+	public void warning(Message msg);
+	public void error(ToolMessage msg);
+}
diff --git a/src/org/antlr/tool/ANTLRLexer.java b/src/org/antlr/tool/ANTLRLexer.java
new file mode 100644
index 0000000..5e94d77
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRLexer.java
@@ -0,0 +1,1794 @@
+// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRLexer.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import java.io.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import antlr.*;
+
+import java.io.InputStream;
+import antlr.TokenStreamException;
+import antlr.TokenStreamIOException;
+import antlr.TokenStreamRecognitionException;
+import antlr.CharStreamException;
+import antlr.CharStreamIOException;
+import antlr.ANTLRException;
+import java.io.Reader;
+import java.util.Hashtable;
+import antlr.CharScanner;
+import antlr.InputBuffer;
+import antlr.ByteBuffer;
+import antlr.CharBuffer;
+import antlr.Token;
+import antlr.CommonToken;
+import antlr.RecognitionException;
+import antlr.NoViableAltForCharException;
+import antlr.MismatchedCharException;
+import antlr.TokenStream;
+import antlr.ANTLRHashString;
+import antlr.LexerSharedInputState;
+import antlr.collections.impl.BitSet;
+import antlr.SemanticException;
+
+public class ANTLRLexer extends antlr.CharScanner implements ANTLRTokenTypes, TokenStream
+ {
+
+    /** Advance the current column number by one instead of jumping to a
+     *  tab stop: we want the raw char position in the line to be sent to
+     *  tools such as ANTLRWorks.
+     */
+    public void tab() {
+		setColumn( getColumn()+1 );
+    }
+// Generated constructor chain: each overload wraps its argument and
+// delegates down to the LexerSharedInputState constructor, which
+// configures case sensitivity and installs the keyword/literal table.
+public ANTLRLexer(InputStream in) {
+	this(new ByteBuffer(in));
+}
+public ANTLRLexer(Reader in) {
+	this(new CharBuffer(in));
+}
+public ANTLRLexer(InputBuffer ib) {
+	this(new LexerSharedInputState(ib));
+}
+public ANTLRLexer(LexerSharedInputState state) {
+	super(state);
+	caseSensitiveLiterals = true;
+	setCaseSensitive(true);
+	// Keyword -> token-type table generated from antlr.g; the numeric
+	// values presumably mirror constants in ANTLRTokenTypes — do not
+	// edit by hand, regenerate from the grammar instead.
+	literals = new Hashtable();
+	literals.put(new ANTLRHashString("lexer", this), new Integer(40));
+	literals.put(new ANTLRHashString("scope", this), new Integer(32));
+	literals.put(new ANTLRHashString("finally", this), new Integer(64));
+	literals.put(new ANTLRHashString("throws", this), new Integer(58));
+	literals.put(new ANTLRHashString("fragment", this), new Integer(36));
+	literals.put(new ANTLRHashString("private", this), new Integer(54));
+	literals.put(new ANTLRHashString("grammar", this), new Integer(42));
+	literals.put(new ANTLRHashString("tokens", this), new Integer(5));
+	literals.put(new ANTLRHashString("options", this), new Integer(4));
+	literals.put(new ANTLRHashString("parser", this), new Integer(6));
+	literals.put(new ANTLRHashString("tree", this), new Integer(41));
+	literals.put(new ANTLRHashString("protected", this), new Integer(52));
+	literals.put(new ANTLRHashString("returns", this), new Integer(57));
+	literals.put(new ANTLRHashString("public", this), new Integer(53));
+	literals.put(new ANTLRHashString("catch", this), new Integer(63));
+}
+
+/** Generated scanner entry point: loops until a non-SKIP token is
+ *  produced. Single-char lookahead dispatches to the rule methods
+ *  (mWS, mCOMMENT, ...); ambiguous prefixes ('^', '+', '=', '.', '<')
+ *  are resolved with a second lookahead char in the default branch.
+ *  Lexical errors are rewrapped as TokenStreamRecognitionException,
+ *  stream errors as TokenStreamIOException/TokenStreamException.
+ */
+public Token nextToken() throws TokenStreamException {
+	Token theRetToken=null;
+tryAgain:
+	for (;;) {
+		Token _token = null;
+		int _ttype = Token.INVALID_TYPE;
+		resetText();
+		try {   // for char stream error handling
+			try {   // for lexical error handling
+				switch ( LA(1)) {
+				case '\t':  case '\n':  case '\r':  case ' ':
+				{
+					mWS(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '/':
+				{
+					mCOMMENT(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '>':
+				{
+					mCLOSE_ELEMENT_OPTION(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '@':
+				{
+					mAMPERSAND(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case ',':
+				{
+					mCOMMA(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '?':
+				{
+					mQUESTION(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '(':
+				{
+					mLPAREN(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case ')':
+				{
+					mRPAREN(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case ':':
+				{
+					mCOLON(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '*':
+				{
+					mSTAR(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '-':
+				{
+					mREWRITE(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case ';':
+				{
+					mSEMI(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '!':
+				{
+					mBANG(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '|':
+				{
+					mOR(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '~':
+				{
+					mNOT(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '}':
+				{
+					mRCURLY(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '$':
+				{
+					mDOLLAR(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '\'':
+				{
+					mCHAR_LITERAL(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '"':
+				{
+					mDOUBLE_QUOTE_STRING_LITERAL(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '0':  case '1':  case '2':  case '3':
+				case '4':  case '5':  case '6':  case '7':
+				case '8':  case '9':
+				{
+					mINT(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '[':
+				{
+					mARG_ACTION(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case '{':
+				{
+					mACTION(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case 'A':  case 'B':  case 'C':  case 'D':
+				case 'E':  case 'F':  case 'G':  case 'H':
+				case 'I':  case 'J':  case 'K':  case 'L':
+				case 'M':  case 'N':  case 'O':  case 'P':
+				case 'Q':  case 'R':  case 'S':  case 'T':
+				case 'U':  case 'V':  case 'W':  case 'X':
+				case 'Y':  case 'Z':
+				{
+					mTOKEN_REF(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				case 'a':  case 'b':  case 'c':  case 'd':
+				case 'e':  case 'f':  case 'g':  case 'h':
+				case 'i':  case 'j':  case 'k':  case 'l':
+				case 'm':  case 'n':  case 'o':  case 'p':
+				case 'q':  case 'r':  case 's':  case 't':
+				case 'u':  case 'v':  case 'w':  case 'x':
+				case 'y':  case 'z':
+				{
+					mRULE_REF(true);
+					theRetToken=_returnToken;
+					break;
+				}
+				default:
+					// Two-char lookahead disambiguation; the (true) arms
+					// are the single-char fallbacks and must come last.
+					if ((LA(1)=='^') && (LA(2)=='(')) {
+						mTREE_BEGIN(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='+') && (LA(2)=='=')) {
+						mPLUS_ASSIGN(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='=') && (LA(2)=='>')) {
+						mIMPLIES(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='.') && (LA(2)=='.')) {
+						mRANGE(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='<') && (LA(2)=='<')) {
+						mDOUBLE_ANGLE_STRING_LITERAL(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='<') && (true)) {
+						mOPEN_ELEMENT_OPTION(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='+') && (true)) {
+						mPLUS(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='=') && (true)) {
+						mASSIGN(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='^') && (true)) {
+						mROOT(true);
+						theRetToken=_returnToken;
+					}
+					else if ((LA(1)=='.') && (true)) {
+						mWILDCARD(true);
+						theRetToken=_returnToken;
+					}
+				else {
+					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+				}
+				}
+				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+				_ttype = _returnToken.getType();
+				_returnToken.setType(_ttype);
+				return _returnToken;
+			}
+			catch (RecognitionException e) {
+				throw new TokenStreamRecognitionException(e);
+			}
+		}
+		catch (CharStreamException cse) {
+			if ( cse instanceof CharStreamIOException ) {
+				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+			}
+			else {
+				throw new TokenStreamException(cse.getMessage());
+			}
+		}
+	}
+}
+
+	// WS: one whitespace atom — ' ', '\t', or a line end ('\n' optionally
+	// preceded by '\r'); bumps the line counter on newline.
+	public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = WS;
+		int _saveIndex;
+		
+		{
+		switch ( LA(1)) {
+		case ' ':
+		{
+			match(' ');
+			break;
+		}
+		case '\t':
+		{
+			match('\t');
+			break;
+		}
+		case '\n':  case '\r':
+		{
+			{
+			switch ( LA(1)) {
+			case '\r':
+			{
+				match('\r');
+				break;
+			}
+			case '\n':
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+			}
+			}
+			}
+			match('\n');
+			if ( inputState.guessing==0 ) {
+				newline();
+			}
+			break;
+		}
+		default:
+		{
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		}
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// COMMENT: "//" comments go to mSL_COMMENT; block comments go to
+	// mML_COMMENT, whose token type (possibly DOC_COMMENT) replaces ours.
+	public final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = COMMENT;
+		int _saveIndex;
+		Token t=null;
+		
+		{
+		if ((LA(1)=='/') && (LA(2)=='/')) {
+			mSL_COMMENT(false);
+		}
+		else if ((LA(1)=='/') && (LA(2)=='*')) {
+			mML_COMMENT(true);
+			t=_returnToken;
+			if ( inputState.guessing==0 ) {
+				_ttype = t.getType();
+			}
+		}
+		else {
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// SL_COMMENT: "//" to end of line. A syntactic predicate detects a
+	// leading " $ANTLR" marker and routes it to mSRC (a generated-file
+	// directive that resets filename/line); otherwise consumes the rest
+	// of the line non-greedily up to '\n' (optional preceding '\r').
+	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = SL_COMMENT;
+		int _saveIndex;
+		
+		match("//");
+		{
+		boolean synPredMatched139 = false;
+		if (((LA(1)==' ') && (LA(2)=='$'))) {
+			int _m139 = mark();
+			synPredMatched139 = true;
+			inputState.guessing++;
+			try {
+				{
+				match(" $ANTLR");
+				}
+			}
+			catch (RecognitionException pe) {
+				synPredMatched139 = false;
+			}
+			rewind(_m139);
+inputState.guessing--;
+		}
+		if ( synPredMatched139 ) {
+			match(" $ANTLR ");
+			mSRC(false);
+			{
+			switch ( LA(1)) {
+			case '\r':
+			{
+				match('\r');
+				break;
+			}
+			case '\n':
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+			}
+			}
+			}
+			match('\n');
+		}
+		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+			{
+			_loop142:
+			do {
+				// nongreedy exit test
+				if ((LA(1)=='\n'||LA(1)=='\r') && (true)) break _loop142;
+				if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+					matchNot(EOF_CHAR);
+				}
+				else {
+					break _loop142;
+				}
+				
+			} while (true);
+			}
+			{
+			switch ( LA(1)) {
+			case '\r':
+			{
+				match('\r');
+				break;
+			}
+			case '\n':
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+			}
+			}
+			}
+			match('\n');
+		}
+		else {
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		
+		}
+		if ( inputState.guessing==0 ) {
+			newline();
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ML_COMMENT: a block comment. A second '*' right after the opener
+	// (but not immediately followed by '/') upgrades the type to
+	// DOC_COMMENT. Consumes chars non-greedily until the closing
+	// delimiter, counting newlines along the way.
+	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ML_COMMENT;
+		int _saveIndex;
+		
+		match("/*");
+		{
+		if (((LA(1)=='*') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&( LA(2)!='/' )) {
+			match('*');
+			if ( inputState.guessing==0 ) {
+				_ttype = DOC_COMMENT;
+			}
+		}
+		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+		}
+		else {
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		
+		}
+		{
+		_loop148:
+		do {
+			// nongreedy exit test
+			if ((LA(1)=='*') && (LA(2)=='/')) break _loop148;
+			switch ( LA(1)) {
+			case '\r':
+			{
+				match('\r');
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			case '\n':
+			{
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			default:
+				if ((_tokenSet_0.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+					{
+					match(_tokenSet_0);
+					}
+				}
+			else {
+				break _loop148;
+			}
+			}
+		} while (true);
+		}
+		match("*/");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ *  Parses: src <quoted-filename> <line-number>, then strips the quotes
+ *  off the filename and makes the resulting token SKIP.
+ */
+	protected final void mSRC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = SRC;
+		int _saveIndex;
+		Token file=null;
+		Token line=null;
+		
+		match("src");
+		match(' ');
+		mACTION_STRING_LITERAL(true);
+		file=_returnToken;
+		match(' ');
+		mINT(true);
+		line=_returnToken;
+		if ( inputState.guessing==0 ) {
+			
+					newline();
+					setFilename(file.getText().substring(1,file.getText().length()-1));
+					setLine(Integer.parseInt(line.getText())-1);  // -1 because SL_COMMENT will increment the line no. KR
+					_ttype = Token.SKIP; // don't let this go to the parser
+					
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// The following 24 rules are generated boilerplate: each matches one
+	// fixed one- or two-char operator and emits the corresponding token.
+
+	// '<' -> OPEN_ELEMENT_OPTION
+	public final void mOPEN_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = OPEN_ELEMENT_OPTION;
+		int _saveIndex;
+		
+		match('<');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '>' -> CLOSE_ELEMENT_OPTION
+	public final void mCLOSE_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = CLOSE_ELEMENT_OPTION;
+		int _saveIndex;
+		
+		match('>');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '@' -> AMPERSAND (historical token name for the action marker)
+	public final void mAMPERSAND(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = AMPERSAND;
+		int _saveIndex;
+		
+		match('@');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ',' -> COMMA
+	public final void mCOMMA(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = COMMA;
+		int _saveIndex;
+		
+		match(',');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '?' -> QUESTION
+	public final void mQUESTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = QUESTION;
+		int _saveIndex;
+		
+		match('?');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// "^(" -> TREE_BEGIN
+	public final void mTREE_BEGIN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = TREE_BEGIN;
+		int _saveIndex;
+		
+		match("^(");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '(' -> LPAREN
+	public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = LPAREN;
+		int _saveIndex;
+		
+		match('(');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ')' -> RPAREN
+	public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = RPAREN;
+		int _saveIndex;
+		
+		match(')');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ':' -> COLON
+	public final void mCOLON(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = COLON;
+		int _saveIndex;
+		
+		match(':');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '*' -> STAR
+	public final void mSTAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = STAR;
+		int _saveIndex;
+		
+		match('*');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '+' -> PLUS
+	public final void mPLUS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = PLUS;
+		int _saveIndex;
+		
+		match('+');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '=' -> ASSIGN
+	public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ASSIGN;
+		int _saveIndex;
+		
+		match('=');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// "+=" -> PLUS_ASSIGN
+	public final void mPLUS_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = PLUS_ASSIGN;
+		int _saveIndex;
+		
+		match("+=");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// "=>" -> IMPLIES
+	public final void mIMPLIES(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = IMPLIES;
+		int _saveIndex;
+		
+		match("=>");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// "->" -> REWRITE
+	public final void mREWRITE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = REWRITE;
+		int _saveIndex;
+		
+		match("->");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ';' -> SEMI
+	public final void mSEMI(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = SEMI;
+		int _saveIndex;
+		
+		match(';');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '^' -> ROOT
+	public final void mROOT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ROOT;
+		int _saveIndex;
+		
+		match('^');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '!' -> BANG
+	public final void mBANG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = BANG;
+		int _saveIndex;
+		
+		match('!');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '|' -> OR
+	public final void mOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = OR;
+		int _saveIndex;
+		
+		match('|');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '.' -> WILDCARD
+	public final void mWILDCARD(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = WILDCARD;
+		int _saveIndex;
+		
+		match('.');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ".." -> RANGE
+	public final void mRANGE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = RANGE;
+		int _saveIndex;
+		
+		match("..");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '~' -> NOT
+	public final void mNOT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = NOT;
+		int _saveIndex;
+		
+		match('~');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '}' -> RCURLY
+	public final void mRCURLY(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = RCURLY;
+		int _saveIndex;
+		
+		match('}');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// '$' -> DOLLAR
+	public final void mDOLLAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = DOLLAR;
+		int _saveIndex;
+		
+		match('$');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// CHAR_LITERAL: a single-quoted literal with escapes; if the
+	// unescaped content is longer than one char the token is retyped
+	// to STRING_LITERAL.
+	public final void mCHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = CHAR_LITERAL;
+		int _saveIndex;
+		
+		match('\'');
+		{
+		_loop175:
+		do {
+			switch ( LA(1)) {
+			case '\\':
+			{
+				mESC(false);
+				break;
+			}
+			case '\n':
+			{
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			default:
+				if ((_tokenSet_1.member(LA(1)))) {
+					matchNot('\'');
+				}
+			else {
+				break _loop175;
+			}
+			}
+		} while (true);
+		}
+		match('\'');
+		if ( inputState.guessing==0 ) {
+			
+					StringBuffer s = Grammar.getUnescapedStringFromGrammarStringLiteral(new String(text.getBuffer(),_begin,text.length()-_begin));
+					if ( s.length()>1 ) {
+						_ttype = STRING_LITERAL;
+					}
+					
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// ESC: an escape sequence after a backslash — the named escapes
+	// (n r t b f " ' \ >), an octal escape (leading digit 0-3 takes up
+	// to two more digits, 4-7 up to one more), a \\uXXXX hex escape,
+	// or, as a fallback, any single char taken literally.
+	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ESC;
+		int _saveIndex;
+		
+		match('\\');
+		{
+		if ((LA(1)=='n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('n');
+		}
+		else if ((LA(1)=='r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('r');
+		}
+		else if ((LA(1)=='t') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('t');
+		}
+		else if ((LA(1)=='b') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('b');
+		}
+		else if ((LA(1)=='f') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('f');
+		}
+		else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('"');
+		}
+		else if ((LA(1)=='\'') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('\'');
+		}
+		else if ((LA(1)=='\\') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('\\');
+		}
+		else if ((LA(1)=='>') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			match('>');
+		}
+		else if (((LA(1) >= '0' && LA(1) <= '3')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			{
+			matchRange('0','3');
+			}
+			{
+			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				{
+				matchRange('0','9');
+				}
+				{
+				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+					matchRange('0','9');
+				}
+				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+				}
+				else {
+					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+				}
+				
+				}
+			}
+			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+			}
+			else {
+				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+			}
+			
+			}
+		}
+		else if (((LA(1) >= '4' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			{
+			matchRange('4','7');
+			}
+			{
+			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				{
+				matchRange('0','9');
+				}
+			}
+			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+			}
+			else {
+				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+			}
+			
+			}
+		}
+		else if ((LA(1)=='u') && (_tokenSet_2.member(LA(2)))) {
+			match('u');
+			mXDIGIT(false);
+			mXDIGIT(false);
+			mXDIGIT(false);
+			mXDIGIT(false);
+		}
+		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+			matchNot(EOF_CHAR);
+		}
+		else {
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// DOUBLE_QUOTE_STRING_LITERAL: a double-quoted string. A backslash
+	// before a '"' is dropped from the token text (via _saveIndex /
+	// setLength) so the quote itself is kept unescaped; newlines are
+	// allowed and counted.
+	public final void mDOUBLE_QUOTE_STRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = DOUBLE_QUOTE_STRING_LITERAL;
+		int _saveIndex;
+		
+		match('"');
+		{
+		_loop178:
+		do {
+			switch ( LA(1)) {
+			case '\\':
+			{
+				_saveIndex=text.length();
+				match('\\');
+				text.setLength(_saveIndex);
+				match('"');
+				break;
+			}
+			case '\n':
+			{
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			default:
+				if ((_tokenSet_3.member(LA(1)))) {
+					matchNot('"');
+				}
+			else {
+				break _loop178;
+			}
+			}
+		} while (true);
+		}
+		match('"');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	// DOUBLE_ANGLE_STRING_LITERAL: text delimited by "<<" and ">>",
+	// matched non-greedily (stops at the first ">>"), with newline
+	// counting inside.
+	public final void mDOUBLE_ANGLE_STRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = DOUBLE_ANGLE_STRING_LITERAL;
+		int _saveIndex;
+		
+		match("<<");
+		{
+		_loop181:
+		do {
+			// nongreedy exit test
+			if ((LA(1)=='>') && (LA(2)=='>')) break _loop181;
+			if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+			}
+			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				matchNot(EOF_CHAR);
+			}
+			else {
+				break _loop181;
+			}
+			
+		} while (true);
+		}
+		match(">>");
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = XDIGIT;
+		int _saveIndex;
+		
+		switch ( LA(1)) {
+		case '0':  case '1':  case '2':  case '3':
+		case '4':  case '5':  case '6':  case '7':
+		case '8':  case '9':
+		{
+			matchRange('0','9');
+			break;
+		}
+		case 'a':  case 'b':  case 'c':  case 'd':
+		case 'e':  case 'f':
+		{
+			matchRange('a','f');
+			break;
+		}
+		case 'A':  case 'B':  case 'C':  case 'D':
+		case 'E':  case 'F':
+		{
+			matchRange('A','F');
+			break;
+		}
+		default:
+		{
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = DIGIT;
+		int _saveIndex;
+		
+		matchRange('0','9');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = INT;
+		int _saveIndex;
+		
+		{
+		int _cnt195=0;
+		_loop195:
+		do {
+			if (((LA(1) >= '0' && LA(1) <= '9'))) {
+				matchRange('0','9');
+			}
+			else {
+				if ( _cnt195>=1 ) { break _loop195; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+			}
+			
+			_cnt195++;
+		} while (true);
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	public final void mARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ARG_ACTION;
+		int _saveIndex;
+		
+		mNESTED_ARG_ACTION(false);
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mNESTED_ARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = NESTED_ARG_ACTION;
+		int _saveIndex;
+		
+		_saveIndex=text.length();
+		match('[');
+		text.setLength(_saveIndex);
+		{
+		_loop199:
+		do {
+			switch ( LA(1)) {
+			case '[':
+			{
+				mNESTED_ARG_ACTION(false);
+				break;
+			}
+			case '\r':
+			{
+				match('\r');
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			case '\n':
+			{
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			case '"':
+			{
+				mACTION_STRING_LITERAL(false);
+				break;
+			}
+			default:
+				if ((_tokenSet_4.member(LA(1)))) {
+					matchNot(']');
+				}
+			else {
+				break _loop199;
+			}
+			}
+		} while (true);
+		}
+		_saveIndex=text.length();
+		match(']');
+		text.setLength(_saveIndex);
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mACTION_STRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ACTION_STRING_LITERAL;
+		int _saveIndex;
+		
+		match('"');
+		{
+		_loop211:
+		do {
+			switch ( LA(1)) {
+			case '\\':
+			{
+				mACTION_ESC(false);
+				break;
+			}
+			case '\n':
+			{
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			default:
+				if ((_tokenSet_3.member(LA(1)))) {
+					matchNot('"');
+				}
+			else {
+				break _loop211;
+			}
+			}
+		} while (true);
+		}
+		match('"');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ACTION;
+		int _saveIndex;
+		int actionLine=getLine(); int actionColumn = getColumn();
+		
+		mNESTED_ACTION(false);
+		{
+		if ((LA(1)=='?')) {
+			_saveIndex=text.length();
+			match('?');
+			text.setLength(_saveIndex);
+			if ( inputState.guessing==0 ) {
+				_ttype = SEMPRED;
+			}
+		}
+		else {
+		}
+		
+		}
+		if ( inputState.guessing==0 ) {
+			
+						Token t = makeToken(_ttype);
+						String action = new String(text.getBuffer(),_begin,text.length()-_begin);
+						action = action.substring(1,action.length()-1);
+						t.setText(action);
+						t.setLine(actionLine);			// set action line to start
+						t.setColumn(actionColumn);
+						_token = t;
+					
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mNESTED_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = NESTED_ACTION;
+		int _saveIndex;
+		
+		match('{');
+		{
+		_loop205:
+		do {
+			// nongreedy exit test
+			if ((LA(1)=='}') && (true)) break _loop205;
+			if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				{
+				switch ( LA(1)) {
+				case '\r':
+				{
+					match('\r');
+					match('\n');
+					if ( inputState.guessing==0 ) {
+						newline();
+					}
+					break;
+				}
+				case '\n':
+				{
+					match('\n');
+					if ( inputState.guessing==0 ) {
+						newline();
+					}
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+				}
+				}
+				}
+			}
+			else if ((LA(1)=='{') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				mNESTED_ACTION(false);
+			}
+			else if ((LA(1)=='\'') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				mACTION_CHAR_LITERAL(false);
+			}
+			else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+				mCOMMENT(false);
+			}
+			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				mACTION_STRING_LITERAL(false);
+			}
+			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+				matchNot(EOF_CHAR);
+			}
+			else {
+				break _loop205;
+			}
+			
+		} while (true);
+		}
+		match('}');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mACTION_CHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ACTION_CHAR_LITERAL;
+		int _saveIndex;
+		
+		match('\'');
+		{
+		_loop208:
+		do {
+			switch ( LA(1)) {
+			case '\\':
+			{
+				mACTION_ESC(false);
+				break;
+			}
+			case '\n':
+			{
+				match('\n');
+				if ( inputState.guessing==0 ) {
+					newline();
+				}
+				break;
+			}
+			default:
+				if ((_tokenSet_1.member(LA(1)))) {
+					matchNot('\'');
+				}
+			else {
+				break _loop208;
+			}
+			}
+		} while (true);
+		}
+		match('\'');
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mACTION_ESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = ACTION_ESC;
+		int _saveIndex;
+		
+		if ((LA(1)=='\\') && (LA(2)=='\'')) {
+			match("\\'");
+		}
+		else if ((LA(1)=='\\') && (LA(2)=='"')) {
+			match("\\\"");
+		}
+		else if ((LA(1)=='\\') && (_tokenSet_5.member(LA(2)))) {
+			match('\\');
+			{
+			match(_tokenSet_5);
+			}
+		}
+		else {
+			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+		}
+		
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	public final void mTOKEN_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = TOKEN_REF;
+		int _saveIndex;
+		
+		matchRange('A','Z');
+		{
+		_loop216:
+		do {
+			switch ( LA(1)) {
+			case 'a':  case 'b':  case 'c':  case 'd':
+			case 'e':  case 'f':  case 'g':  case 'h':
+			case 'i':  case 'j':  case 'k':  case 'l':
+			case 'm':  case 'n':  case 'o':  case 'p':
+			case 'q':  case 'r':  case 's':  case 't':
+			case 'u':  case 'v':  case 'w':  case 'x':
+			case 'y':  case 'z':
+			{
+				matchRange('a','z');
+				break;
+			}
+			case 'A':  case 'B':  case 'C':  case 'D':
+			case 'E':  case 'F':  case 'G':  case 'H':
+			case 'I':  case 'J':  case 'K':  case 'L':
+			case 'M':  case 'N':  case 'O':  case 'P':
+			case 'Q':  case 'R':  case 'S':  case 'T':
+			case 'U':  case 'V':  case 'W':  case 'X':
+			case 'Y':  case 'Z':
+			{
+				matchRange('A','Z');
+				break;
+			}
+			case '_':
+			{
+				match('_');
+				break;
+			}
+			case '0':  case '1':  case '2':  case '3':
+			case '4':  case '5':  case '6':  case '7':
+			case '8':  case '9':
+			{
+				matchRange('0','9');
+				break;
+			}
+			default:
+			{
+				break _loop216;
+			}
+			}
+		} while (true);
+		}
+		_ttype = testLiteralsTable(_ttype);
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	public final void mRULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = RULE_REF;
+		int _saveIndex;
+		
+			int t=0;
+		
+		
+		t=mINTERNAL_RULE_REF(false);
+		if ( inputState.guessing==0 ) {
+			_ttype=t;
+		}
+		{
+		if (( true )&&(t==OPTIONS)) {
+			mWS_LOOP(false);
+			{
+			if ((LA(1)=='{')) {
+				match('{');
+				if ( inputState.guessing==0 ) {
+					_ttype = OPTIONS;
+				}
+			}
+			else {
+			}
+			
+			}
+		}
+		else if (( true )&&(t==TOKENS)) {
+			mWS_LOOP(false);
+			{
+			if ((LA(1)=='{')) {
+				match('{');
+				if ( inputState.guessing==0 ) {
+					_ttype = TOKENS;
+				}
+			}
+			else {
+			}
+			
+			}
+		}
+		else {
+		}
+		
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final int  mINTERNAL_RULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int t;
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = INTERNAL_RULE_REF;
+		int _saveIndex;
+		
+			t = RULE_REF;
+		
+		
+		matchRange('a','z');
+		{
+		_loop226:
+		do {
+			switch ( LA(1)) {
+			case 'a':  case 'b':  case 'c':  case 'd':
+			case 'e':  case 'f':  case 'g':  case 'h':
+			case 'i':  case 'j':  case 'k':  case 'l':
+			case 'm':  case 'n':  case 'o':  case 'p':
+			case 'q':  case 'r':  case 's':  case 't':
+			case 'u':  case 'v':  case 'w':  case 'x':
+			case 'y':  case 'z':
+			{
+				matchRange('a','z');
+				break;
+			}
+			case 'A':  case 'B':  case 'C':  case 'D':
+			case 'E':  case 'F':  case 'G':  case 'H':
+			case 'I':  case 'J':  case 'K':  case 'L':
+			case 'M':  case 'N':  case 'O':  case 'P':
+			case 'Q':  case 'R':  case 'S':  case 'T':
+			case 'U':  case 'V':  case 'W':  case 'X':
+			case 'Y':  case 'Z':
+			{
+				matchRange('A','Z');
+				break;
+			}
+			case '_':
+			{
+				match('_');
+				break;
+			}
+			case '0':  case '1':  case '2':  case '3':
+			case '4':  case '5':  case '6':  case '7':
+			case '8':  case '9':
+			{
+				matchRange('0','9');
+				break;
+			}
+			default:
+			{
+				break _loop226;
+			}
+			}
+		} while (true);
+		}
+		if ( inputState.guessing==0 ) {
+			t = testLiteralsTable(t);
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+		return t;
+	}
+	
+	protected final void mWS_LOOP(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = WS_LOOP;
+		int _saveIndex;
+		
+		{
+		_loop223:
+		do {
+			switch ( LA(1)) {
+			case '\t':  case '\n':  case '\r':  case ' ':
+			{
+				mWS(false);
+				break;
+			}
+			case '/':
+			{
+				mCOMMENT(false);
+				break;
+			}
+			default:
+			{
+				break _loop223;
+			}
+			}
+		} while (true);
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	protected final void mWS_OPT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+		int _ttype; Token _token=null; int _begin=text.length();
+		_ttype = WS_OPT;
+		int _saveIndex;
+		
+		{
+		if ((_tokenSet_6.member(LA(1)))) {
+			mWS(false);
+		}
+		else {
+		}
+		
+		}
+		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+			_token = makeToken(_ttype);
+			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+		}
+		_returnToken = _token;
+	}
+	
+	
+	private static final long[] mk_tokenSet_0() {
+		long[] data = new long[8];
+		data[0]=-9224L;
+		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+		return data;
+	}
+	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+	private static final long[] mk_tokenSet_1() {
+		long[] data = new long[8];
+		data[0]=-549755814920L;
+		data[1]=-268435457L;
+		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+		return data;
+	}
+	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+	private static final long[] mk_tokenSet_2() {
+		long[] data = { 287948901175001088L, 541165879422L, 0L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+	private static final long[] mk_tokenSet_3() {
+		long[] data = new long[8];
+		data[0]=-17179870216L;
+		data[1]=-268435457L;
+		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+		return data;
+	}
+	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+	private static final long[] mk_tokenSet_4() {
+		long[] data = new long[8];
+		data[0]=-17179878408L;
+		data[1]=-671088641L;
+		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+		return data;
+	}
+	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+	private static final long[] mk_tokenSet_5() {
+		long[] data = new long[8];
+		data[0]=-566935683080L;
+		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+		return data;
+	}
+	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+	private static final long[] mk_tokenSet_6() {
+		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+	
+	}
diff --git a/src/org/antlr/tool/ANTLRLexer.smap b/src/org/antlr/tool/ANTLRLexer.smap
new file mode 100644
index 0000000..21339b2
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRLexer.smap
@@ -0,0 +1,1203 @@
+SMAP
+ANTLRLexer.java
+G
+*S G
+*F
++ 0 antlr.g
+antlr.g
+*L
+0:113
+0:119
+0:125
+0:131
+0:137
+0:143
+0:149
+0:155
+0:161
+0:167
+0:173
+0:179
+0:185
+0:191
+0:197
+0:203
+0:209
+0:215
+0:221
+0:229
+0:235
+0:241
+0:253
+0:265
+0:271
+0:275
+0:279
+0:283
+0:287
+0:291
+0:295
+0:299
+0:303
+0:307
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+1:32
+1:33
+1:34
+1:35
+913:64
+914:65
+915:66
+916:67
+917:68
+918:69
+921:335
+921:336
+921:337
+921:338
+921:341
+921:342
+921:343
+921:344
+921:377
+921:378
+921:379
+921:380
+921:381
+921:383
+921:384
+921:385
+921:386
+921:387
+921:388
+922:347
+922:348
+922:349
+923:352
+923:353
+923:355
+923:356
+923:357
+923:358
+923:365
+923:366
+923:367
+923:368
+923:369
+923:371
+923:372
+923:373
+927:390
+927:391
+927:392
+927:393
+927:412
+927:413
+927:414
+927:415
+927:416
+927:417
+928:394
+928:397
+928:398
+928:399
+928:400
+928:401
+928:402
+928:403
+928:404
+928:406
+928:407
+928:408
+928:409
+932:419
+932:420
+932:421
+932:422
+932:506
+932:507
+932:508
+932:509
+932:510
+932:511
+933:424
+934:426
+934:427
+934:428
+934:429
+934:430
+934:431
+934:433
+934:435
+934:436
+934:437
+934:438
+934:439
+934:440
+934:441
+934:442
+934:443
+934:444
+934:446
+934:447
+934:448
+934:449
+934:456
+934:457
+934:458
+934:459
+934:460
+934:462
+934:463
+934:497
+934:498
+934:499
+934:500
+935:464
+935:465
+935:466
+935:467
+935:468
+935:469
+935:470
+935:471
+935:472
+935:473
+935:474
+935:475
+935:477
+935:478
+935:480
+935:481
+935:482
+935:483
+935:490
+935:491
+935:492
+935:493
+935:494
+935:496
+937:503
+937:504
+941:513
+941:514
+941:515
+941:516
+941:569
+941:570
+941:571
+941:572
+941:573
+941:574
+942:518
+943:520
+943:521
+943:522
+943:523
+943:525
+943:527
+943:528
+943:529
+943:530
+946:533
+946:534
+946:535
+946:536
+946:537
+946:538
+946:556
+946:561
+946:562
+946:563
+946:564
+946:565
+946:566
+946:567
+950:539
+950:540
+950:541
+950:542
+950:543
+950:544
+951:548
+951:549
+951:550
+951:551
+951:552
+952:557
+952:559
+954:568
+957:609
+957:610
+957:611
+957:612
+957:615
+957:616
+957:617
+957:618
+957:619
+957:620
+958:614
+961:622
+961:623
+961:624
+961:625
+961:628
+961:629
+961:630
+961:631
+961:632
+961:633
+962:627
+965:635
+965:636
+965:637
+965:638
+965:640
+965:641
+965:642
+965:643
+965:644
+965:645
+965:646
+967:648
+967:649
+967:650
+967:651
+967:653
+967:654
+967:655
+967:656
+967:657
+967:658
+967:659
+969:661
+969:662
+969:663
+969:664
+969:666
+969:667
+969:668
+969:669
+969:670
+969:671
+969:672
+971:674
+971:675
+971:676
+971:677
+971:679
+971:680
+971:681
+971:682
+971:683
+971:684
+971:685
+973:687
+973:688
+973:689
+973:690
+973:692
+973:693
+973:694
+973:695
+973:696
+973:697
+973:698
+975:700
+975:701
+975:702
+975:703
+975:705
+975:706
+975:707
+975:708
+975:709
+975:710
+975:711
+977:713
+977:714
+977:715
+977:716
+977:718
+977:719
+977:720
+977:721
+977:722
+977:723
+977:724
+979:726
+979:727
+979:728
+979:729
+979:731
+979:732
+979:733
+979:734
+979:735
+979:736
+979:737
+981:739
+981:740
+981:741
+981:742
+981:744
+981:745
+981:746
+981:747
+981:748
+981:749
+981:750
+983:752
+983:753
+983:754
+983:755
+983:757
+983:758
+983:759
+983:760
+983:761
+983:762
+983:763
+985:765
+985:766
+985:767
+985:768
+985:770
+985:771
+985:772
+985:773
+985:774
+985:775
+985:776
+987:778
+987:779
+987:780
+987:781
+987:783
+987:784
+987:785
+987:786
+987:787
+987:788
+987:789
+989:791
+989:792
+989:793
+989:794
+989:796
+989:797
+989:798
+989:799
+989:800
+989:801
+989:802
+991:804
+991:805
+991:806
+991:807
+991:809
+991:810
+991:811
+991:812
+991:813
+991:814
+991:815
+993:817
+993:818
+993:819
+993:820
+993:822
+993:823
+993:824
+993:825
+993:826
+993:827
+993:828
+995:830
+995:831
+995:832
+995:833
+995:835
+995:836
+995:837
+995:838
+995:839
+995:840
+995:841
+997:843
+997:844
+997:845
+997:846
+997:848
+997:849
+997:850
+997:851
+997:852
+997:853
+997:854
+999:856
+999:857
+999:858
+999:859
+999:861
+999:862
+999:863
+999:864
+999:865
+999:866
+999:867
+1001:869
+1001:870
+1001:871
+1001:872
+1001:874
+1001:875
+1001:876
+1001:877
+1001:878
+1001:879
+1001:880
+1003:882
+1003:883
+1003:884
+1003:885
+1003:887
+1003:888
+1003:889
+1003:890
+1003:891
+1003:892
+1003:893
+1005:895
+1005:896
+1005:897
+1005:898
+1005:900
+1005:901
+1005:902
+1005:903
+1005:904
+1005:905
+1005:906
+1007:908
+1007:909
+1007:910
+1007:911
+1007:913
+1007:914
+1007:915
+1007:916
+1007:917
+1007:918
+1007:919
+1009:921
+1009:922
+1009:923
+1009:924
+1009:963
+1009:964
+1009:965
+1009:966
+1009:967
+1009:968
+1010:926
+1010:927
+1010:928
+1010:929
+1010:930
+1010:931
+1010:932
+1010:933
+1010:936
+1010:937
+1010:938
+1010:939
+1010:940
+1010:944
+1010:945
+1010:946
+1010:947
+1010:948
+1010:949
+1010:950
+1010:951
+1010:952
+1010:953
+1010:954
+1011:955
+1012:957
+1013:958
+1014:959
+1015:960
+1019:1073
+1019:1074
+1019:1075
+1019:1076
+1019:1110
+1019:1111
+1019:1112
+1019:1113
+1019:1114
+1019:1115
+1020:1078
+1020:1079
+1020:1080
+1020:1081
+1020:1082
+1020:1083
+1020:1084
+1020:1085
+1020:1086
+1020:1087
+1020:1088
+1020:1091
+1020:1092
+1020:1093
+1020:1094
+1020:1095
+1020:1099
+1020:1100
+1020:1101
+1020:1102
+1020:1103
+1020:1104
+1020:1105
+1020:1106
+1020:1107
+1020:1108
+1020:1109
+1023:1117
+1023:1118
+1023:1119
+1023:1120
+1023:1144
+1023:1145
+1023:1146
+1023:1147
+1023:1148
+1023:1149
+1024:1122
+1024:1123
+1024:1124
+1024:1125
+1024:1126
+1024:1127
+1024:1128
+1024:1129
+1024:1130
+1024:1131
+1024:1133
+1024:1134
+1024:1135
+1024:1136
+1024:1137
+1024:1138
+1024:1139
+1024:1141
+1024:1142
+1024:1143
+1028:970
+1028:971
+1028:972
+1028:973
+1028:975
+1028:1066
+1028:1067
+1028:1068
+1028:1069
+1028:1070
+1028:1071
+1029:977
+1029:978
+1029:979
+1029:982
+1029:985
+1029:988
+1029:991
+1029:994
+1029:997
+1029:1000
+1029:1003
+1029:1032
+1029:1050
+1029:1057
+1029:1060
+1029:1061
+1029:1062
+1029:1063
+1030:980
+1030:981
+1031:983
+1031:984
+1032:986
+1032:987
+1033:989
+1033:990
+1034:992
+1034:993
+1035:995
+1035:996
+1036:998
+1036:999
+1037:1001
+1037:1002
+1038:1004
+1038:1006
+1039:1024
+1039:1026
+1039:1027
+1039:1028
+1039:1029
+1044:1009
+1044:1011
+1045:1016
+1045:1018
+1045:1019
+1045:1020
+1045:1021
+1050:1014
+1050:1015
+1053:1033
+1053:1035
+1054:1042
+1054:1044
+1054:1045
+1054:1046
+1054:1047
+1059:1038
+1059:1040
+1061:1051
+1061:1052
+1061:1053
+1061:1054
+1061:1055
+1061:1056
+1062:1058
+1062:1059
+1067:1188
+1067:1189
+1067:1190
+1067:1191
+1067:1194
+1067:1195
+1067:1196
+1067:1197
+1067:1198
+1067:1199
+1068:1193
+1072:1151
+1072:1152
+1072:1153
+1072:1154
+1072:1156
+1072:1176
+1072:1177
+1072:1178
+1072:1179
+1072:1180
+1072:1181
+1072:1182
+1072:1183
+1072:1184
+1072:1185
+1072:1186
+1073:1157
+1073:1158
+1073:1159
+1073:1160
+1073:1161
+1074:1164
+1074:1165
+1074:1166
+1074:1167
+1075:1170
+1075:1171
+1075:1172
+1075:1173
+1078:1201
+1078:1202
+1078:1203
+1078:1204
+1078:1207
+1078:1208
+1078:1209
+1078:1210
+1078:1211
+1078:1212
+1078:1213
+1078:1214
+1078:1215
+1078:1217
+1078:1218
+1078:1219
+1078:1220
+1078:1221
+1078:1222
+1078:1223
+1078:1224
+1078:1225
+1081:1227
+1081:1228
+1081:1229
+1081:1230
+1081:1233
+1081:1234
+1081:1235
+1081:1236
+1081:1237
+1081:1238
+1083:1232
+1087:1240
+1087:1241
+1087:1242
+1087:1243
+1087:1292
+1087:1293
+1087:1294
+1087:1295
+1087:1296
+1087:1297
+1088:1245
+1088:1246
+1088:1247
+1089:1248
+1089:1249
+1089:1250
+1089:1251
+1089:1279
+1089:1282
+1089:1283
+1089:1284
+1089:1285
+1089:1286
+1089:1287
+1089:1288
+1090:1252
+1090:1253
+1090:1254
+1091:1257
+1091:1258
+1091:1259
+1091:1260
+1091:1261
+1091:1262
+1092:1266
+1092:1267
+1092:1268
+1092:1269
+1092:1270
+1093:1274
+1093:1275
+1093:1276
+1094:1280
+1094:1281
+1096:1289
+1096:1290
+1096:1291
+1099:1340
+1099:1341
+1099:1342
+1099:1343
+1099:1344
+1099:1371
+1099:1372
+1099:1373
+1099:1374
+1099:1375
+1099:1376
+1101:1346
+1102:1348
+1102:1349
+1102:1350
+1102:1351
+1102:1352
+1102:1353
+1102:1355
+1102:1357
+1103:1360
+1104:1362
+1105:1363
+1106:1364
+1107:1365
+1108:1366
+1109:1367
+1110:1368
+1115:1378
+1115:1379
+1115:1380
+1115:1381
+1115:1438
+1115:1439
+1115:1440
+1115:1441
+1115:1442
+1115:1443
+1116:1383
+1117:1384
+1117:1385
+1117:1386
+1117:1387
+1117:1388
+1117:1415
+1117:1418
+1117:1421
+1117:1424
+1117:1427
+1117:1430
+1117:1431
+1117:1432
+1117:1433
+1117:1435
+1117:1436
+1122:1389
+1122:1391
+1122:1409
+1122:1410
+1122:1411
+1122:1412
+1122:1413
+1123:1392
+1123:1393
+1123:1394
+1123:1395
+1123:1396
+1123:1397
+1124:1401
+1124:1402
+1124:1403
+1124:1404
+1124:1405
+1126:1416
+1126:1417
+1127:1419
+1127:1420
+1128:1422
+1128:1423
+1129:1425
+1129:1426
+1130:1428
+1130:1429
+1132:1437
+1136:1445
+1136:1446
+1136:1447
+1136:1448
+1136:1479
+1136:1480
+1136:1481
+1136:1482
+1136:1483
+1136:1484
+1137:1450
+1137:1451
+1137:1452
+1137:1453
+1137:1454
+1137:1455
+1137:1456
+1137:1457
+1137:1460
+1137:1461
+1137:1462
+1137:1463
+1137:1464
+1137:1468
+1137:1469
+1137:1470
+1137:1471
+1137:1472
+1137:1473
+1137:1474
+1137:1475
+1137:1476
+1137:1477
+1137:1478
+1141:1299
+1141:1300
+1141:1301
+1141:1302
+1141:1333
+1141:1334
+1141:1335
+1141:1336
+1141:1337
+1141:1338
+1142:1304
+1142:1305
+1142:1306
+1142:1307
+1142:1308
+1142:1309
+1142:1310
+1142:1311
+1142:1314
+1142:1315
+1142:1316
+1142:1317
+1142:1318
+1142:1322
+1142:1323
+1142:1324
+1142:1325
+1142:1326
+1142:1327
+1142:1328
+1142:1329
+1142:1330
+1142:1331
+1142:1332
+1146:1486
+1146:1487
+1146:1488
+1146:1489
+1146:1493
+1146:1496
+1146:1502
+1146:1503
+1146:1504
+1146:1505
+1146:1507
+1146:1508
+1146:1509
+1146:1510
+1146:1511
+1146:1512
+1147:1491
+1147:1492
+1148:1494
+1148:1495
+1149:1497
+1149:1498
+1149:1500
+1152:1514
+1152:1515
+1152:1516
+1152:1517
+1152:1565
+1152:1566
+1152:1567
+1152:1568
+1152:1569
+1152:1570
+1152:1571
+1154:1519
+1155:1520
+1155:1521
+1155:1522
+1155:1523
+1155:1558
+1155:1559
+1155:1560
+1155:1561
+1155:1562
+1155:1563
+1155:1564
+1160:1524
+1160:1525
+1160:1526
+1160:1527
+1160:1528
+1160:1529
+1160:1530
+1160:1531
+1160:1532
+1160:1535
+1160:1536
+1160:1537
+1160:1538
+1160:1539
+1160:1540
+1160:1541
+1160:1542
+1160:1543
+1160:1546
+1160:1547
+1160:1548
+1160:1551
+1160:1552
+1160:1553
+1160:1554
+1160:1555
+1165:1573
+1165:1574
+1165:1575
+1165:1576
+1165:1618
+1165:1619
+1165:1620
+1165:1621
+1165:1622
+1165:1623
+1166:1578
+1169:1581
+1169:1582
+1169:1583
+1170:1586
+1170:1587
+1170:1589
+1170:1590
+1170:1591
+1170:1592
+1170:1594
+1170:1596
+1170:1599
+1170:1613
+1170:1615
+1171:1600
+1171:1601
+1171:1603
+1171:1604
+1171:1605
+1171:1606
+1171:1608
+1171:1610
+1177:1691
+1177:1692
+1177:1693
+1177:1694
+1177:1717
+1177:1718
+1177:1719
+1177:1720
+1177:1721
+1177:1722
+1178:1696
+1178:1697
+1178:1698
+1178:1699
+1178:1710
+1178:1711
+1178:1712
+1178:1713
+1178:1714
+1178:1715
+1178:1716
+1183:1700
+1183:1701
+1183:1702
+1184:1705
+1184:1706
+1184:1707
+1189:1625
+1189:1626
+1189:1627
+1189:1628
+1189:1629
+1189:1683
+1189:1684
+1189:1685
+1189:1686
+1189:1687
+1189:1688
+1189:1689
+1190:1631
+1193:1634
+1194:1635
+1194:1636
+1194:1637
+1194:1638
+1194:1673
+1194:1674
+1194:1675
+1194:1676
+1194:1677
+1194:1678
+1194:1679
+1199:1639
+1199:1640
+1199:1641
+1199:1642
+1199:1643
+1199:1644
+1199:1645
+1199:1646
+1199:1647
+1199:1650
+1199:1651
+1199:1652
+1199:1653
+1199:1654
+1199:1655
+1199:1656
+1199:1657
+1199:1658
+1199:1661
+1199:1662
+1199:1663
+1199:1666
+1199:1667
+1199:1668
+1199:1669
+1199:1670
+1201:1680
+1201:1681
+1205:1724
+1205:1725
+1205:1726
+1205:1727
+1205:1737
+1205:1738
+1205:1739
+1205:1740
+1205:1741
+1205:1742
+1206:1730
+1206:1731
+1206:1732
+1206:1734
+1214:576
+1214:580
+1214:581
+1214:582
+1214:583
+1214:584
+1214:585
+1214:587
+1214:588
+1214:589
+1214:590
+1214:591
+1214:592
+1214:593
+1214:602
+1214:603
+1214:604
+1214:605
+1214:606
+1214:607
+1215:577
+1215:594
+1216:578
+1216:596
+1217:579
+1217:597
+1218:598
+1219:599
+*E
diff --git a/src/org/antlr/tool/ANTLRParser.java b/src/org/antlr/tool/ANTLRParser.java
new file mode 100644
index 0000000..bda3da7
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRParser.java
@@ -0,0 +1,4172 @@
+// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRParser.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import java.io.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import antlr.*;
+
+import antlr.TokenBuffer;
+import antlr.TokenStreamException;
+import antlr.TokenStreamIOException;
+import antlr.ANTLRException;
+import antlr.LLkParser;
+import antlr.Token;
+import antlr.TokenStream;
+import antlr.RecognitionException;
+import antlr.NoViableAltException;
+import antlr.MismatchedTokenException;
+import antlr.SemanticException;
+import antlr.ParserSharedInputState;
+import antlr.collections.impl.BitSet;
+import antlr.collections.AST;
+import java.util.Hashtable;
+import antlr.ASTFactory;
+import antlr.ASTPair;
+import antlr.collections.impl.ASTArray;
+
+/** Read in an ANTLR grammar and build an AST.  Try not to do
+ *  any actions, just build the tree.
+ *
+ *  The phases are:
+ *
+ *		antlr.g (this file)
+ *		assign.types.g
+ *		define.g
+ *		buildnfa.g
+ *		antlr.print.g (optional)
+ *		codegen.g
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  2005
+ */
+public class ANTLRParser extends antlr.LLkParser       implements ANTLRTokenTypes
+ {
+
+	Grammar grammar = null;
+	protected int gtype = 0;
+	protected String currentRuleName = null;
+	protected GrammarAST currentBlockAST = null;
+
+	/* this next stuff supports construction of the Tokens artificial rule.
+	   I hate having some partial functionality here, I like doing everything
+	   in future tree passes, but the Tokens rule is sensitive to filter mode.
+	   And if it adds syn preds, future tree passes will need to process the
+	   fragments defined in Tokens; a cyclic dependency.
+	   As of 1-17-06 then, Tokens is created for lexer grammars in the
+	   antlr grammar parser itself.
+
+	   This grammar is also sensitive to the backtrack grammar option that
+	   tells ANTLR to automatically backtrack when it can't compute a DFA.
+
+	   7-2-06 I moved all option processing to antlr.g from define.g as I
+	   need backtrack option etc... for blocks.  Got messy.
+	*/
+	protected List lexerRuleNames = new ArrayList();
+	public List getLexerRuleNames() { return lexerRuleNames; }
+
+	protected GrammarAST setToBlockWithSet(GrammarAST b) {
+		GrammarAST alt = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT,"ALT")).add(b).add((GrammarAST)astFactory.create(EOA,"<end-of-alt>")));
+		prefixWithSynPred(alt);
+		return (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK,"BLOCK")).add(alt).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));
+	}
+
+	/** Create a copy of the alt and make it into a BLOCK; all actions,
+	 *  labels, tree operators, rewrites are removed.
+	 */
+	protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {
+		//GrammarAST nalt = (GrammarAST)astFactory.dupTree(alt);
+		GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);
+		GrammarAST blk = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK,"BLOCK")).add(nalt).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));
+		return blk;
+	}
+
+	/** Rewrite alt to have a synpred as first element;
+	 *  (xxx)=>xxx
+	 *  but only if they didn't specify one manually.
+	 */
+	protected void prefixWithSynPred(GrammarAST alt) {
+		// if they want backtracking and it's not a lexer rule in combined grammar
+		String autoBacktrack = (String)currentBlockAST.getOption("backtrack");
+		if ( autoBacktrack==null ) {
+			autoBacktrack = (String)grammar.getOption("backtrack");
+		}
+		if ( autoBacktrack!=null&&autoBacktrack.equals("true") &&
+			 !(gtype==COMBINED_GRAMMAR &&
+			 Character.isUpperCase(currentRuleName.charAt(0))) &&
+			 alt.getFirstChild().getType()!=SYN_SEMPRED )
+		{
+			// duplicate alt and make a synpred block around that dup'd alt
+			GrammarAST synpredBlockAST = createBlockFromDupAlt(alt);
+
+			// Create a BACKTRACK_SEMPRED node as if user had typed this in
+			// Effectively we replace (xxx)=>xxx with {synpredxxx}? xxx
+			GrammarAST synpredAST = createSynSemPredFromBlock(synpredBlockAST,
+															  BACKTRACK_SEMPRED);
+
+			// insert BACKTRACK_SEMPRED as first element of alt
+			synpredAST.getLastSibling().setNextSibling(alt.getFirstChild());
+			alt.setFirstChild(synpredAST);
+		}
+	}
+
+	protected GrammarAST createSynSemPredFromBlock(GrammarAST synpredBlockAST,
+												   int synpredTokenType)
+	{
+		// add grammar fragment to a list so we can make fake rules for them
+		// later.
+		String predName = grammar.defineSyntacticPredicate(synpredBlockAST,currentRuleName);
+		// convert (alpha)=> into {synpredN}? where N is some pred count
+		// during code gen we convert to function call with templates
+		String synpredinvoke = predName;
+		GrammarAST p = (GrammarAST)astFactory.create(synpredTokenType,synpredinvoke);
+		p.setEnclosingRule(currentRuleName);
+		// track how many decisions have synpreds
+		grammar.blocksWithSynPreds.add(currentBlockAST);
+		return p;
+	}
+
+	public GrammarAST createSimpleRuleAST(String name,
+										  GrammarAST block,
+										  boolean fragment)
+   {
+   		GrammarAST modifier = null;
+   		if ( fragment ) {
+   			modifier = (GrammarAST)astFactory.create(FRAGMENT,"fragment");
+   		}
+   		GrammarAST EORAST = (GrammarAST)astFactory.create(EOR,"<end-of-rule>");
+   		GrammarAST EOBAST = block.getLastChild();
+		EORAST.setLine(EOBAST.getLine());
+		EORAST.setColumn(EOBAST.getColumn());
+		GrammarAST ruleAST =
+		   (GrammarAST)astFactory.make( (new ASTArray(8)).add((GrammarAST)astFactory.create(RULE,"rule")).add((GrammarAST)astFactory.create(ID,name)).add(modifier).add((GrammarAST)astFactory.create(ARG,"ARG")).add((GrammarAST)astFactory.create(RET,"RET")).add((GrammarAST)astFactory.create(SCOPE,"scope")).add(block).add(EORAST));
+		ruleAST.setLine(block.getLine());
+		ruleAST.setColumn(block.getColumn());
+		return ruleAST;
+	}
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		try {
+			token = LT(1);
+		}
+		catch (TokenStreamException tse) {
+			ErrorManager.internalError("can't get token???", tse);
+		}
+		ErrorManager.syntaxError(
+			ErrorManager.MSG_SYNTAX_ERROR,
+			grammar,
+			token,
+			"antlr: "+ex.toString(),
+			ex);
+    }
+
+    public void cleanup(GrammarAST root) {
+		if ( gtype==LEXER_GRAMMAR ) {
+			String filter = (String)grammar.getOption("filter");
+			GrammarAST tokensRuleAST =
+			    grammar.addArtificialMatchTokensRule(
+			    	root,
+			    	lexerRuleNames,
+			    	filter!=null&&filter.equals("true"));
+		}
+    }
+
+protected ANTLRParser(TokenBuffer tokenBuf, int k) {
+  super(tokenBuf,k);
+  tokenNames = _tokenNames;
+  buildTokenTypeASTClassMap();
+  astFactory = new ASTFactory(getTokenTypeToASTClassMap());
+}
+
+public ANTLRParser(TokenBuffer tokenBuf) {
+  this(tokenBuf,2);
+}
+
+protected ANTLRParser(TokenStream lexer, int k) {
+  super(lexer,k);
+  tokenNames = _tokenNames;
+  buildTokenTypeASTClassMap();
+  astFactory = new ASTFactory(getTokenTypeToASTClassMap());
+}
+
+public ANTLRParser(TokenStream lexer) {
+  this(lexer,2);
+}
+
+public ANTLRParser(ParserSharedInputState state) {
+  super(state,2);
+  tokenNames = _tokenNames;
+  buildTokenTypeASTClassMap();
+  astFactory = new ASTFactory(getTokenTypeToASTClassMap());
+}
+
+	public final void grammar(
+		Grammar g
+	) throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST grammar_AST = null;
+		Token  cmt = null;
+		GrammarAST cmt_AST = null;
+		GrammarAST gr_AST = null;
+		GrammarAST gid_AST = null;
+		GrammarAST ts_AST = null;
+		GrammarAST scopes_AST = null;
+		GrammarAST a_AST = null;
+		GrammarAST r_AST = null;
+		
+			this.grammar = g;
+			GrammarAST opt=null;
+			Token optionsStartToken = null;
+			Map opts;
+		
+		
+		try {      // for error handling
+			{
+			switch ( LA(1)) {
+			case ACTION:
+			{
+				GrammarAST tmp1_AST = null;
+				tmp1_AST = (GrammarAST)astFactory.create(LT(1));
+				match(ACTION);
+				break;
+			}
+			case PARSER:
+			case DOC_COMMENT:
+			case LITERAL_lexer:
+			case LITERAL_tree:
+			case LITERAL_grammar:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case DOC_COMMENT:
+			{
+				cmt = LT(1);
+				cmt_AST = (GrammarAST)astFactory.create(cmt);
+				match(DOC_COMMENT);
+				break;
+			}
+			case PARSER:
+			case LITERAL_lexer:
+			case LITERAL_tree:
+			case LITERAL_grammar:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			grammarType();
+			gr_AST = (GrammarAST)returnAST;
+			id();
+			gid_AST = (GrammarAST)returnAST;
+			GrammarAST tmp2_AST = null;
+			tmp2_AST = (GrammarAST)astFactory.create(LT(1));
+			match(SEMI);
+			{
+			switch ( LA(1)) {
+			case OPTIONS:
+			{
+				optionsStartToken=LT(1);
+				opts=optionsSpec();
+				grammar.setOptions(opts, optionsStartToken);
+				opt=(GrammarAST)returnAST;
+				break;
+			}
+			case TOKENS:
+			case SCOPE:
+			case FRAGMENT:
+			case DOC_COMMENT:
+			case AMPERSAND:
+			case TOKEN_REF:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			case RULE_REF:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case TOKENS:
+			{
+				tokensSpec();
+				ts_AST = (GrammarAST)returnAST;
+				break;
+			}
+			case SCOPE:
+			case FRAGMENT:
+			case DOC_COMMENT:
+			case AMPERSAND:
+			case TOKEN_REF:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			case RULE_REF:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			attrScopes();
+			scopes_AST = (GrammarAST)returnAST;
+			{
+			switch ( LA(1)) {
+			case AMPERSAND:
+			{
+				actions();
+				a_AST = (GrammarAST)returnAST;
+				break;
+			}
+			case FRAGMENT:
+			case DOC_COMMENT:
+			case TOKEN_REF:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			case RULE_REF:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			rules();
+			r_AST = (GrammarAST)returnAST;
+			GrammarAST tmp3_AST = null;
+			tmp3_AST = (GrammarAST)astFactory.create(LT(1));
+			match(Token.EOF_TYPE);
+			grammar_AST = (GrammarAST)currentAST.root;
+			
+			grammar_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(null).add((GrammarAST)astFactory.make( (new ASTArray(8)).add(gr_AST).add(gid_AST).add(cmt_AST).add(opt).add(ts_AST).add(scopes_AST).add(a_AST).add(r_AST))));
+			cleanup(grammar_AST);
+			
+			currentAST.root = grammar_AST;
+			currentAST.child = grammar_AST!=null &&grammar_AST.getFirstChild()!=null ?
+				grammar_AST.getFirstChild() : grammar_AST;
+			currentAST.advanceChildToEnd();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_0);
+		}
+		returnAST = grammar_AST;
+	}
+	
+	public final void grammarType() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST grammarType_AST = null;
+		Token  gr = null;
+		GrammarAST gr_AST = null;
+		
+		try {      // for error handling
+			{
+			switch ( LA(1)) {
+			case LITERAL_lexer:
+			{
+				match(LITERAL_lexer);
+				gtype=LEXER_GRAMMAR;
+				break;
+			}
+			case PARSER:
+			{
+				match(PARSER);
+				gtype=PARSER_GRAMMAR;
+				break;
+			}
+			case LITERAL_tree:
+			{
+				match(LITERAL_tree);
+				gtype=TREE_GRAMMAR;
+				break;
+			}
+			case LITERAL_grammar:
+			{
+				gtype=COMBINED_GRAMMAR;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			gr = LT(1);
+			gr_AST = (GrammarAST)astFactory.create(gr);
+			astFactory.addASTChild(currentAST, gr_AST);
+			match(LITERAL_grammar);
+			gr_AST.setType(gtype);
+			grammarType_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_1);
+		}
+		returnAST = grammarType_AST;
+	}
+	
+	public final void id() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST id_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case TOKEN_REF:
+			{
+				GrammarAST tmp7_AST = null;
+				tmp7_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp7_AST);
+				match(TOKEN_REF);
+				id_AST = (GrammarAST)currentAST.root;
+				id_AST.setType(ID);
+				id_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case RULE_REF:
+			{
+				GrammarAST tmp8_AST = null;
+				tmp8_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp8_AST);
+				match(RULE_REF);
+				id_AST = (GrammarAST)currentAST.root;
+				id_AST.setType(ID);
+				id_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_2);
+		}
+		returnAST = id_AST;
+	}
+	
+	public final Map  optionsSpec() throws RecognitionException, TokenStreamException {
+		Map opts=new HashMap();
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST optionsSpec_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp9_AST = null;
+			tmp9_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp9_AST);
+			match(OPTIONS);
+			{
+			int _cnt17=0;
+			_loop17:
+			do {
+				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+					option(opts);
+					astFactory.addASTChild(currentAST, returnAST);
+					match(SEMI);
+				}
+				else {
+					if ( _cnt17>=1 ) { break _loop17; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt17++;
+			} while (true);
+			}
+			match(RCURLY);
+			optionsSpec_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_3);
+		}
+		returnAST = optionsSpec_AST;
+		return opts;
+	}
+	
+	public final void tokensSpec() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST tokensSpec_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp12_AST = null;
+			tmp12_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp12_AST);
+			match(TOKENS);
+			{
+			int _cnt22=0;
+			_loop22:
+			do {
+				if ((LA(1)==TOKEN_REF)) {
+					tokenSpec();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					if ( _cnt22>=1 ) { break _loop22; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt22++;
+			} while (true);
+			}
+			match(RCURLY);
+			tokensSpec_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_4);
+		}
+		returnAST = tokensSpec_AST;
+	}
+	
+	public final void attrScopes() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST attrScopes_AST = null;
+		
+		try {      // for error handling
+			{
+			_loop28:
+			do {
+				if ((LA(1)==SCOPE)) {
+					attrScope();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					break _loop28;
+				}
+				
+			} while (true);
+			}
+			attrScopes_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_5);
+		}
+		returnAST = attrScopes_AST;
+	}
+	
+	public final void actions() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST actions_AST = null;
+		
+		try {      // for error handling
+			{
+			int _cnt11=0;
+			_loop11:
+			do {
+				if ((LA(1)==AMPERSAND)) {
+					action();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					if ( _cnt11>=1 ) { break _loop11; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt11++;
+			} while (true);
+			}
+			actions_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_6);
+		}
+		returnAST = actions_AST;
+	}
+	
+	public final void rules() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rules_AST = null;
+		
+		try {      // for error handling
+			{
+			int _cnt32=0;
+			_loop32:
+			do {
+				if ((_tokenSet_6.member(LA(1)))) {
+					rule();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					if ( _cnt32>=1 ) { break _loop32; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt32++;
+			} while (true);
+			}
+			rules_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_0);
+		}
+		returnAST = rules_AST;
+	}
+	
+/** Match stuff like @parser::members {int i;} */
+	public final void action() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST action_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp14_AST = null;
+			tmp14_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp14_AST);
+			match(AMPERSAND);
+			{
+			if ((_tokenSet_7.member(LA(1))) && (LA(2)==COLON)) {
+				actionScopeName();
+				astFactory.addASTChild(currentAST, returnAST);
+				match(COLON);
+				match(COLON);
+			}
+			else if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==ACTION)) {
+			}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			
+			}
+			id();
+			astFactory.addASTChild(currentAST, returnAST);
+			GrammarAST tmp17_AST = null;
+			tmp17_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp17_AST);
+			match(ACTION);
+			action_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_5);
+		}
+		returnAST = action_AST;
+	}
+	
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+	public final void actionScopeName() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST actionScopeName_AST = null;
+		Token  l = null;
+		GrammarAST l_AST = null;
+		Token  p = null;
+		GrammarAST p_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case TOKEN_REF:
+			case RULE_REF:
+			{
+				id();
+				astFactory.addASTChild(currentAST, returnAST);
+				actionScopeName_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case LITERAL_lexer:
+			{
+				l = LT(1);
+				l_AST = (GrammarAST)astFactory.create(l);
+				astFactory.addASTChild(currentAST, l_AST);
+				match(LITERAL_lexer);
+				l_AST.setType(ID);
+				actionScopeName_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case PARSER:
+			{
+				p = LT(1);
+				p_AST = (GrammarAST)astFactory.create(p);
+				astFactory.addASTChild(currentAST, p_AST);
+				match(PARSER);
+				p_AST.setType(ID);
+				actionScopeName_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_8);
+		}
+		returnAST = actionScopeName_AST;
+	}
+	
+	public final void option(
+		Map opts
+	) throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST option_AST = null;
+		GrammarAST o_AST = null;
+		
+		Object value=null;
+		
+		
+		try {      // for error handling
+			id();
+			o_AST = (GrammarAST)returnAST;
+			astFactory.addASTChild(currentAST, returnAST);
+			GrammarAST tmp18_AST = null;
+			tmp18_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp18_AST);
+			match(ASSIGN);
+			value=optionValue();
+			astFactory.addASTChild(currentAST, returnAST);
+			
+				opts.put(o_AST.getText(), value);
+				
+			option_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_9);
+		}
+		returnAST = option_AST;
+	}
+	
+	public final Object  optionValue() throws RecognitionException, TokenStreamException {
+		Object value=null;
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST optionValue_AST = null;
+		GrammarAST x_AST = null;
+		Token  s = null;
+		GrammarAST s_AST = null;
+		Token  c = null;
+		GrammarAST c_AST = null;
+		Token  i = null;
+		GrammarAST i_AST = null;
+		Token  ss = null;
+		GrammarAST ss_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case TOKEN_REF:
+			case RULE_REF:
+			{
+				id();
+				x_AST = (GrammarAST)returnAST;
+				astFactory.addASTChild(currentAST, returnAST);
+				value = x_AST.getText();
+				optionValue_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = LT(1);
+				s_AST = (GrammarAST)astFactory.create(s);
+				astFactory.addASTChild(currentAST, s_AST);
+				match(STRING_LITERAL);
+				String vs = s_AST.getText();
+				value=vs.substring(1,vs.length()-1);
+				optionValue_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				c = LT(1);
+				c_AST = (GrammarAST)astFactory.create(c);
+				astFactory.addASTChild(currentAST, c_AST);
+				match(CHAR_LITERAL);
+				String vs = c_AST.getText();
+				value=vs.substring(1,vs.length()-1);
+				optionValue_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case INT:
+			{
+				i = LT(1);
+				i_AST = (GrammarAST)astFactory.create(i);
+				astFactory.addASTChild(currentAST, i_AST);
+				match(INT);
+				value = new Integer(i_AST.getText());
+				optionValue_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case STAR:
+			{
+				ss = LT(1);
+				ss_AST = (GrammarAST)astFactory.create(ss);
+				astFactory.addASTChild(currentAST, ss_AST);
+				match(STAR);
+				ss_AST.setType(STRING_LITERAL); value = "*";
+				optionValue_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_9);
+		}
+		returnAST = optionValue_AST;
+		return value;
+	}
+	
+	public final void tokenSpec() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST tokenSpec_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp19_AST = null;
+			tmp19_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp19_AST);
+			match(TOKEN_REF);
+			{
+			switch ( LA(1)) {
+			case ASSIGN:
+			{
+				GrammarAST tmp20_AST = null;
+				tmp20_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.makeASTRoot(currentAST, tmp20_AST);
+				match(ASSIGN);
+				{
+				switch ( LA(1)) {
+				case STRING_LITERAL:
+				{
+					GrammarAST tmp21_AST = null;
+					tmp21_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.addASTChild(currentAST, tmp21_AST);
+					match(STRING_LITERAL);
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					GrammarAST tmp22_AST = null;
+					tmp22_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.addASTChild(currentAST, tmp22_AST);
+					match(CHAR_LITERAL);
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				break;
+			}
+			case SEMI:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			match(SEMI);
+			tokenSpec_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_10);
+		}
+		returnAST = tokenSpec_AST;
+	}
+	
+	public final void attrScope() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST attrScope_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp24_AST = null;
+			tmp24_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp24_AST);
+			match(SCOPE);
+			id();
+			astFactory.addASTChild(currentAST, returnAST);
+			GrammarAST tmp25_AST = null;
+			tmp25_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp25_AST);
+			match(ACTION);
+			attrScope_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_4);
+		}
+		returnAST = attrScope_AST;
+	}
+	
+	public final void rule() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rule_AST = null;
+		Token  d = null;
+		GrammarAST d_AST = null;
+		Token  p1 = null;
+		GrammarAST p1_AST = null;
+		Token  p2 = null;
+		GrammarAST p2_AST = null;
+		Token  p3 = null;
+		GrammarAST p3_AST = null;
+		Token  p4 = null;
+		GrammarAST p4_AST = null;
+		GrammarAST ruleName_AST = null;
+		Token  aa = null;
+		GrammarAST aa_AST = null;
+		Token  rt = null;
+		GrammarAST rt_AST = null;
+		GrammarAST scopes_AST = null;
+		GrammarAST a_AST = null;
+		Token  colon = null;
+		GrammarAST colon_AST = null;
+		GrammarAST b_AST = null;
+		Token  semi = null;
+		GrammarAST semi_AST = null;
+		GrammarAST ex_AST = null;
+		
+		GrammarAST modifier=null, blk=null, blkRoot=null, eob=null;
+		int start = ((TokenWithIndex)LT(1)).getIndex();
+		int startLine = LT(1).getLine();
+		GrammarAST opt = null;
+		Map opts = null;
+		
+		
+		try {      // for error handling
+			{
+			switch ( LA(1)) {
+			case DOC_COMMENT:
+			{
+				d = LT(1);
+				d_AST = (GrammarAST)astFactory.create(d);
+				match(DOC_COMMENT);
+				break;
+			}
+			case FRAGMENT:
+			case TOKEN_REF:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			case RULE_REF:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case LITERAL_protected:
+			{
+				p1 = LT(1);
+				p1_AST = (GrammarAST)astFactory.create(p1);
+				match(LITERAL_protected);
+				modifier=p1_AST;
+				break;
+			}
+			case LITERAL_public:
+			{
+				p2 = LT(1);
+				p2_AST = (GrammarAST)astFactory.create(p2);
+				match(LITERAL_public);
+				modifier=p2_AST;
+				break;
+			}
+			case LITERAL_private:
+			{
+				p3 = LT(1);
+				p3_AST = (GrammarAST)astFactory.create(p3);
+				match(LITERAL_private);
+				modifier=p3_AST;
+				break;
+			}
+			case FRAGMENT:
+			{
+				p4 = LT(1);
+				p4_AST = (GrammarAST)astFactory.create(p4);
+				match(FRAGMENT);
+				modifier=p4_AST;
+				break;
+			}
+			case TOKEN_REF:
+			case RULE_REF:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			id();
+			ruleName_AST = (GrammarAST)returnAST;
+			currentRuleName=ruleName_AST.getText();
+			if ( gtype==LEXER_GRAMMAR && p4_AST==null ) {
+			lexerRuleNames.add(currentRuleName);
+				 }
+				
+			{
+			switch ( LA(1)) {
+			case BANG:
+			{
+				GrammarAST tmp26_AST = null;
+				tmp26_AST = (GrammarAST)astFactory.create(LT(1));
+				match(BANG);
+				break;
+			}
+			case OPTIONS:
+			case SCOPE:
+			case AMPERSAND:
+			case COLON:
+			case ARG_ACTION:
+			case LITERAL_returns:
+			case LITERAL_throws:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case ARG_ACTION:
+			{
+				aa = LT(1);
+				aa_AST = (GrammarAST)astFactory.create(aa);
+				match(ARG_ACTION);
+				break;
+			}
+			case OPTIONS:
+			case SCOPE:
+			case AMPERSAND:
+			case COLON:
+			case LITERAL_returns:
+			case LITERAL_throws:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case LITERAL_returns:
+			{
+				match(LITERAL_returns);
+				rt = LT(1);
+				rt_AST = (GrammarAST)astFactory.create(rt);
+				match(ARG_ACTION);
+				break;
+			}
+			case OPTIONS:
+			case SCOPE:
+			case AMPERSAND:
+			case COLON:
+			case LITERAL_throws:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case LITERAL_throws:
+			{
+				throwsSpec();
+				break;
+			}
+			case OPTIONS:
+			case SCOPE:
+			case AMPERSAND:
+			case COLON:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			{
+			switch ( LA(1)) {
+			case OPTIONS:
+			{
+				opts=optionsSpec();
+				opt=(GrammarAST)returnAST;
+				break;
+			}
+			case SCOPE:
+			case AMPERSAND:
+			case COLON:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			ruleScopeSpec();
+			scopes_AST = (GrammarAST)returnAST;
+			{
+			switch ( LA(1)) {
+			case AMPERSAND:
+			{
+				ruleActions();
+				a_AST = (GrammarAST)returnAST;
+				break;
+			}
+			case COLON:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			colon = LT(1);
+			colon_AST = (GrammarAST)astFactory.create(colon);
+			match(COLON);
+			
+				blkRoot = (GrammarAST)astFactory.create(BLOCK,"BLOCK");
+				blkRoot.options = opts;
+				blkRoot.setLine(colon.getLine());
+				blkRoot.setColumn(colon.getColumn());
+				eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
+			
+			altList(opts);
+			b_AST = (GrammarAST)returnAST;
+			blk = b_AST;
+			semi = LT(1);
+			semi_AST = (GrammarAST)astFactory.create(semi);
+			match(SEMI);
+			{
+			switch ( LA(1)) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup();
+				ex_AST = (GrammarAST)returnAST;
+				break;
+			}
+			case EOF:
+			case FRAGMENT:
+			case DOC_COMMENT:
+			case TOKEN_REF:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			case RULE_REF:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			rule_AST = (GrammarAST)currentAST.root;
+			
+			int stop = ((TokenWithIndex)LT(1)).getIndex()-1; // point at the semi or exception thingie
+				eob.setLine(semi.getLine());
+				eob.setColumn(semi.getColumn());
+			GrammarAST eor = (GrammarAST)astFactory.create(EOR,"<end-of-rule>");
+				eor.setEnclosingRule(ruleName_AST.getText());
+				eor.setLine(semi.getLine());
+				eor.setColumn(semi.getColumn());
+				GrammarAST root = (GrammarAST)astFactory.create(RULE,"rule");
+				root.ruleStartTokenIndex = start;
+				root.ruleStopTokenIndex = stop;
+				root.setLine(startLine);
+				root.options = opts;
+			rule_AST = (GrammarAST)astFactory.make( (new ASTArray(11)).add(root).add(ruleName_AST).add(modifier).add((GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(ARG,"ARG")).add(aa_AST))).add((GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(RET,"RET")).add(rt_AST))).add(opt).add(scopes_AST).add(a_AST).add(blk).add(ex_AST).add(eor));
+				currentRuleName=null;
+			
+			currentAST.root = rule_AST;
+			currentAST.child = rule_AST!=null &&rule_AST.getFirstChild()!=null ?
+				rule_AST.getFirstChild() : rule_AST;
+			currentAST.advanceChildToEnd();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_11);
+		}
+		returnAST = rule_AST;
+	}
+	
+	/** Matches a rule's throws clause: {@code 'throws' id (',' id)*}.
+	 *  ANTLR-generated rule method; the resulting AST is left in {@code returnAST}.
+	 *  NOTE(review): generated code — do not hand-edit logic; regenerate from the grammar. */
+	public final void throwsSpec() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST throwsSpec_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp28_AST = null;
+			tmp28_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp28_AST);
+			match(LITERAL_throws);
+			id();
+			astFactory.addASTChild(currentAST, returnAST);
+			{
+			// ( ',' id )* — collect any additional exception names
+			_loop49:
+			do {
+				if ((LA(1)==COMMA)) {
+					GrammarAST tmp29_AST = null;
+					tmp29_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.addASTChild(currentAST, tmp29_AST);
+					match(COMMA);
+					id();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					break _loop49;
+				}
+				
+			} while (true);
+			}
+			throwsSpec_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			// standard generated recovery: report, then resync on the follow set
+			reportError(ex);
+			recover(ex,_tokenSet_12);
+		}
+		returnAST = throwsSpec_AST;
+	}
+	
+	/** Matches a rule's scope specification: an optional {@code 'scope' ACTION}
+	 *  (dynamically-scoped attribute definitions) followed by zero or more
+	 *  {@code 'scope' idList ';'} references to named global scopes.
+	 *  Builds #(SCOPE action? idList?) into {@code returnAST}.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void ruleScopeSpec() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST ruleScopeSpec_AST = null;
+		Token  a = null;
+		GrammarAST a_AST = null;
+		GrammarAST ids_AST = null;
+		
+		// remember position of the (possibly empty) scope spec for the synthetic SCOPE root
+		int line = LT(1).getLine();
+		int column = LT(1).getColumn();
+		
+		
+		try {      // for error handling
+			{
+			if ((LA(1)==SCOPE) && (LA(2)==ACTION)) {
+				match(SCOPE);
+				a = LT(1);
+				a_AST = (GrammarAST)astFactory.create(a);
+				match(ACTION);
+			}
+			else if ((LA(1)==SCOPE||LA(1)==AMPERSAND||LA(1)==COLON) && (_tokenSet_13.member(LA(2)))) {
+			}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			
+			}
+			{
+			// ( 'scope' idList ';' )* — references to named global scopes
+			_loop53:
+			do {
+				if ((LA(1)==SCOPE)) {
+					match(SCOPE);
+					idList();
+					ids_AST = (GrammarAST)returnAST;
+					match(SEMI);
+				}
+				else {
+					break _loop53;
+				}
+				
+			} while (true);
+			}
+			ruleScopeSpec_AST = (GrammarAST)currentAST.root;
+			
+					// NOTE(review): double (GrammarAST) cast below is redundant but harmless generator output
+					GrammarAST scopeRoot = (GrammarAST)(GrammarAST)astFactory.create(SCOPE,"scope");
+					scopeRoot.setLine(line);
+					scopeRoot.setColumn(column);
+					ruleScopeSpec_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(scopeRoot).add(a_AST).add(ids_AST));
+					
+			currentAST.root = ruleScopeSpec_AST;
+			currentAST.child = ruleScopeSpec_AST!=null &&ruleScopeSpec_AST.getFirstChild()!=null ?
+				ruleScopeSpec_AST.getFirstChild() : ruleScopeSpec_AST;
+			currentAST.advanceChildToEnd();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_14);
+		}
+		returnAST = ruleScopeSpec_AST;
+	}
+	
+	/** Matches one or more rule actions: {@code ruleAction+} (each introduced by '&amp;',
+	 *  e.g. {@code @init {...}}). The collected actions end up in {@code returnAST}.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void ruleActions() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST ruleActions_AST = null;
+		
+		try {      // for error handling
+			{
+			// ( ruleAction )+ — at least one required; _cnt45 enforces the minimum
+			int _cnt45=0;
+			_loop45:
+			do {
+				if ((LA(1)==AMPERSAND)) {
+					ruleAction();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					if ( _cnt45>=1 ) { break _loop45; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt45++;
+			} while (true);
+			}
+			ruleActions_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_8);
+		}
+		returnAST = ruleActions_AST;
+	}
+	
+	/** Matches a rule's top-level alternative list: {@code alternative rewrite ('|' alternative rewrite)*}
+	 *  and wraps the result as #(BLOCK alts... EOB). The BLOCK root inherits the
+	 *  rule's options map {@code opts} and the line/col of the previous token (':' or '(').
+	 *  Alternatives followed by '|' or a closure suffix get a synthetic syntactic
+	 *  predicate via prefixWithSynPred.
+	 *  NOTE(review): generated code — do not hand-edit logic.
+	 *  @param opts rule options to attach to the synthetic BLOCK root (may be null) */
+	public final void altList(
+		Map opts
+	) throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST altList_AST = null;
+		GrammarAST a1_AST = null;
+		GrammarAST a2_AST = null;
+		
+			GrammarAST blkRoot = (GrammarAST)astFactory.create(BLOCK,"BLOCK");
+			blkRoot.options = opts;
+			blkRoot.setLine(LT(0).getLine()); // set to : or (
+			blkRoot.setColumn(LT(0).getColumn());
+			GrammarAST save = currentBlockAST;
+			currentBlockAST = blkRoot;
+		
+		
+		try {      // for error handling
+			alternative();
+			a1_AST = (GrammarAST)returnAST;
+			astFactory.addASTChild(currentAST, returnAST);
+			rewrite();
+			astFactory.addASTChild(currentAST, returnAST);
+			if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a1_AST);
+			{
+			// ( '|' alternative rewrite )*
+			_loop62:
+			do {
+				if ((LA(1)==OR)) {
+					match(OR);
+					alternative();
+					a2_AST = (GrammarAST)returnAST;
+					astFactory.addASTChild(currentAST, returnAST);
+					rewrite();
+					astFactory.addASTChild(currentAST, returnAST);
+					if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a2_AST);
+				}
+				else {
+					break _loop62;
+				}
+				
+			} while (true);
+			}
+			altList_AST = (GrammarAST)currentAST.root;
+			
+			altList_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(blkRoot).add(altList_AST).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));
+			currentBlockAST = save;
+			
+			currentAST.root = altList_AST;
+			currentAST.child = altList_AST!=null &&altList_AST.getFirstChild()!=null ?
+				altList_AST.getFirstChild() : altList_AST;
+			currentAST.advanceChildToEnd();
+			altList_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_9);
+		}
+		returnAST = altList_AST;
+	}
+	
+	/** Matches a rule's exception group after the ';':
+	 *  either {@code exceptionHandler+ finallyClause?} or a lone {@code finallyClause}.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void exceptionGroup() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST exceptionGroup_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case LITERAL_catch:
+			{
+				{
+				// ( exceptionHandler )+ — at least one 'catch'
+				int _cnt68=0;
+				_loop68:
+				do {
+					if ((LA(1)==LITERAL_catch)) {
+						exceptionHandler();
+						astFactory.addASTChild(currentAST, returnAST);
+					}
+					else {
+						if ( _cnt68>=1 ) { break _loop68; } else {throw new NoViableAltException(LT(1), getFilename());}
+					}
+					
+					_cnt68++;
+				} while (true);
+				}
+				{
+				// optional trailing 'finally' clause; other cases are FOLLOW tokens
+				switch ( LA(1)) {
+				case LITERAL_finally:
+				{
+					finallyClause();
+					astFactory.addASTChild(currentAST, returnAST);
+					break;
+				}
+				case EOF:
+				case FRAGMENT:
+				case DOC_COMMENT:
+				case TOKEN_REF:
+				case LITERAL_protected:
+				case LITERAL_public:
+				case LITERAL_private:
+				case RULE_REF:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				exceptionGroup_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case LITERAL_finally:
+			{
+				finallyClause();
+				astFactory.addASTChild(currentAST, returnAST);
+				exceptionGroup_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_11);
+		}
+		returnAST = exceptionGroup_AST;
+	}
+	
+/** Match stuff like @init {int i;} — i.e. {@code '&' id ACTION}.
+ *  The '&' token becomes the AST root with the action name and ACTION as children.
+ *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void ruleAction() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST ruleAction_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp34_AST = null;
+			tmp34_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp34_AST);
+			match(AMPERSAND);
+			id();
+			astFactory.addASTChild(currentAST, returnAST);
+			GrammarAST tmp35_AST = null;
+			tmp35_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp35_AST);
+			match(ACTION);
+			ruleAction_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_14);
+		}
+		returnAST = ruleAction_AST;
+	}
+	
+	/** Matches one or more identifiers ({@code (TOKEN_REF|RULE_REF)+}), e.g. a scope
+	 *  name list. All ids are collected as siblings into {@code returnAST}.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void idList() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST idList_AST = null;
+		
+		try {      // for error handling
+			{
+			// ( id )+ — _cnt103 enforces at least one identifier
+			int _cnt103=0;
+			_loop103:
+			do {
+				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+					id();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					if ( _cnt103>=1 ) { break _loop103; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt103++;
+			} while (true);
+			}
+			idList_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_9);
+		}
+		returnAST = idList_AST;
+	}
+	
+/** Build #(BLOCK ( #(ALT ...) EOB )+ ) — matches a parenthesized subrule
+ *  {@code '(' (optionsSpec? ruleActions? ':' | ACTION ':')? alternative rewrite ('|' alternative rewrite)* ')'}.
+ *  The LPAREN token is retyped to BLOCK and becomes the AST root; an EOB node
+ *  carrying the RPAREN's line/col is appended at the end.
+ *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void block() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST block_AST = null;
+		Token  lp = null;
+		GrammarAST lp_AST = null;
+		GrammarAST a1_AST = null;
+		GrammarAST a2_AST = null;
+		Token  rp = null;
+		GrammarAST rp_AST = null;
+		
+		// save/restore currentBlockAST so nested blocks don't clobber the enclosing one
+		GrammarAST save = currentBlockAST;
+		Map opts=null;
+		
+		
+		try {      // for error handling
+			lp = LT(1);
+			lp_AST = (GrammarAST)astFactory.create(lp);
+			astFactory.makeASTRoot(currentAST, lp_AST);
+			match(LPAREN);
+			lp_AST.setType(BLOCK); lp_AST.setText("BLOCK");
+			{
+			if ((LA(1)==OPTIONS||LA(1)==AMPERSAND||LA(1)==COLON)) {
+				{
+				switch ( LA(1)) {
+				case OPTIONS:
+				{
+					opts=optionsSpec();
+					astFactory.addASTChild(currentAST, returnAST);
+					block_AST = (GrammarAST)currentAST.root;
+					block_AST.setOptions(grammar,opts);
+					break;
+				}
+				case AMPERSAND:
+				case COLON:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				{
+				switch ( LA(1)) {
+				case AMPERSAND:
+				{
+					ruleActions();
+					astFactory.addASTChild(currentAST, returnAST);
+					break;
+				}
+				case COLON:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				match(COLON);
+			}
+			else if ((LA(1)==ACTION) && (LA(2)==COLON)) {
+				GrammarAST tmp37_AST = null;
+				tmp37_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp37_AST);
+				match(ACTION);
+				match(COLON);
+			}
+			else if ((_tokenSet_15.member(LA(1))) && (_tokenSet_16.member(LA(2)))) {
+			}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			
+			}
+			currentBlockAST = lp_AST;
+			alternative();
+			a1_AST = (GrammarAST)returnAST;
+			astFactory.addASTChild(currentAST, returnAST);
+			rewrite();
+			astFactory.addASTChild(currentAST, returnAST);
+			if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a1_AST);
+			{
+			// ( '|' alternative rewrite )*
+			_loop59:
+			do {
+				if ((LA(1)==OR)) {
+					match(OR);
+					alternative();
+					a2_AST = (GrammarAST)returnAST;
+					astFactory.addASTChild(currentAST, returnAST);
+					rewrite();
+					astFactory.addASTChild(currentAST, returnAST);
+					if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a2_AST);
+				}
+				else {
+					break _loop59;
+				}
+				
+			} while (true);
+			}
+			rp = LT(1);
+			rp_AST = (GrammarAST)astFactory.create(rp);
+			match(RPAREN);
+			block_AST = (GrammarAST)currentAST.root;
+			
+					currentBlockAST = save;
+			GrammarAST eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
+			eob.setLine(rp.getLine());
+			eob.setColumn(rp.getColumn());
+			block_AST.addChild(eob);
+			
+			block_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_17);
+		}
+		returnAST = block_AST;
+	}
+	
+	/** Matches a single alternative: {@code element+} or the empty alternative.
+	 *  Either way the result is wrapped as #(ALT ... EOA); an empty alternative
+	 *  yields #(ALT EPSILON EOA) with the epsilon node positioned at the previous
+	 *  token ('|' or ':').
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void alternative() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST alternative_AST = null;
+		GrammarAST el_AST = null;
+		
+		GrammarAST eoa = (GrammarAST)astFactory.create(EOA,"<end-of-alt>");
+		GrammarAST altRoot = (GrammarAST)astFactory.create(ALT,"ALT");
+		altRoot.setLine(LT(1).getLine());
+		altRoot.setColumn(LT(1).getColumn());
+		
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case ACTION:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case LPAREN:
+			case SEMPRED:
+			case RULE_REF:
+			case NOT:
+			case TREE_BEGIN:
+			case WILDCARD:
+			{
+				{
+				// ( element )+
+				int _cnt65=0;
+				_loop65:
+				do {
+					if ((_tokenSet_18.member(LA(1)))) {
+						element();
+						el_AST = (GrammarAST)returnAST;
+						astFactory.addASTChild(currentAST, returnAST);
+					}
+					else {
+						if ( _cnt65>=1 ) { break _loop65; } else {throw new NoViableAltException(LT(1), getFilename());}
+					}
+					
+					_cnt65++;
+				} while (true);
+				}
+				alternative_AST = (GrammarAST)currentAST.root;
+				
+				if ( alternative_AST==null ) {
+				alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add((GrammarAST)astFactory.create(EPSILON,"epsilon")).add(eoa));
+				}
+				else {
+					// we have a real list of stuff
+					alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add(alternative_AST).add(eoa));
+				}
+				
+				currentAST.root = alternative_AST;
+				currentAST.child = alternative_AST!=null &&alternative_AST.getFirstChild()!=null ?
+					alternative_AST.getFirstChild() : alternative_AST;
+				currentAST.advanceChildToEnd();
+				alternative_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case SEMI:
+			case OR:
+			case RPAREN:
+			case REWRITE:
+			{
+				alternative_AST = (GrammarAST)currentAST.root;
+				
+					GrammarAST eps = (GrammarAST)astFactory.create(EPSILON,"epsilon");
+						eps.setLine(LT(0).getLine()); // get line/col of '|' or ':' (prev token)
+						eps.setColumn(LT(0).getColumn());
+					alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add(eps).add(eoa));
+					
+				currentAST.root = alternative_AST;
+				currentAST.child = alternative_AST!=null &&alternative_AST.getFirstChild()!=null ?
+					alternative_AST.getFirstChild() : alternative_AST;
+				currentAST.advanceChildToEnd();
+				alternative_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_19);
+		}
+		returnAST = alternative_AST;
+	}
+	
+	/** Matches an optional rewrite section after an alternative:
+	 *  {@code ('->' SEMPRED rewrite_alternative)* '->' rewrite_alternative} or nothing.
+	 *  Predicated rewrites come first, then the unconditional one; the pieces are
+	 *  collected under a throwaway root and its child list becomes {@code returnAST}.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void rewrite() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_AST = null;
+		Token  rew = null;
+		GrammarAST rew_AST = null;
+		Token  pred = null;
+		GrammarAST pred_AST = null;
+		GrammarAST alt_AST = null;
+		Token  rew2 = null;
+		GrammarAST rew2_AST = null;
+		GrammarAST alt2_AST = null;
+		
+		// scratch root used only to accumulate the rewrite subtrees as siblings
+		GrammarAST root = new GrammarAST();
+		
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case REWRITE:
+			{
+				{
+				// ( '->' SEMPRED rewrite_alternative )* — predicated rewrites
+				_loop108:
+				do {
+					if ((LA(1)==REWRITE) && (LA(2)==SEMPRED)) {
+						rew = LT(1);
+						rew_AST = (GrammarAST)astFactory.create(rew);
+						match(REWRITE);
+						pred = LT(1);
+						pred_AST = (GrammarAST)astFactory.create(pred);
+						match(SEMPRED);
+						rewrite_alternative();
+						alt_AST = (GrammarAST)returnAST;
+						root.addChild( (GrammarAST)astFactory.make( (new ASTArray(3)).add(rew_AST).add(pred_AST).add(alt_AST)) );
+						
+						pred_AST.setEnclosingRule(currentRuleName);
+						rew_AST.setEnclosingRule(currentRuleName);
+						
+					}
+					else {
+						break _loop108;
+					}
+					
+				} while (true);
+				}
+				rew2 = LT(1);
+				rew2_AST = (GrammarAST)astFactory.create(rew2);
+				match(REWRITE);
+				rewrite_alternative();
+				alt2_AST = (GrammarAST)returnAST;
+				rewrite_AST = (GrammarAST)currentAST.root;
+				
+				root.addChild( (GrammarAST)astFactory.make( (new ASTArray(2)).add(rew2_AST).add(alt2_AST)) );
+				rewrite_AST = (GrammarAST)root.getFirstChild();
+				
+				currentAST.root = rewrite_AST;
+				currentAST.child = rewrite_AST!=null &&rewrite_AST.getFirstChild()!=null ?
+					rewrite_AST.getFirstChild() : rewrite_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case SEMI:
+			case OR:
+			case RPAREN:
+			{
+				// no rewrite present — leave returnAST as whatever is in currentAST (null)
+				rewrite_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_20);
+		}
+		returnAST = rewrite_AST;
+	}
+	
+	/** Matches one element of an alternative — a thin wrapper that delegates to
+	 *  elementNoOptionSpec and passes its AST through to {@code returnAST}.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void element() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST element_AST = null;
+		
+		try {      // for error handling
+			elementNoOptionSpec();
+			astFactory.addASTChild(currentAST, returnAST);
+			element_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_21);
+		}
+		returnAST = element_AST;
+	}
+	
+	/** Matches one exception handler: {@code 'catch' ARG_ACTION ACTION}.
+	 *  The 'catch' token becomes the AST root with the argument and body actions
+	 *  as children.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void exceptionHandler() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST exceptionHandler_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp40_AST = null;
+			tmp40_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp40_AST);
+			match(LITERAL_catch);
+			GrammarAST tmp41_AST = null;
+			tmp41_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp41_AST);
+			match(ARG_ACTION);
+			GrammarAST tmp42_AST = null;
+			tmp42_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp42_AST);
+			match(ACTION);
+			exceptionHandler_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_22);
+		}
+		returnAST = exceptionHandler_AST;
+	}
+	
+	/** Matches a finally clause: {@code 'finally' ACTION}. The 'finally' token
+	 *  becomes the AST root with the ACTION as its child.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void finallyClause() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST finallyClause_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp43_AST = null;
+			tmp43_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp43_AST);
+			match(LITERAL_finally);
+			GrammarAST tmp44_AST = null;
+			tmp44_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp44_AST);
+			match(ACTION);
+			finallyClause_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_11);
+		}
+		returnAST = finallyClause_AST;
+	}
+	
+	/** Matches one grammar element (no option spec): a subrule (ebnf), an ACTION,
+	 *  a semantic predicate (optionally gated via '=>'), a tree pattern, a labeled
+	 *  element ({@code id (=|+=) (atom|block)}), or a bare atom — the latter two
+	 *  optionally followed by an EBNF suffix (? * +) which wraps the element in a
+	 *  synthetic subrule via ebnfSuffix.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void elementNoOptionSpec() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST elementNoOptionSpec_AST = null;
+		Token  p = null;
+		GrammarAST p_AST = null;
+		GrammarAST t3_AST = null;
+		
+		IntSet elements=null;
+		GrammarAST sub, sub2;
+		
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case LPAREN:
+			{
+				ebnf();
+				astFactory.addASTChild(currentAST, returnAST);
+				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp45_AST = null;
+				tmp45_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp45_AST);
+				match(ACTION);
+				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case SEMPRED:
+			{
+				p = LT(1);
+				p_AST = (GrammarAST)astFactory.create(p);
+				astFactory.addASTChild(currentAST, p_AST);
+				match(SEMPRED);
+				{
+				// optional '=>' turns the predicate into a gated sem pred
+				switch ( LA(1)) {
+				case IMPLIES:
+				{
+					match(IMPLIES);
+					p_AST.setType(GATED_SEMPRED);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				
+						p_AST.setEnclosingRule(currentRuleName);
+						grammar.blocksWithSemPreds.add(currentBlockAST);
+						
+				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				tree();
+				t3_AST = (GrammarAST)returnAST;
+				astFactory.addASTChild(currentAST, returnAST);
+				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+				// labeled element: id (=|+=) (atom|block) ebnfSuffix?
+				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==ASSIGN||LA(2)==PLUS_ASSIGN)) {
+					id();
+					astFactory.addASTChild(currentAST, returnAST);
+					{
+					switch ( LA(1)) {
+					case ASSIGN:
+					{
+						GrammarAST tmp47_AST = null;
+						tmp47_AST = (GrammarAST)astFactory.create(LT(1));
+						astFactory.makeASTRoot(currentAST, tmp47_AST);
+						match(ASSIGN);
+						break;
+					}
+					case PLUS_ASSIGN:
+					{
+						GrammarAST tmp48_AST = null;
+						tmp48_AST = (GrammarAST)astFactory.create(LT(1));
+						astFactory.makeASTRoot(currentAST, tmp48_AST);
+						match(PLUS_ASSIGN);
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(LT(1), getFilename());
+					}
+					}
+					}
+					{
+					switch ( LA(1)) {
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case TOKEN_REF:
+					case RULE_REF:
+					case NOT:
+					case WILDCARD:
+					{
+						atom();
+						astFactory.addASTChild(currentAST, returnAST);
+						break;
+					}
+					case LPAREN:
+					{
+						block();
+						astFactory.addASTChild(currentAST, returnAST);
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(LT(1), getFilename());
+					}
+					}
+					}
+					{
+					switch ( LA(1)) {
+					case STAR:
+					case QUESTION:
+					case PLUS:
+					{
+						sub=ebnfSuffix((GrammarAST)currentAST.root,false);
+						elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+						elementNoOptionSpec_AST=sub;
+						currentAST.root = elementNoOptionSpec_AST;
+						currentAST.child = elementNoOptionSpec_AST!=null &&elementNoOptionSpec_AST.getFirstChild()!=null ?
+							elementNoOptionSpec_AST.getFirstChild() : elementNoOptionSpec_AST;
+						currentAST.advanceChildToEnd();
+						break;
+					}
+					case ACTION:
+					case SEMI:
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case TOKEN_REF:
+					case LPAREN:
+					case OR:
+					case RPAREN:
+					case SEMPRED:
+					case RULE_REF:
+					case NOT:
+					case TREE_BEGIN:
+					case WILDCARD:
+					case REWRITE:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(LT(1), getFilename());
+					}
+					}
+					}
+					elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+				}
+				else if ((_tokenSet_23.member(LA(1))) && (_tokenSet_24.member(LA(2)))) {
+					// bare atom, optionally followed by an EBNF suffix
+					atom();
+					astFactory.addASTChild(currentAST, returnAST);
+					{
+					switch ( LA(1)) {
+					case STAR:
+					case QUESTION:
+					case PLUS:
+					{
+						sub2=ebnfSuffix((GrammarAST)currentAST.root,false);
+						elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+						elementNoOptionSpec_AST=sub2;
+						currentAST.root = elementNoOptionSpec_AST;
+						currentAST.child = elementNoOptionSpec_AST!=null &&elementNoOptionSpec_AST.getFirstChild()!=null ?
+							elementNoOptionSpec_AST.getFirstChild() : elementNoOptionSpec_AST;
+						currentAST.advanceChildToEnd();
+						break;
+					}
+					case ACTION:
+					case SEMI:
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case TOKEN_REF:
+					case LPAREN:
+					case OR:
+					case RPAREN:
+					case SEMPRED:
+					case RULE_REF:
+					case NOT:
+					case TREE_BEGIN:
+					case WILDCARD:
+					case REWRITE:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(LT(1), getFilename());
+					}
+					}
+					}
+					elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
+				}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_21);
+		}
+		returnAST = elementNoOptionSpec_AST;
+	}
+	
+	/** Matches an atom: a ~-negated set (notSet), a rule reference (with optional
+	 *  ARG_ACTION), a character range, or a terminal — each optionally suffixed
+	 *  with '^' (ROOT) or '!' (BANG), which becomes the AST root of the atom.
+	 *  NOTE(review): generated code — do not hand-edit logic. */
+	public final void atom() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST atom_AST = null;
+		Token  rr = null;
+		GrammarAST rr_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case NOT:
+			{
+				notSet();
+				astFactory.addASTChild(currentAST, returnAST);
+				{
+				// optional '^' or '!' tree-construction operator
+				switch ( LA(1)) {
+				case ROOT:
+				{
+					GrammarAST tmp49_AST = null;
+					tmp49_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp49_AST);
+					match(ROOT);
+					break;
+				}
+				case BANG:
+				{
+					GrammarAST tmp50_AST = null;
+					tmp50_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp50_AST);
+					match(BANG);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case RULE_REF:
+			{
+				rr = LT(1);
+				rr_AST = (GrammarAST)astFactory.create(rr);
+				astFactory.makeASTRoot(currentAST, rr_AST);
+				match(RULE_REF);
+				{
+				// optional rule argument action
+				switch ( LA(1)) {
+				case ARG_ACTION:
+				{
+					GrammarAST tmp51_AST = null;
+					tmp51_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.addASTChild(currentAST, tmp51_AST);
+					match(ARG_ACTION);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case BANG:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case ROOT:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				{
+				switch ( LA(1)) {
+				case ROOT:
+				{
+					GrammarAST tmp52_AST = null;
+					tmp52_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp52_AST);
+					match(ROOT);
+					break;
+				}
+				case BANG:
+				{
+					GrammarAST tmp53_AST = null;
+					tmp53_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp53_AST);
+					match(BANG);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+				// char range, e.g. 'a'..'z' — disambiguated by RANGE lookahead
+				if ((LA(1)==CHAR_LITERAL) && (LA(2)==RANGE)) {
+					range();
+					astFactory.addASTChild(currentAST, returnAST);
+					{
+					switch ( LA(1)) {
+					case ROOT:
+					{
+						GrammarAST tmp54_AST = null;
+						tmp54_AST = (GrammarAST)astFactory.create(LT(1));
+						astFactory.makeASTRoot(currentAST, tmp54_AST);
+						match(ROOT);
+						break;
+					}
+					case BANG:
+					{
+						GrammarAST tmp55_AST = null;
+						tmp55_AST = (GrammarAST)astFactory.create(LT(1));
+						astFactory.makeASTRoot(currentAST, tmp55_AST);
+						match(BANG);
+						break;
+					}
+					case ACTION:
+					case SEMI:
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case STAR:
+					case TOKEN_REF:
+					case LPAREN:
+					case OR:
+					case RPAREN:
+					case SEMPRED:
+					case RULE_REF:
+					case NOT:
+					case TREE_BEGIN:
+					case QUESTION:
+					case PLUS:
+					case WILDCARD:
+					case REWRITE:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(LT(1), getFilename());
+					}
+					}
+					}
+					atom_AST = (GrammarAST)currentAST.root;
+				}
+				else if ((_tokenSet_25.member(LA(1))) && (_tokenSet_26.member(LA(2)))) {
+					terminal();
+					astFactory.addASTChild(currentAST, returnAST);
+					atom_AST = (GrammarAST)currentAST.root;
+				}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_27);
+		}
+		returnAST = atom_AST;
+	}
+	
+	/** Matches an EBNF suffix ('?', '*' or '+') after {@code elemAST} and wraps the
+	 *  element in a synthetic subrule: #(OPTIONAL|CLOSURE|POSITIVE_CLOSURE
+	 *  #(BLOCK #(ALT elem EOA) EOB)). Unless {@code inRewrite} is true the ALT is
+	 *  also given a synthetic predicate via prefixWithSynPred.
+	 *  NOTE(review): generated code — do not hand-edit logic.
+	 *  @param elemAST   the already-built element AST to wrap
+	 *  @param inRewrite true when matching inside a rewrite (skips synpred prefixing)
+	 *  @return the synthetic subrule AST (null if an error was recovered from) */
+	public final GrammarAST  ebnfSuffix(
+		GrammarAST elemAST, boolean inRewrite
+	) throws RecognitionException, TokenStreamException {
+		GrammarAST subrule=null;
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST ebnfSuffix_AST = null;
+		
+		GrammarAST ebnfRoot=null;
+		
+		
+		try {      // for error handling
+			{
+			switch ( LA(1)) {
+			case QUESTION:
+			{
+				GrammarAST tmp56_AST = null;
+				tmp56_AST = (GrammarAST)astFactory.create(LT(1));
+				match(QUESTION);
+				ebnfRoot = (GrammarAST)astFactory.create(OPTIONAL,"?");
+				break;
+			}
+			case STAR:
+			{
+				GrammarAST tmp57_AST = null;
+				tmp57_AST = (GrammarAST)astFactory.create(LT(1));
+				match(STAR);
+				ebnfRoot = (GrammarAST)astFactory.create(CLOSURE,"*");
+				break;
+			}
+			case PLUS:
+			{
+				GrammarAST tmp58_AST = null;
+				tmp58_AST = (GrammarAST)astFactory.create(LT(1));
+				match(PLUS);
+				ebnfRoot = (GrammarAST)astFactory.create(POSITIVE_CLOSURE,"+");
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			
+					// build the synthetic BLOCK/ALT wrapper around the element,
+					// temporarily making it the current block for synpred bookkeeping
+					GrammarAST save = currentBlockAST;
+				ebnfRoot.setLine(elemAST.getLine());
+				ebnfRoot.setColumn(elemAST.getColumn());
+				GrammarAST blkRoot = (GrammarAST)astFactory.create(BLOCK,"BLOCK");
+				currentBlockAST = blkRoot;
+				GrammarAST eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
+					eob.setLine(elemAST.getLine());
+					eob.setColumn(elemAST.getColumn());
+					GrammarAST alt = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT,"ALT")).add(elemAST).add((GrammarAST)astFactory.create(EOA,"<end-of-alt>")));
+				if ( !inRewrite ) {
+					prefixWithSynPred(alt);
+				}
+					subrule =
+					     (GrammarAST)astFactory.make( (new ASTArray(2)).add(ebnfRoot).add((GrammarAST)astFactory.make( (new ASTArray(3)).add(blkRoot).add(alt).add(eob))));
+					currentBlockAST = save;
+					
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_28);
+		}
+		returnAST = ebnfSuffix_AST;
+		return subrule;
+	}
+	
+/** matches ENBF blocks (and sets via block rule) */
+	public final void ebnf() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST ebnf_AST = null;
+		GrammarAST b_AST = null;
+		
+		int line = LT(1).getLine();
+		int col = LT(1).getColumn();
+		
+		
+		try {      // for error handling
+			block();
+			b_AST = (GrammarAST)returnAST;
+			{
+			switch ( LA(1)) {
+			case QUESTION:
+			{
+				GrammarAST tmp59_AST = null;
+				tmp59_AST = (GrammarAST)astFactory.create(LT(1));
+				match(QUESTION);
+				ebnf_AST = (GrammarAST)currentAST.root;
+				ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(OPTIONAL,"?")).add(b_AST));
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case STAR:
+			{
+				GrammarAST tmp60_AST = null;
+				tmp60_AST = (GrammarAST)astFactory.create(LT(1));
+				match(STAR);
+				ebnf_AST = (GrammarAST)currentAST.root;
+				ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(CLOSURE,"*")).add(b_AST));
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case PLUS:
+			{
+				GrammarAST tmp61_AST = null;
+				tmp61_AST = (GrammarAST)astFactory.create(LT(1));
+				match(PLUS);
+				ebnf_AST = (GrammarAST)currentAST.root;
+				ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(POSITIVE_CLOSURE,"+")).add(b_AST));
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case IMPLIES:
+			{
+				match(IMPLIES);
+				ebnf_AST = (GrammarAST)currentAST.root;
+				
+							if ( gtype==COMBINED_GRAMMAR &&
+							     Character.isUpperCase(currentRuleName.charAt(0)) )
+						    {
+				// ignore for lexer rules in combined
+						    	ebnf_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(SYNPRED,"=>")).add(b_AST)); 
+						    }
+						    else {
+						    	// create manually specified (...)=> predicate;
+				// convert to sempred
+						    	ebnf_AST = createSynSemPredFromBlock(b_AST, SYN_SEMPRED);
+							}
+							
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case ROOT:
+			{
+				GrammarAST tmp63_AST = null;
+				tmp63_AST = (GrammarAST)astFactory.create(LT(1));
+				match(ROOT);
+				ebnf_AST = (GrammarAST)currentAST.root;
+				ebnf_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(tmp63_AST).add(b_AST));
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case BANG:
+			{
+				GrammarAST tmp64_AST = null;
+				tmp64_AST = (GrammarAST)astFactory.create(LT(1));
+				match(BANG);
+				ebnf_AST = (GrammarAST)currentAST.root;
+				ebnf_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(tmp64_AST).add(b_AST));
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case ACTION:
+			case SEMI:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case LPAREN:
+			case OR:
+			case RPAREN:
+			case SEMPRED:
+			case RULE_REF:
+			case NOT:
+			case TREE_BEGIN:
+			case WILDCARD:
+			case REWRITE:
+			{
+				ebnf_AST = (GrammarAST)currentAST.root;
+				ebnf_AST = b_AST;
+				currentAST.root = ebnf_AST;
+				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
+					ebnf_AST.getFirstChild() : ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			ebnf_AST = (GrammarAST)currentAST.root;
+			ebnf_AST.setLine(line); ebnf_AST.setColumn(col);
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_21);
+		}
+		returnAST = ebnf_AST;
+	}
+	
+	public final void tree() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST tree_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp65_AST = null;
+			tmp65_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp65_AST);
+			match(TREE_BEGIN);
+			element();
+			astFactory.addASTChild(currentAST, returnAST);
+			{
+			int _cnt88=0;
+			_loop88:
+			do {
+				if ((_tokenSet_18.member(LA(1)))) {
+					element();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					if ( _cnt88>=1 ) { break _loop88; } else {throw new NoViableAltException(LT(1), getFilename());}
+				}
+				
+				_cnt88++;
+			} while (true);
+			}
+			match(RPAREN);
+			tree_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_21);
+		}
+		returnAST = tree_AST;
+	}
+	
+	public final void range() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST range_AST = null;
+		Token  c1 = null;
+		GrammarAST c1_AST = null;
+		Token  c2 = null;
+		GrammarAST c2_AST = null;
+		
+		GrammarAST subrule=null, root=null;
+		
+		
+		try {      // for error handling
+			c1 = LT(1);
+			c1_AST = (GrammarAST)astFactory.create(c1);
+			match(CHAR_LITERAL);
+			GrammarAST tmp67_AST = null;
+			tmp67_AST = (GrammarAST)astFactory.create(LT(1));
+			match(RANGE);
+			c2 = LT(1);
+			c2_AST = (GrammarAST)astFactory.create(c2);
+			match(CHAR_LITERAL);
+			range_AST = (GrammarAST)currentAST.root;
+			
+					GrammarAST r = (GrammarAST)astFactory.create(CHAR_RANGE,"..");
+					r.setLine(c1.getLine());
+					r.setColumn(c1.getColumn());
+					range_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(r).add(c1_AST).add(c2_AST));
+					root = range_AST;
+					
+			currentAST.root = range_AST;
+			currentAST.child = range_AST!=null &&range_AST.getFirstChild()!=null ?
+				range_AST.getFirstChild() : range_AST;
+			currentAST.advanceChildToEnd();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_29);
+		}
+		returnAST = range_AST;
+	}
+	
+	public final void terminal() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST terminal_AST = null;
+		Token  cl = null;
+		GrammarAST cl_AST = null;
+		Token  tr = null;
+		GrammarAST tr_AST = null;
+		Token  sl = null;
+		GrammarAST sl_AST = null;
+		Token  wi = null;
+		GrammarAST wi_AST = null;
+		
+		GrammarAST ebnfRoot=null, subrule=null;
+		
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case CHAR_LITERAL:
+			{
+				cl = LT(1);
+				cl_AST = (GrammarAST)astFactory.create(cl);
+				astFactory.makeASTRoot(currentAST, cl_AST);
+				match(CHAR_LITERAL);
+				{
+				switch ( LA(1)) {
+				case ROOT:
+				{
+					GrammarAST tmp68_AST = null;
+					tmp68_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp68_AST);
+					match(ROOT);
+					break;
+				}
+				case BANG:
+				{
+					GrammarAST tmp69_AST = null;
+					tmp69_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp69_AST);
+					match(BANG);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				terminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case TOKEN_REF:
+			{
+				tr = LT(1);
+				tr_AST = (GrammarAST)astFactory.create(tr);
+				astFactory.makeASTRoot(currentAST, tr_AST);
+				match(TOKEN_REF);
+				{
+				switch ( LA(1)) {
+				case ARG_ACTION:
+				{
+					GrammarAST tmp70_AST = null;
+					tmp70_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.addASTChild(currentAST, tmp70_AST);
+					match(ARG_ACTION);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case BANG:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case ROOT:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				{
+				switch ( LA(1)) {
+				case ROOT:
+				{
+					GrammarAST tmp71_AST = null;
+					tmp71_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp71_AST);
+					match(ROOT);
+					break;
+				}
+				case BANG:
+				{
+					GrammarAST tmp72_AST = null;
+					tmp72_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp72_AST);
+					match(BANG);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				terminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				sl = LT(1);
+				sl_AST = (GrammarAST)astFactory.create(sl);
+				astFactory.addASTChild(currentAST, sl_AST);
+				match(STRING_LITERAL);
+				{
+				switch ( LA(1)) {
+				case ROOT:
+				{
+					GrammarAST tmp73_AST = null;
+					tmp73_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp73_AST);
+					match(ROOT);
+					break;
+				}
+				case BANG:
+				{
+					GrammarAST tmp74_AST = null;
+					tmp74_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp74_AST);
+					match(BANG);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				terminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case WILDCARD:
+			{
+				wi = LT(1);
+				wi_AST = (GrammarAST)astFactory.create(wi);
+				astFactory.addASTChild(currentAST, wi_AST);
+				match(WILDCARD);
+				{
+				switch ( LA(1)) {
+				case ROOT:
+				{
+					GrammarAST tmp75_AST = null;
+					tmp75_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp75_AST);
+					match(ROOT);
+					break;
+				}
+				case BANG:
+				{
+					GrammarAST tmp76_AST = null;
+					tmp76_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.makeASTRoot(currentAST, tmp76_AST);
+					match(BANG);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case SEMPRED:
+				case RULE_REF:
+				case NOT:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case WILDCARD:
+				case REWRITE:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				terminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_27);
+		}
+		returnAST = terminal_AST;
+	}
+	
+	public final void notSet() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST notSet_AST = null;
+		Token  n = null;
+		GrammarAST n_AST = null;
+		
+		int line = LT(1).getLine();
+		int col = LT(1).getColumn();
+		GrammarAST subrule=null;
+		
+		
+		try {      // for error handling
+			n = LT(1);
+			n_AST = (GrammarAST)astFactory.create(n);
+			astFactory.makeASTRoot(currentAST, n_AST);
+			match(NOT);
+			{
+			switch ( LA(1)) {
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			{
+				notTerminal();
+				astFactory.addASTChild(currentAST, returnAST);
+				break;
+			}
+			case LPAREN:
+			{
+				block();
+				astFactory.addASTChild(currentAST, returnAST);
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			notSet_AST = (GrammarAST)currentAST.root;
+			notSet_AST.setLine(line); notSet_AST.setColumn(col);
+			notSet_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_29);
+		}
+		returnAST = notSet_AST;
+	}
+	
+	public final void notTerminal() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST notTerminal_AST = null;
+		Token  cl = null;
+		GrammarAST cl_AST = null;
+		Token  tr = null;
+		GrammarAST tr_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case CHAR_LITERAL:
+			{
+				cl = LT(1);
+				cl_AST = (GrammarAST)astFactory.create(cl);
+				astFactory.addASTChild(currentAST, cl_AST);
+				match(CHAR_LITERAL);
+				notTerminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case TOKEN_REF:
+			{
+				tr = LT(1);
+				tr_AST = (GrammarAST)astFactory.create(tr);
+				astFactory.addASTChild(currentAST, tr_AST);
+				match(TOKEN_REF);
+				notTerminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				GrammarAST tmp77_AST = null;
+				tmp77_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp77_AST);
+				match(STRING_LITERAL);
+				notTerminal_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_29);
+		}
+		returnAST = notTerminal_AST;
+	}
+	
+/** Match anything that looks like an ID and return tree as token type ID */
+	public final void idToken() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST idToken_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case TOKEN_REF:
+			{
+				GrammarAST tmp78_AST = null;
+				tmp78_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp78_AST);
+				match(TOKEN_REF);
+				idToken_AST = (GrammarAST)currentAST.root;
+				idToken_AST.setType(ID);
+				idToken_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case RULE_REF:
+			{
+				GrammarAST tmp79_AST = null;
+				tmp79_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp79_AST);
+				match(RULE_REF);
+				idToken_AST = (GrammarAST)currentAST.root;
+				idToken_AST.setType(ID);
+				idToken_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_0);
+		}
+		returnAST = idToken_AST;
+	}
+	
+	public final void rewrite_alternative() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_alternative_AST = null;
+		
+		GrammarAST eoa = (GrammarAST)astFactory.create(EOA,"<end-of-alt>");
+		GrammarAST altRoot = (GrammarAST)astFactory.create(ALT,"ALT");
+		altRoot.setLine(LT(1).getLine());
+		altRoot.setColumn(LT(1).getColumn());
+		
+		
+		try {      // for error handling
+			if (((_tokenSet_30.member(LA(1))) && (_tokenSet_31.member(LA(2))))&&(grammar.buildTemplate())) {
+				rewrite_template();
+				astFactory.addASTChild(currentAST, returnAST);
+				rewrite_alternative_AST = (GrammarAST)currentAST.root;
+			}
+			else if (((_tokenSet_32.member(LA(1))) && (_tokenSet_33.member(LA(2))))&&(grammar.buildAST())) {
+				{
+				int _cnt112=0;
+				_loop112:
+				do {
+					if ((_tokenSet_32.member(LA(1)))) {
+						rewrite_element();
+						astFactory.addASTChild(currentAST, returnAST);
+					}
+					else {
+						if ( _cnt112>=1 ) { break _loop112; } else {throw new NoViableAltException(LT(1), getFilename());}
+					}
+					
+					_cnt112++;
+				} while (true);
+				}
+				rewrite_alternative_AST = (GrammarAST)currentAST.root;
+				
+				if ( rewrite_alternative_AST==null ) {
+				rewrite_alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add((GrammarAST)astFactory.create(EPSILON,"epsilon")).add(eoa));
+				}
+				else {
+				rewrite_alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add(rewrite_alternative_AST).add(eoa));
+				}
+				
+				currentAST.root = rewrite_alternative_AST;
+				currentAST.child = rewrite_alternative_AST!=null &&rewrite_alternative_AST.getFirstChild()!=null ?
+					rewrite_alternative_AST.getFirstChild() : rewrite_alternative_AST;
+				currentAST.advanceChildToEnd();
+				rewrite_alternative_AST = (GrammarAST)currentAST.root;
+			}
+			else if ((_tokenSet_19.member(LA(1)))) {
+				rewrite_alternative_AST = (GrammarAST)currentAST.root;
+				rewrite_alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add((GrammarAST)astFactory.create(EPSILON,"epsilon")).add(eoa));
+				currentAST.root = rewrite_alternative_AST;
+				currentAST.child = rewrite_alternative_AST!=null &&rewrite_alternative_AST.getFirstChild()!=null ?
+					rewrite_alternative_AST.getFirstChild() : rewrite_alternative_AST;
+				currentAST.advanceChildToEnd();
+				rewrite_alternative_AST = (GrammarAST)currentAST.root;
+			}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_19);
+		}
+		returnAST = rewrite_alternative_AST;
+	}
+	
+	public final void rewrite_block() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_block_AST = null;
+		Token  lp = null;
+		GrammarAST lp_AST = null;
+		
+		try {      // for error handling
+			lp = LT(1);
+			lp_AST = (GrammarAST)astFactory.create(lp);
+			astFactory.makeASTRoot(currentAST, lp_AST);
+			match(LPAREN);
+			lp_AST.setType(BLOCK); lp_AST.setText("BLOCK");
+			rewrite_alternative();
+			astFactory.addASTChild(currentAST, returnAST);
+			match(RPAREN);
+			rewrite_block_AST = (GrammarAST)currentAST.root;
+			
+			GrammarAST eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
+			eob.setLine(lp.getLine());
+			eob.setColumn(lp.getColumn());
+			rewrite_block_AST.addChild(eob);
+			
+			rewrite_block_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_34);
+		}
+		returnAST = rewrite_block_AST;
+	}
+	
+/** Build a tree for a template rewrite:
+      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+    where ARGLIST is always there even if no args exist.
+    ID can be "template" keyword.  If first child is ACTION then it's
+    an indirect template ref
+
+    -> foo(a={...}, b={...})
+    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
+    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+	public final void rewrite_template() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_template_AST = null;
+		Token st=null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case LPAREN:
+			{
+				rewrite_indirect_template_head();
+				astFactory.addASTChild(currentAST, returnAST);
+				rewrite_template_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp81_AST = null;
+				tmp81_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp81_AST);
+				match(ACTION);
+				rewrite_template_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+				if (((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==LPAREN))&&(LT(1).getText().equals("template"))) {
+					rewrite_template_head();
+					astFactory.addASTChild(currentAST, returnAST);
+					st=LT(1);
+					{
+					switch ( LA(1)) {
+					case DOUBLE_QUOTE_STRING_LITERAL:
+					{
+						match(DOUBLE_QUOTE_STRING_LITERAL);
+						break;
+					}
+					case DOUBLE_ANGLE_STRING_LITERAL:
+					{
+						match(DOUBLE_ANGLE_STRING_LITERAL);
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(LT(1), getFilename());
+					}
+					}
+					}
+					rewrite_template_AST = (GrammarAST)currentAST.root;
+					rewrite_template_AST.addChild((GrammarAST)astFactory.create(st));
+					rewrite_template_AST = (GrammarAST)currentAST.root;
+				}
+				else if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==LPAREN)) {
+					rewrite_template_head();
+					astFactory.addASTChild(currentAST, returnAST);
+					rewrite_template_AST = (GrammarAST)currentAST.root;
+				}
+			else {
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_19);
+		}
+		returnAST = rewrite_template_AST;
+	}
+	
+	public final void rewrite_element() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_element_AST = null;
+		GrammarAST t_AST = null;
+		GrammarAST tr_AST = null;
+		
+		GrammarAST subrule=null;
+		
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case ACTION:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case DOLLAR:
+			{
+				rewrite_atom();
+				t_AST = (GrammarAST)returnAST;
+				astFactory.addASTChild(currentAST, returnAST);
+				{
+				switch ( LA(1)) {
+				case STAR:
+				case QUESTION:
+				case PLUS:
+				{
+					subrule=ebnfSuffix(t_AST,true);
+					astFactory.addASTChild(currentAST, returnAST);
+					rewrite_element_AST = (GrammarAST)currentAST.root;
+					rewrite_element_AST=subrule;
+					currentAST.root = rewrite_element_AST;
+					currentAST.child = rewrite_element_AST!=null &&rewrite_element_AST.getFirstChild()!=null ?
+						rewrite_element_AST.getFirstChild() : rewrite_element_AST;
+					currentAST.advanceChildToEnd();
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case RULE_REF:
+				case TREE_BEGIN:
+				case REWRITE:
+				case DOLLAR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				rewrite_element_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case LPAREN:
+			{
+				rewrite_ebnf();
+				astFactory.addASTChild(currentAST, returnAST);
+				rewrite_element_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				rewrite_tree();
+				tr_AST = (GrammarAST)returnAST;
+				astFactory.addASTChild(currentAST, returnAST);
+				{
+				switch ( LA(1)) {
+				case STAR:
+				case QUESTION:
+				case PLUS:
+				{
+					subrule=ebnfSuffix(tr_AST,true);
+					astFactory.addASTChild(currentAST, returnAST);
+					rewrite_element_AST = (GrammarAST)currentAST.root;
+					rewrite_element_AST=subrule;
+					currentAST.root = rewrite_element_AST;
+					currentAST.child = rewrite_element_AST!=null &&rewrite_element_AST.getFirstChild()!=null ?
+						rewrite_element_AST.getFirstChild() : rewrite_element_AST;
+					currentAST.advanceChildToEnd();
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case RULE_REF:
+				case TREE_BEGIN:
+				case REWRITE:
+				case DOLLAR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				rewrite_element_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_35);
+		}
+		returnAST = rewrite_element_AST;
+	}
+	
+	public final void rewrite_atom() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_atom_AST = null;
+		Token  cl = null;
+		GrammarAST cl_AST = null;
+		Token  tr = null;
+		GrammarAST tr_AST = null;
+		Token  rr = null;
+		GrammarAST rr_AST = null;
+		Token  sl = null;
+		GrammarAST sl_AST = null;
+		Token  d = null;
+		GrammarAST d_AST = null;
+		GrammarAST i_AST = null;
+		
+		GrammarAST subrule=null;
+		
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case CHAR_LITERAL:
+			{
+				cl = LT(1);
+				cl_AST = (GrammarAST)astFactory.create(cl);
+				astFactory.addASTChild(currentAST, cl_AST);
+				match(CHAR_LITERAL);
+				rewrite_atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case TOKEN_REF:
+			{
+				tr = LT(1);
+				tr_AST = (GrammarAST)astFactory.create(tr);
+				astFactory.makeASTRoot(currentAST, tr_AST);
+				match(TOKEN_REF);
+				{
+				switch ( LA(1)) {
+				case ARG_ACTION:
+				{
+					GrammarAST tmp84_AST = null;
+					tmp84_AST = (GrammarAST)astFactory.create(LT(1));
+					astFactory.addASTChild(currentAST, tmp84_AST);
+					match(ARG_ACTION);
+					break;
+				}
+				case ACTION:
+				case SEMI:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case STAR:
+				case TOKEN_REF:
+				case LPAREN:
+				case OR:
+				case RPAREN:
+				case RULE_REF:
+				case TREE_BEGIN:
+				case QUESTION:
+				case PLUS:
+				case REWRITE:
+				case DOLLAR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(LT(1), getFilename());
+				}
+				}
+				}
+				rewrite_atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case RULE_REF:
+			{
+				rr = LT(1);
+				rr_AST = (GrammarAST)astFactory.create(rr);
+				astFactory.addASTChild(currentAST, rr_AST);
+				match(RULE_REF);
+				rewrite_atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				sl = LT(1);
+				sl_AST = (GrammarAST)astFactory.create(sl);
+				astFactory.addASTChild(currentAST, sl_AST);
+				match(STRING_LITERAL);
+				rewrite_atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case DOLLAR:
+			{
+				d = LT(1);
+				d_AST = (GrammarAST)astFactory.create(d);
+				match(DOLLAR);
+				id();
+				i_AST = (GrammarAST)returnAST;
+				rewrite_atom_AST = (GrammarAST)currentAST.root;
+				
+						rewrite_atom_AST = (GrammarAST)astFactory.create(LABEL,i_AST.getText());
+						rewrite_atom_AST.setLine(d_AST.getLine());
+						rewrite_atom_AST.setColumn(d_AST.getColumn());
+				rewrite_atom_AST.setEnclosingRule(currentRuleName);
+						
+				currentAST.root = rewrite_atom_AST;
+				currentAST.child = rewrite_atom_AST!=null &&rewrite_atom_AST.getFirstChild()!=null ?
+					rewrite_atom_AST.getFirstChild() : rewrite_atom_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp85_AST = null;
+				tmp85_AST = (GrammarAST)astFactory.create(LT(1));
+				astFactory.addASTChild(currentAST, tmp85_AST);
+				match(ACTION);
+				rewrite_atom_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_36);
+		}
+		returnAST = rewrite_atom_AST;
+	}
+	
+	public final void rewrite_ebnf() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_ebnf_AST = null;
+		GrammarAST b_AST = null;
+		
+		int line = LT(1).getLine();
+		int col = LT(1).getColumn();
+		
+		
+		try {      // for error handling
+			rewrite_block();
+			b_AST = (GrammarAST)returnAST;
+			{
+			switch ( LA(1)) {
+			case QUESTION:
+			{
+				GrammarAST tmp86_AST = null;
+				tmp86_AST = (GrammarAST)astFactory.create(LT(1));
+				match(QUESTION);
+				rewrite_ebnf_AST = (GrammarAST)currentAST.root;
+				rewrite_ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(OPTIONAL,"?")).add(b_AST));
+				currentAST.root = rewrite_ebnf_AST;
+				currentAST.child = rewrite_ebnf_AST!=null &&rewrite_ebnf_AST.getFirstChild()!=null ?
+					rewrite_ebnf_AST.getFirstChild() : rewrite_ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case STAR:
+			{
+				GrammarAST tmp87_AST = null;
+				tmp87_AST = (GrammarAST)astFactory.create(LT(1));
+				match(STAR);
+				rewrite_ebnf_AST = (GrammarAST)currentAST.root;
+				rewrite_ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(CLOSURE,"*")).add(b_AST));
+				currentAST.root = rewrite_ebnf_AST;
+				currentAST.child = rewrite_ebnf_AST!=null &&rewrite_ebnf_AST.getFirstChild()!=null ?
+					rewrite_ebnf_AST.getFirstChild() : rewrite_ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			case PLUS:
+			{
+				GrammarAST tmp88_AST = null;
+				tmp88_AST = (GrammarAST)astFactory.create(LT(1));
+				match(PLUS);
+				rewrite_ebnf_AST = (GrammarAST)currentAST.root;
+				rewrite_ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(POSITIVE_CLOSURE,"+")).add(b_AST));
+				currentAST.root = rewrite_ebnf_AST;
+				currentAST.child = rewrite_ebnf_AST!=null &&rewrite_ebnf_AST.getFirstChild()!=null ?
+					rewrite_ebnf_AST.getFirstChild() : rewrite_ebnf_AST;
+				currentAST.advanceChildToEnd();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+			}
+			rewrite_ebnf_AST = (GrammarAST)currentAST.root;
+			rewrite_ebnf_AST.setLine(line); rewrite_ebnf_AST.setColumn(col);
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_35);
+		}
+		returnAST = rewrite_ebnf_AST;
+	}
+	
+	public final void rewrite_tree() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_tree_AST = null;
+		
+		try {      // for error handling
+			GrammarAST tmp89_AST = null;
+			tmp89_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.makeASTRoot(currentAST, tmp89_AST);
+			match(TREE_BEGIN);
+			rewrite_atom();
+			astFactory.addASTChild(currentAST, returnAST);
+			{
+			_loop122:
+			do {
+				if ((_tokenSet_32.member(LA(1)))) {
+					rewrite_element();
+					astFactory.addASTChild(currentAST, returnAST);
+				}
+				else {
+					break _loop122;
+				}
+				
+			} while (true);
+			}
+			match(RPAREN);
+			rewrite_tree_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_36);
+		}
+		returnAST = rewrite_tree_AST;
+	}
+	
+/** -> foo(a={...}, ...) */
+	public final void rewrite_template_head() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_template_head_AST = null;
+		Token  lp = null;
+		GrammarAST lp_AST = null;
+		
+		try {      // for error handling
+			id();
+			astFactory.addASTChild(currentAST, returnAST);
+			lp = LT(1);
+			lp_AST = (GrammarAST)astFactory.create(lp);
+			astFactory.makeASTRoot(currentAST, lp_AST);
+			match(LPAREN);
+			lp_AST.setType(TEMPLATE); lp_AST.setText("TEMPLATE");
+			rewrite_template_args();
+			astFactory.addASTChild(currentAST, returnAST);
+			match(RPAREN);
+			rewrite_template_head_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_37);
+		}
+		returnAST = rewrite_template_head_AST;
+	}
+	
+/** -> ({expr})(a={...}, ...) */
+	public final void rewrite_indirect_template_head() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_indirect_template_head_AST = null;
+		Token  lp = null;
+		GrammarAST lp_AST = null;
+		
+		try {      // for error handling
+			lp = LT(1);
+			lp_AST = (GrammarAST)astFactory.create(lp);
+			astFactory.makeASTRoot(currentAST, lp_AST);
+			match(LPAREN);
+			lp_AST.setType(TEMPLATE); lp_AST.setText("TEMPLATE");
+			GrammarAST tmp92_AST = null;
+			tmp92_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp92_AST);
+			match(ACTION);
+			match(RPAREN);
+			match(LPAREN);
+			rewrite_template_args();
+			astFactory.addASTChild(currentAST, returnAST);
+			match(RPAREN);
+			rewrite_indirect_template_head_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_19);
+		}
+		returnAST = rewrite_indirect_template_head_AST;
+	}
+	
+	public final void rewrite_template_args() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_template_args_AST = null;
+		
+		try {      // for error handling
+			switch ( LA(1)) {
+			case TOKEN_REF:
+			case RULE_REF:
+			{
+				rewrite_template_arg();
+				astFactory.addASTChild(currentAST, returnAST);
+				{
+				_loop129:
+				do {
+					if ((LA(1)==COMMA)) {
+						match(COMMA);
+						rewrite_template_arg();
+						astFactory.addASTChild(currentAST, returnAST);
+					}
+					else {
+						break _loop129;
+					}
+					
+				} while (true);
+				}
+				rewrite_template_args_AST = (GrammarAST)currentAST.root;
+				rewrite_template_args_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(ARGLIST,"ARGLIST")).add(rewrite_template_args_AST));
+				currentAST.root = rewrite_template_args_AST;
+				currentAST.child = rewrite_template_args_AST!=null &&rewrite_template_args_AST.getFirstChild()!=null ?
+					rewrite_template_args_AST.getFirstChild() : rewrite_template_args_AST;
+				currentAST.advanceChildToEnd();
+				rewrite_template_args_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			case RPAREN:
+			{
+				rewrite_template_args_AST = (GrammarAST)currentAST.root;
+				rewrite_template_args_AST = (GrammarAST)astFactory.create(ARGLIST,"ARGLIST");
+				currentAST.root = rewrite_template_args_AST;
+				currentAST.child = rewrite_template_args_AST!=null &&rewrite_template_args_AST.getFirstChild()!=null ?
+					rewrite_template_args_AST.getFirstChild() : rewrite_template_args_AST;
+				currentAST.advanceChildToEnd();
+				rewrite_template_args_AST = (GrammarAST)currentAST.root;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(LT(1), getFilename());
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_38);
+		}
+		returnAST = rewrite_template_args_AST;
+	}
+	
+	public final void rewrite_template_arg() throws RecognitionException, TokenStreamException {
+		
+		returnAST = null;
+		ASTPair currentAST = new ASTPair();
+		GrammarAST rewrite_template_arg_AST = null;
+		Token  a = null;
+		GrammarAST a_AST = null;
+		
+		try {      // for error handling
+			id();
+			astFactory.addASTChild(currentAST, returnAST);
+			a = LT(1);
+			a_AST = (GrammarAST)astFactory.create(a);
+			astFactory.makeASTRoot(currentAST, a_AST);
+			match(ASSIGN);
+			a_AST.setType(ARG); a_AST.setText("ARG");
+			GrammarAST tmp97_AST = null;
+			tmp97_AST = (GrammarAST)astFactory.create(LT(1));
+			astFactory.addASTChild(currentAST, tmp97_AST);
+			match(ACTION);
+			rewrite_template_arg_AST = (GrammarAST)currentAST.root;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			recover(ex,_tokenSet_39);
+		}
+		returnAST = rewrite_template_arg_AST;
+	}
+	
+	
+	public static final String[] _tokenNames = {
+		"<0>",
+		"EOF",
+		"<2>",
+		"NULL_TREE_LOOKAHEAD",
+		"\"options\"",
+		"\"tokens\"",
+		"\"parser\"",
+		"LEXER",
+		"RULE",
+		"BLOCK",
+		"OPTIONAL",
+		"CLOSURE",
+		"POSITIVE_CLOSURE",
+		"SYNPRED",
+		"RANGE",
+		"CHAR_RANGE",
+		"EPSILON",
+		"ALT",
+		"EOR",
+		"EOB",
+		"EOA",
+		"ID",
+		"ARG",
+		"ARGLIST",
+		"RET",
+		"LEXER_GRAMMAR",
+		"PARSER_GRAMMAR",
+		"TREE_GRAMMAR",
+		"COMBINED_GRAMMAR",
+		"INITACTION",
+		"LABEL",
+		"TEMPLATE",
+		"\"scope\"",
+		"GATED_SEMPRED",
+		"SYN_SEMPRED",
+		"BACKTRACK_SEMPRED",
+		"\"fragment\"",
+		"ACTION",
+		"DOC_COMMENT",
+		"SEMI",
+		"\"lexer\"",
+		"\"tree\"",
+		"\"grammar\"",
+		"AMPERSAND",
+		"COLON",
+		"RCURLY",
+		"ASSIGN",
+		"STRING_LITERAL",
+		"CHAR_LITERAL",
+		"INT",
+		"STAR",
+		"TOKEN_REF",
+		"\"protected\"",
+		"\"public\"",
+		"\"private\"",
+		"BANG",
+		"ARG_ACTION",
+		"\"returns\"",
+		"\"throws\"",
+		"COMMA",
+		"LPAREN",
+		"OR",
+		"RPAREN",
+		"\"catch\"",
+		"\"finally\"",
+		"PLUS_ASSIGN",
+		"SEMPRED",
+		"IMPLIES",
+		"ROOT",
+		"RULE_REF",
+		"NOT",
+		"TREE_BEGIN",
+		"QUESTION",
+		"PLUS",
+		"WILDCARD",
+		"REWRITE",
+		"DOLLAR",
+		"DOUBLE_QUOTE_STRING_LITERAL",
+		"DOUBLE_ANGLE_STRING_LITERAL",
+		"WS",
+		"COMMENT",
+		"SL_COMMENT",
+		"ML_COMMENT",
+		"OPEN_ELEMENT_OPTION",
+		"CLOSE_ELEMENT_OPTION",
+		"ESC",
+		"DIGIT",
+		"XDIGIT",
+		"NESTED_ARG_ACTION",
+		"NESTED_ACTION",
+		"ACTION_CHAR_LITERAL",
+		"ACTION_STRING_LITERAL",
+		"ACTION_ESC",
+		"WS_LOOP",
+		"INTERNAL_RULE_REF",
+		"WS_OPT",
+		"SRC"
+	};
+	
+	protected void buildTokenTypeASTClassMap() {
+		tokenTypeToASTClassMap=null;
+	};
+	
+	private static final long[] mk_tokenSet_0() {
+		long[] data = { 2L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+	private static final long[] mk_tokenSet_1() {
+		long[] data = { 2251799813685248L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+	private static final long[] mk_tokenSet_2() {
+		long[] data = { 9191240600534384656L, 7074L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+	private static final long[] mk_tokenSet_3() {
+		long[] data = { 33803733376696352L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+	private static final long[] mk_tokenSet_4() {
+		long[] data = { 33786141190651904L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+	private static final long[] mk_tokenSet_5() {
+		long[] data = { 33786136895684608L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+	private static final long[] mk_tokenSet_6() {
+		long[] data = { 33777340802662400L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+	private static final long[] mk_tokenSet_7() {
+		long[] data = { 2252899325313088L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+	private static final long[] mk_tokenSet_8() {
+		long[] data = { 17592186044416L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+	private static final long[] mk_tokenSet_9() {
+		long[] data = { 549755813888L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
+	private static final long[] mk_tokenSet_10() {
+		long[] data = { 2286984185774080L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
+	private static final long[] mk_tokenSet_11() {
+		long[] data = { 33777340802662402L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11());
+	private static final long[] mk_tokenSet_12() {
+		long[] data = { 26392574033936L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12());
+	private static final long[] mk_tokenSet_13() {
+		long[] data = { 3461439213294059520L, 3300L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13());
+	private static final long[] mk_tokenSet_14() {
+		long[] data = { 26388279066624L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14());
+	private static final long[] mk_tokenSet_15() {
+		long[] data = { 8073124681965633536L, 3300L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15());
+	private static final long[] mk_tokenSet_16() {
+		long[] data = { 8182434279708442640L, 8190L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16());
+	private static final long[] mk_tokenSet_17() {
+		long[] data = { 8110279928647254016L, 4092L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17());
+	private static final long[] mk_tokenSet_18() {
+		long[] data = { 1155595654324551680L, 1252L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18());
+	private static final long[] mk_tokenSet_19() {
+		long[] data = { 6917529577396895744L, 2048L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19());
+	private static final long[] mk_tokenSet_20() {
+		long[] data = { 6917529577396895744L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20());
+	private static final long[] mk_tokenSet_21() {
+		long[] data = { 8073125231721447424L, 3300L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21());
+	private static final long[] mk_tokenSet_22() {
+		long[] data = { -9189594696052113406L, 33L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22());
+	private static final long[] mk_tokenSet_23() {
+		long[] data = { 2674012278751232L, 1120L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23());
+	private static final long[] mk_tokenSet_24() {
+		long[] data = { 8182337522685198336L, 4084L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24());
+	private static final long[] mk_tokenSet_25() {
+		long[] data = { 2674012278751232L, 1024L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25());
+	private static final long[] mk_tokenSet_26() {
+		long[] data = { 8182337522685181952L, 4084L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_26 = new BitSet(mk_tokenSet_26());
+	private static final long[] mk_tokenSet_27() {
+		long[] data = { 8074251131628290048L, 4068L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_27 = new BitSet(mk_tokenSet_27());
+	private static final long[] mk_tokenSet_28() {
+		long[] data = { 8073125231721447424L, 7396L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_28 = new BitSet(mk_tokenSet_28());
+	private static final long[] mk_tokenSet_29() {
+		long[] data = { 8110279928647254016L, 4084L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_29 = new BitSet(mk_tokenSet_29());
+	private static final long[] mk_tokenSet_30() {
+		long[] data = { 1155173441859485696L, 32L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_30 = new BitSet(mk_tokenSet_30());
+	private static final long[] mk_tokenSet_31() {
+		long[] data = { 8070451219442696192L, 2048L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_31 = new BitSet(mk_tokenSet_31());
+	private static final long[] mk_tokenSet_32() {
+		long[] data = { 1155595654324551680L, 4256L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_32 = new BitSet(mk_tokenSet_32());
+	private static final long[] mk_tokenSet_33() {
+		long[] data = { 8146308725666217984L, 7072L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_33 = new BitSet(mk_tokenSet_33());
+	private static final long[] mk_tokenSet_34() {
+		long[] data = { 1125899906842624L, 768L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_34 = new BitSet(mk_tokenSet_34());
+	private static final long[] mk_tokenSet_35() {
+		long[] data = { 8073125231721447424L, 6304L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_35 = new BitSet(mk_tokenSet_35());
+	private static final long[] mk_tokenSet_36() {
+		long[] data = { 8074251131628290048L, 7072L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_36 = new BitSet(mk_tokenSet_36());
+	private static final long[] mk_tokenSet_37() {
+		long[] data = { 6917529577396895744L, 26624L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_37 = new BitSet(mk_tokenSet_37());
+	private static final long[] mk_tokenSet_38() {
+		long[] data = { 4611686018427387904L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_38 = new BitSet(mk_tokenSet_38());
+	private static final long[] mk_tokenSet_39() {
+		long[] data = { 5188146770730811392L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_39 = new BitSet(mk_tokenSet_39());
+	
+	}
diff --git a/src/org/antlr/tool/ANTLRParser.smap b/src/org/antlr/tool/ANTLRParser.smap
new file mode 100644
index 0000000..ce01718
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRParser.smap
@@ -0,0 +1,2758 @@
+SMAP
+ANTLRParser.java
+G
+*S G
+*F
++ 0 antlr.g
+antlr.g
+*L
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+1:32
+1:33
+1:34
+1:35
+99:75
+100:76
+101:77
+102:78
+104:80
+105:81
+106:82
+107:83
+108:84
+109:85
+110:86
+112:88
+113:89
+115:91
+116:92
+117:93
+118:94
+119:95
+121:97
+122:98
+123:99
+124:100
+125:101
+127:103
+128:104
+129:105
+130:106
+131:107
+132:108
+133:109
+134:110
+135:111
+137:113
+138:114
+139:115
+140:116
+141:117
+142:118
+143:119
+144:120
+145:121
+146:122
+147:123
+148:124
+149:125
+150:126
+151:127
+152:128
+153:129
+155:131
+156:132
+157:133
+158:134
+160:136
+161:137
+162:138
+163:139
+164:140
+166:142
+167:143
+168:144
+169:145
+170:146
+171:147
+172:148
+173:149
+174:150
+175:151
+176:152
+177:153
+178:154
+179:155
+180:156
+182:158
+183:159
+184:160
+185:161
+186:162
+187:163
+188:164
+189:165
+190:166
+191:167
+192:168
+193:169
+194:170
+195:171
+196:172
+197:173
+198:174
+199:175
+201:177
+202:178
+203:179
+204:180
+205:181
+206:182
+207:183
+208:184
+209:185
+210:186
+211:187
+212:188
+213:189
+214:190
+215:191
+217:193
+218:194
+219:195
+220:196
+221:197
+222:198
+223:199
+224:200
+225:201
+226:202
+237:233
+237:234
+237:235
+237:237
+237:238
+237:239
+237:255
+237:403
+237:404
+237:405
+237:406
+237:407
+237:408
+237:409
+238:249
+239:250
+240:251
+241:252
+245:257
+245:258
+245:259
+245:260
+245:261
+245:262
+245:273
+245:274
+245:275
+245:276
+245:277
+246:240
+246:241
+246:280
+246:281
+246:282
+246:283
+246:284
+246:285
+246:295
+246:296
+246:297
+246:298
+246:299
+247:242
+247:243
+247:301
+247:302
+247:303
+247:304
+247:305
+247:306
+247:307
+248:309
+248:310
+248:311
+248:312
+248:331
+248:332
+248:333
+248:334
+248:335
+249:313
+249:314
+250:315
+252:244
+252:338
+252:339
+252:340
+252:341
+252:342
+252:357
+252:358
+252:359
+252:360
+252:361
+253:245
+253:363
+253:364
+254:246
+254:366
+254:367
+254:368
+254:369
+254:370
+254:383
+254:384
+254:385
+254:386
+254:387
+255:247
+255:389
+255:390
+256:391
+256:392
+256:393
+257:394
+257:399
+257:401
+257:402
+258:396
+259:397
+263:411
+263:413
+263:414
+263:415
+263:419
+263:457
+263:458
+263:459
+263:460
+263:461
+263:462
+263:463
+264:421
+264:422
+264:423
+264:424
+264:425
+264:445
+264:446
+264:447
+264:448
+264:449
+265:428
+265:429
+265:430
+265:431
+266:434
+266:435
+266:436
+266:437
+267:440
+267:441
+267:442
+269:416
+269:417
+269:451
+269:452
+269:453
+269:454
+269:455
+271:456
+272:612
+272:614
+272:615
+272:616
+272:618
+272:635
+272:636
+272:637
+272:638
+272:639
+272:640
+272:641
+273:620
+273:621
+273:622
+273:623
+273:624
+273:625
+273:626
+273:627
+273:628
+273:629
+273:631
+273:632
+273:633
+273:634
+277:674
+277:675
+277:677
+277:678
+277:679
+277:681
+277:707
+277:708
+277:709
+277:710
+277:711
+277:712
+277:713
+278:682
+278:683
+278:684
+278:685
+278:687
+278:688
+278:689
+278:690
+278:691
+278:692
+278:694
+278:695
+278:696
+278:697
+278:700
+278:701
+278:702
+278:703
+278:704
+278:705
+280:706
+284:715
+284:718
+284:720
+284:721
+284:722
+284:728
+284:729
+284:758
+284:759
+284:760
+284:761
+284:762
+284:763
+284:764
+284:765
+284:766
+284:767
+284:768
+284:769
+285:716
+285:730
+285:731
+285:732
+285:733
+285:734
+285:735
+286:717
+286:723
+286:724
+286:738
+286:739
+286:740
+286:741
+286:742
+286:743
+286:744
+287:725
+287:726
+287:748
+287:749
+287:750
+287:751
+287:752
+287:753
+287:754
+288:745
+289:755
+305:508
+305:509
+305:511
+305:512
+305:513
+305:515
+305:538
+305:539
+305:540
+305:541
+305:542
+305:543
+305:544
+305:545
+306:516
+306:517
+306:518
+306:519
+306:521
+306:522
+306:523
+306:524
+306:525
+306:526
+306:527
+306:528
+306:529
+306:530
+306:531
+306:533
+306:534
+306:535
+306:536
+308:537
+309:771
+309:772
+309:773
+309:775
+309:776
+309:777
+309:783
+309:797
+309:798
+309:799
+309:800
+309:801
+309:802
+309:803
+310:780
+313:778
+313:784
+313:785
+313:786
+313:787
+313:788
+313:789
+313:790
+313:791
+313:792
+315:794
+318:796
+334:805
+334:806
+334:808
+334:809
+334:810
+334:821
+334:822
+334:875
+334:876
+334:877
+334:878
+334:879
+334:880
+334:881
+334:882
+334:883
+334:884
+334:885
+334:886
+334:887
+335:811
+335:823
+335:824
+335:825
+335:826
+335:827
+335:828
+335:829
+336:812
+336:813
+336:833
+336:834
+336:835
+336:836
+336:837
+336:838
+336:839
+337:830
+337:840
+338:814
+338:815
+338:844
+338:845
+338:846
+338:847
+338:848
+338:849
+338:850
+339:841
+339:851
+340:816
+340:817
+340:855
+340:856
+340:857
+340:858
+340:859
+340:860
+340:861
+341:818
+341:819
+341:852
+341:865
+341:866
+341:867
+341:868
+341:869
+341:870
+341:871
+342:862
+343:872
+355:547
+355:549
+355:550
+355:551
+355:553
+355:575
+355:576
+355:577
+355:578
+355:579
+355:580
+355:581
+356:554
+356:555
+356:556
+356:557
+357:559
+357:560
+357:561
+357:562
+357:563
+357:564
+357:565
+357:566
+357:567
+357:568
+357:570
+357:571
+357:572
+358:573
+360:574
+361:889
+361:891
+361:892
+361:893
+361:895
+361:946
+361:947
+361:948
+361:949
+361:950
+361:951
+361:952
+362:896
+362:897
+362:898
+362:899
+362:901
+362:902
+362:903
+362:904
+362:905
+362:906
+362:907
+362:909
+362:910
+362:911
+362:912
+362:913
+362:914
+362:915
+362:918
+362:919
+362:920
+362:921
+362:922
+362:923
+362:926
+362:927
+362:928
+362:929
+362:930
+362:938
+362:939
+362:940
+362:941
+362:942
+362:944
+364:945
+365:583
+365:585
+365:586
+365:587
+365:589
+365:604
+365:605
+365:606
+365:607
+365:608
+365:609
+365:610
+366:590
+366:591
+366:592
+366:593
+366:594
+366:595
+366:596
+366:597
+366:598
+366:599
+366:601
+366:602
+366:603
+369:954
+369:956
+369:957
+369:958
+369:960
+369:972
+369:973
+369:974
+369:975
+369:976
+369:977
+369:978
+370:961
+370:962
+370:963
+370:964
+370:965
+370:966
+370:967
+370:968
+370:969
+370:970
+372:971
+373:643
+373:645
+373:646
+373:647
+373:649
+373:666
+373:667
+373:668
+373:669
+373:670
+373:671
+373:672
+374:651
+374:652
+374:653
+374:657
+374:658
+374:659
+374:660
+374:662
+374:663
+374:664
+374:665
+381:654
+381:655
+381:656
+385:980
+385:982
+385:983
+385:984
+385:1016
+385:1289
+385:1290
+385:1291
+385:1292
+385:1293
+385:1294
+385:1295
+386:1009
+387:1010
+388:1011
+389:1012
+390:1013
+394:985
+394:986
+394:1018
+394:1019
+394:1020
+394:1021
+394:1022
+394:1023
+394:1035
+394:1036
+394:1037
+394:1038
+394:1039
+396:987
+396:988
+396:1042
+396:1043
+396:1044
+396:1045
+396:1046
+396:1047
+396:1048
+396:1080
+396:1081
+396:1082
+396:1083
+396:1084
+397:989
+397:990
+397:1051
+397:1052
+397:1053
+397:1054
+397:1055
+397:1056
+398:991
+398:992
+398:1059
+398:1060
+398:1061
+398:1062
+398:1063
+398:1064
+399:993
+399:994
+399:1067
+399:1068
+399:1069
+399:1070
+399:1071
+399:1072
+401:995
+401:1086
+401:1087
+402:1088
+403:1089
+404:1090
+405:1091
+407:1094
+407:1095
+407:1096
+407:1097
+407:1098
+407:1099
+407:1112
+407:1113
+407:1114
+407:1115
+407:1116
+408:996
+408:997
+408:1119
+408:1120
+408:1121
+408:1122
+408:1123
+408:1124
+408:1136
+408:1137
+408:1138
+408:1139
+408:1140
+409:998
+409:999
+409:1143
+409:1144
+409:1145
+409:1146
+409:1147
+409:1148
+409:1149
+409:1160
+409:1161
+409:1162
+409:1163
+409:1164
+410:1167
+410:1168
+410:1169
+410:1170
+410:1180
+410:1181
+410:1182
+410:1183
+410:1184
+411:1187
+411:1188
+411:1189
+411:1190
+411:1191
+411:1200
+411:1201
+411:1202
+411:1203
+411:1204
+412:1000
+412:1206
+412:1207
+413:1001
+413:1209
+413:1210
+413:1211
+413:1212
+413:1213
+413:1220
+413:1221
+413:1222
+413:1223
+413:1224
+414:1002
+414:1003
+414:1226
+414:1227
+414:1228
+416:1230
+417:1231
+418:1232
+419:1233
+420:1234
+431:1004
+431:1236
+431:1237
+431:1238
+432:1005
+432:1006
+432:1239
+432:1240
+432:1241
+433:1007
+433:1243
+433:1244
+433:1245
+433:1246
+433:1247
+433:1248
+433:1262
+433:1263
+433:1264
+433:1265
+433:1266
+434:1268
+434:1285
+434:1287
+434:1288
+435:1270
+436:1271
+437:1272
+438:1273
+439:1274
+440:1275
+441:1276
+442:1277
+443:1278
+444:1279
+445:1280
+446:1281
+447:1282
+448:1283
+454:1398
+454:1400
+454:1401
+454:1402
+454:1404
+454:1421
+454:1422
+454:1423
+454:1424
+454:1425
+454:1426
+454:1427
+455:1406
+455:1407
+455:1408
+455:1409
+455:1410
+455:1411
+455:1412
+455:1413
+455:1414
+455:1415
+455:1417
+455:1418
+455:1419
+455:1420
+459:1563
+459:1564
+459:1566
+459:1567
+459:1568
+459:1570
+459:1582
+459:1583
+459:1584
+459:1585
+459:1586
+459:1587
+459:1588
+460:1571
+460:1572
+460:1573
+460:1574
+460:1575
+460:1576
+460:1577
+460:1578
+460:1579
+460:1580
+462:1581
+463:1297
+463:1299
+463:1300
+463:1301
+463:1303
+463:1328
+463:1329
+463:1330
+463:1331
+463:1332
+463:1333
+463:1334
+464:1304
+464:1305
+464:1306
+464:1307
+464:1308
+464:1309
+464:1310
+464:1311
+464:1312
+464:1313
+464:1314
+464:1315
+464:1316
+464:1317
+464:1318
+464:1319
+464:1320
+464:1321
+464:1322
+464:1323
+464:1325
+464:1326
+464:1327
+468:1336
+468:1338
+468:1339
+468:1340
+468:1349
+468:1390
+468:1391
+468:1392
+468:1393
+468:1394
+468:1395
+468:1396
+469:1345
+470:1346
+473:1341
+473:1342
+473:1351
+473:1352
+473:1353
+473:1354
+473:1355
+473:1356
+473:1358
+473:1359
+473:1360
+473:1361
+474:1343
+474:1364
+474:1365
+474:1366
+474:1367
+474:1368
+474:1369
+474:1370
+474:1371
+474:1372
+474:1373
+474:1374
+474:1375
+474:1377
+474:1378
+475:1379
+475:1386
+475:1388
+475:1389
+476:1381
+477:1382
+478:1383
+479:1384
+484:1621
+484:1622
+484:1624
+484:1625
+484:1626
+484:1638
+484:1738
+484:1739
+484:1740
+484:1741
+484:1742
+484:1743
+484:1744
+485:1634
+486:1635
+494:1627
+494:1628
+494:1639
+494:1640
+494:1641
+494:1642
+494:1643
+495:1686
+495:1693
+495:1695
+495:1696
+495:1697
+495:1698
+503:1645
+503:1647
+503:1648
+503:1649
+503:1650
+503:1651
+503:1652
+503:1653
+503:1661
+503:1662
+503:1663
+503:1664
+503:1665
+504:1668
+504:1669
+504:1670
+504:1671
+504:1672
+504:1679
+504:1680
+504:1681
+504:1682
+504:1683
+505:1685
+506:1687
+506:1688
+506:1689
+506:1690
+506:1691
+506:1692
+509:1701
+511:1629
+511:1702
+511:1703
+511:1704
+511:1705
+511:1706
+512:1707
+513:1630
+513:1708
+513:1709
+513:1710
+513:1711
+513:1712
+513:1713
+513:1714
+513:1715
+513:1716
+513:1717
+513:1719
+513:1720
+513:1721
+513:1722
+513:1724
+513:1725
+514:1718
+517:1631
+517:1632
+517:1726
+517:1727
+517:1728
+518:1729
+519:1731
+520:1732
+521:1733
+522:1734
+523:1735
+526:1737
+527:1429
+527:1430
+527:1431
+527:1433
+527:1434
+527:1435
+527:1447
+527:1482
+527:1483
+527:1484
+527:1485
+527:1486
+527:1487
+527:1488
+528:1439
+529:1440
+530:1441
+531:1442
+532:1443
+533:1444
+536:1436
+536:1448
+536:1449
+536:1450
+536:1451
+536:1452
+537:1453
+538:1437
+538:1454
+538:1455
+538:1456
+538:1457
+538:1458
+538:1459
+538:1460
+538:1461
+538:1462
+538:1463
+538:1465
+538:1466
+538:1467
+538:1468
+538:1470
+538:1471
+539:1464
+540:1472
+540:1477
+540:1479
+540:1480
+540:1481
+541:1474
+542:1475
+546:1746
+546:1748
+546:1749
+546:1750
+546:1759
+546:1760
+546:1824
+546:1825
+546:1826
+546:1827
+546:1828
+546:1829
+546:1830
+546:1831
+546:1832
+546:1833
+546:1834
+546:1835
+547:1753
+548:1754
+549:1755
+550:1756
+553:1751
+553:1761
+553:1762
+553:1763
+553:1764
+553:1765
+553:1766
+553:1767
+553:1768
+553:1769
+553:1770
+553:1771
+553:1773
+553:1774
+553:1775
+553:1776
+553:1777
+553:1778
+553:1779
+553:1780
+553:1781
+553:1782
+553:1783
+553:1785
+553:1786
+553:1787
+554:1788
+554:1798
+554:1800
+554:1801
+554:1802
+555:1790
+556:1791
+557:1792
+558:1793
+559:1794
+560:1795
+561:1796
+563:1805
+563:1806
+563:1807
+563:1808
+563:1809
+563:1810
+563:1817
+563:1819
+563:1820
+563:1821
+564:1812
+565:1813
+566:1814
+567:1815
+571:1490
+571:1492
+571:1493
+571:1494
+571:1496
+571:1497
+571:1550
+571:1551
+571:1552
+571:1553
+571:1554
+571:1555
+571:1556
+571:1557
+571:1558
+571:1559
+571:1560
+571:1561
+572:1498
+572:1499
+572:1501
+572:1502
+572:1503
+572:1504
+572:1505
+572:1506
+572:1507
+572:1508
+572:1509
+572:1510
+572:1512
+572:1513
+572:1514
+572:1516
+572:1517
+572:1518
+572:1519
+572:1520
+572:1534
+572:1535
+572:1536
+572:1537
+572:1538
+573:1540
+573:1543
+573:1544
+573:1545
+573:1546
+573:1547
+576:1936
+576:1938
+576:1939
+576:1940
+576:1942
+576:1956
+576:1957
+576:1958
+576:1959
+576:1960
+576:1961
+576:1962
+577:1943
+577:1944
+577:1945
+577:1946
+577:1947
+577:1948
+577:1949
+577:1950
+577:1951
+577:1952
+577:1953
+577:1954
+579:1955
+580:1964
+580:1966
+580:1967
+580:1968
+580:1970
+580:1980
+580:1981
+580:1982
+580:1983
+580:1984
+580:1985
+580:1986
+581:1971
+581:1972
+581:1973
+581:1974
+581:1975
+581:1976
+581:1977
+581:1978
+583:1979
+584:1918
+584:1920
+584:1921
+584:1922
+584:1924
+584:1928
+584:1929
+584:1930
+584:1931
+584:1932
+584:1933
+584:1934
+585:1925
+585:1926
+585:1927
+588:1988
+588:1990
+588:1991
+588:1992
+588:2001
+588:2002
+588:2071
+588:2163
+588:2206
+588:2207
+588:2208
+588:2209
+588:2210
+588:2211
+588:2212
+588:2213
+588:2214
+588:2215
+588:2216
+588:2217
+589:1997
+590:1998
+593:2072
+593:2073
+593:2074
+593:2076
+593:2077
+593:2078
+593:2079
+593:2080
+593:2081
+593:2082
+593:2085
+593:2086
+593:2087
+593:2088
+593:2089
+593:2090
+593:2093
+593:2094
+593:2095
+593:2096
+593:2097
+593:2100
+593:2101
+593:2102
+593:2103
+593:2104
+593:2105
+593:2106
+593:2107
+593:2108
+593:2109
+593:2112
+593:2113
+593:2114
+593:2115
+593:2118
+593:2119
+593:2120
+593:2121
+593:2122
+594:2125
+594:2126
+594:2127
+594:2128
+594:2129
+594:2130
+594:2131
+594:2132
+594:2133
+594:2135
+594:2136
+594:2156
+594:2157
+594:2158
+594:2159
+594:2160
+595:2162
+595:2164
+595:2165
+595:2166
+596:2168
+596:2169
+596:2170
+596:2171
+596:2172
+596:2173
+596:2174
+596:2175
+596:2176
+596:2178
+596:2179
+596:2199
+596:2200
+596:2201
+596:2202
+596:2203
+597:2003
+597:2004
+597:2005
+597:2006
+597:2007
+597:2205
+598:2010
+598:2011
+598:2012
+598:2013
+598:2014
+598:2015
+599:1993
+599:1994
+599:2019
+599:2020
+599:2021
+599:2022
+599:2023
+599:2024
+599:2026
+599:2027
+599:2028
+599:2029
+599:2030
+599:2050
+599:2051
+599:2052
+599:2053
+599:2054
+600:2016
+601:2057
+602:2058
+604:1995
+604:2063
+604:2064
+604:2065
+604:2066
+604:2067
+604:2068
+605:2060
+607:2219
+607:2221
+607:2222
+607:2223
+607:2227
+607:2228
+607:2371
+607:2372
+607:2373
+607:2374
+607:2376
+607:2377
+607:2378
+607:2379
+607:2380
+607:2381
+607:2382
+607:2385
+607:2386
+607:2387
+607:2388
+607:2389
+607:2390
+607:2413
+607:2414
+607:2415
+607:2416
+607:2417
+607:2420
+607:2425
+607:2426
+607:2427
+607:2428
+607:2429
+607:2430
+607:2431
+607:2432
+607:2433
+607:2434
+607:2435
+607:2436
+608:2419
+608:2421
+608:2422
+608:2423
+608:2424
+609:2229
+609:2230
+609:2231
+609:2232
+609:2234
+609:2235
+609:2236
+609:2237
+609:2238
+609:2239
+609:2240
+609:2243
+609:2244
+609:2245
+609:2246
+609:2247
+609:2248
+609:2271
+609:2272
+609:2273
+609:2274
+609:2275
+610:2224
+610:2225
+610:2277
+610:2280
+610:2281
+610:2282
+610:2283
+610:2284
+610:2285
+611:2287
+611:2288
+611:2289
+611:2290
+611:2291
+611:2292
+611:2293
+611:2318
+611:2319
+611:2320
+611:2321
+611:2322
+612:2325
+612:2326
+612:2327
+612:2328
+612:2329
+612:2330
+612:2331
+612:2334
+612:2335
+612:2336
+612:2337
+612:2338
+612:2339
+612:2362
+612:2363
+612:2364
+612:2365
+612:2366
+613:2368
+615:3016
+615:3018
+615:3019
+615:3020
+615:3029
+615:3059
+615:3060
+615:3061
+615:3062
+615:3063
+615:3064
+615:3065
+616:3024
+617:3025
+618:3026
+621:3021
+621:3022
+621:3030
+621:3031
+621:3032
+621:3033
+622:3035
+622:3036
+622:3037
+622:3038
+622:3039
+622:3040
+622:3041
+622:3050
+622:3051
+622:3052
+622:3053
+622:3054
+623:3044
+623:3045
+623:3046
+623:3047
+625:3056
+625:3057
+627:3058
+628:2653
+628:2655
+628:2656
+628:2657
+628:2659
+628:2683
+628:2684
+628:2685
+628:2686
+628:2687
+628:2688
+628:2689
+629:2660
+629:2661
+629:2662
+629:2663
+630:2664
+630:2665
+630:2667
+630:2668
+630:2669
+630:2670
+630:2671
+630:2672
+630:2673
+630:2674
+630:2675
+630:2676
+630:2678
+630:2679
+630:2680
+631:2681
+633:2682
+635:2509
+635:2510
+635:2512
+635:2513
+635:2514
+635:2521
+635:2645
+635:2646
+635:2647
+635:2648
+635:2649
+635:2650
+635:2651
+636:2517
+637:2518
+640:2515
+640:2522
+640:2523
+641:2525
+641:2526
+641:2527
+641:2528
+641:2529
+641:2530
+641:2531
+641:2532
+641:2533
+641:2535
+641:2536
+641:2637
+641:2638
+641:2639
+641:2640
+641:2641
+642:2539
+642:2540
+642:2541
+642:2542
+642:2543
+642:2544
+642:2545
+642:2546
+642:2548
+642:2549
+643:2552
+643:2553
+643:2554
+643:2555
+643:2556
+643:2557
+643:2558
+643:2559
+643:2561
+643:2562
+644:2565
+644:2566
+644:2567
+645:2568
+645:2582
+645:2584
+645:2585
+646:2570
+647:2571
+648:2572
+649:2573
+650:2574
+651:2575
+652:2576
+653:2577
+654:2578
+655:2579
+656:2580
+658:2588
+658:2589
+658:2590
+658:2591
+658:2592
+658:2593
+658:2594
+658:2595
+658:2597
+658:2598
+659:2601
+659:2602
+659:2603
+659:2604
+659:2605
+659:2606
+659:2607
+659:2608
+659:2610
+659:2611
+660:2614
+660:2615
+660:2616
+660:2617
+660:2618
+660:2619
+660:2620
+660:2621
+660:2622
+660:2623
+660:2624
+660:2625
+660:2626
+660:2627
+660:2628
+660:2629
+660:2630
+660:2631
+660:2633
+660:2634
+662:2643
+662:2644
+665:2691
+665:2693
+665:2694
+665:2695
+665:2704
+665:2726
+665:2727
+665:2728
+665:2729
+665:2730
+665:2731
+665:2732
+666:2701
+669:2696
+669:2697
+669:2698
+669:2699
+669:2705
+669:2706
+669:2707
+669:2708
+669:2709
+669:2710
+669:2711
+669:2712
+669:2713
+670:2714
+670:2722
+670:2724
+670:2725
+671:2716
+672:2717
+673:2718
+674:2719
+675:2720
+680:2734
+680:2736
+680:2737
+680:2738
+680:2751
+680:2752
+680:3003
+680:3004
+680:3005
+680:3006
+680:3007
+680:3008
+680:3009
+680:3010
+680:3011
+680:3012
+680:3013
+680:3014
+681:2748
+684:2739
+684:2740
+684:2753
+684:2754
+684:2755
+684:2756
+684:2757
+684:2758
+684:2760
+684:2761
+684:2762
+684:2763
+684:2764
+684:2765
+684:2766
+684:2769
+684:2770
+684:2771
+684:2772
+684:2773
+684:2774
+684:2797
+684:2798
+684:2799
+684:2800
+684:2801
+685:2803
+686:2741
+686:2742
+686:2806
+686:2807
+686:2808
+686:2809
+686:2810
+686:2811
+687:2813
+687:2814
+687:2815
+687:2816
+687:2817
+687:2818
+687:2819
+687:2844
+687:2845
+687:2846
+687:2847
+687:2848
+688:2851
+688:2852
+688:2853
+688:2854
+688:2855
+688:2856
+688:2857
+688:2860
+688:2861
+688:2862
+688:2863
+688:2864
+688:2865
+688:2888
+688:2889
+688:2890
+688:2891
+688:2892
+689:2894
+690:2743
+690:2744
+690:2897
+690:2898
+690:2899
+690:2900
+690:2901
+690:2902
+690:2904
+690:2905
+690:2906
+690:2907
+690:2908
+690:2909
+690:2910
+690:2913
+690:2914
+690:2915
+690:2916
+690:2917
+690:2918
+690:2941
+690:2942
+690:2943
+690:2944
+690:2945
+691:2947
+692:2745
+692:2746
+692:2950
+692:2951
+692:2952
+692:2953
+692:2954
+692:2955
+692:2957
+692:2958
+692:2959
+692:2960
+692:2961
+692:2962
+692:2963
+692:2966
+692:2967
+692:2968
+692:2969
+692:2970
+692:2971
+692:2994
+692:2995
+692:2996
+692:2997
+692:2998
+693:3000
+695:2438
+695:2439
+695:2440
+695:2441
+695:2443
+695:2444
+695:2445
+695:2450
+695:2500
+695:2501
+695:2502
+695:2503
+695:2504
+695:2505
+695:2506
+695:2507
+696:2447
+699:2452
+699:2453
+699:2454
+699:2455
+699:2456
+699:2457
+699:2458
+699:2477
+699:2478
+699:2479
+699:2480
+699:2481
+700:2461
+700:2462
+700:2463
+700:2464
+700:2465
+700:2466
+701:2469
+701:2470
+701:2471
+701:2472
+701:2473
+701:2474
+704:2484
+705:2485
+706:2486
+707:2487
+708:2488
+709:2489
+710:2490
+711:2491
+712:2492
+713:2493
+714:2494
+715:2495
+716:2496
+717:2497
+718:2498
+724:3067
+724:3069
+724:3070
+724:3071
+724:3077
+724:3078
+724:3106
+724:3107
+724:3108
+724:3109
+724:3110
+724:3111
+724:3112
+724:3113
+724:3114
+724:3115
+724:3116
+724:3117
+725:3072
+725:3073
+725:3079
+725:3080
+725:3081
+725:3082
+725:3083
+725:3084
+726:3074
+726:3075
+726:3088
+726:3089
+726:3090
+726:3091
+726:3092
+726:3093
+727:3085
+727:3097
+727:3098
+727:3099
+727:3100
+727:3101
+727:3102
+728:3094
+729:3103
+730:1590
+730:1592
+730:1593
+730:1594
+730:1596
+730:1613
+730:1614
+730:1615
+730:1616
+730:1617
+730:1618
+730:1619
+731:1598
+731:1599
+731:1600
+731:1601
+731:1602
+731:1603
+731:1604
+731:1605
+731:1606
+731:1607
+731:1609
+731:1610
+731:1611
+731:1612
+734:465
+734:467
+734:468
+734:469
+734:471
+734:472
+734:473
+734:474
+734:475
+734:476
+734:477
+734:478
+734:479
+734:480
+734:495
+734:496
+734:497
+734:498
+734:499
+734:500
+734:501
+734:502
+734:503
+734:504
+734:505
+734:506
+735:484
+735:485
+735:486
+735:487
+735:488
+735:489
+735:490
+735:491
+736:481
+737:492
+739:3119
+739:3120
+739:3122
+739:3123
+739:3124
+739:3126
+739:3127
+739:3150
+739:3151
+739:3152
+739:3153
+739:3154
+739:3155
+739:3156
+739:3157
+739:3158
+739:3159
+739:3160
+739:3161
+740:3128
+740:3129
+740:3130
+740:3131
+740:3132
+740:3133
+740:3134
+740:3135
+741:3139
+741:3140
+741:3141
+741:3142
+741:3143
+741:3144
+741:3145
+741:3146
+742:3136
+743:3147
+746:1837
+746:1839
+746:1840
+746:1841
+746:1854
+746:1855
+746:1905
+746:1906
+746:1907
+746:1908
+746:1909
+746:1910
+746:1911
+746:1912
+746:1913
+746:1914
+746:1915
+746:1916
+747:1851
+751:1856
+751:1857
+751:1858
+751:1859
+751:1860
+751:1875
+751:1876
+751:1877
+751:1878
+751:1880
+751:1881
+752:1842
+752:1843
+752:1844
+752:1845
+752:1846
+752:1861
+752:1862
+752:1863
+752:1864
+752:1865
+752:1866
+752:1867
+752:1868
+752:1869
+753:1870
+755:1872
+756:1873
+759:1847
+759:1848
+759:1849
+759:1882
+759:1883
+759:1884
+759:1885
+759:1886
+760:1887
+760:1892
+760:1894
+760:1895
+761:1889
+762:1890
+766:1902
+767:3233
+767:3235
+767:3236
+767:3237
+767:3241
+767:3258
+767:3259
+767:3260
+767:3261
+767:3262
+767:3263
+767:3264
+768:3238
+768:3239
+768:3242
+768:3243
+768:3244
+768:3245
+768:3246
+769:3247
+769:3248
+770:3249
+771:3250
+772:3252
+773:3253
+774:3254
+775:3255
+778:3257
+779:3163
+779:3165
+779:3166
+779:3167
+779:3175
+779:3180
+779:3211
+779:3220
+779:3221
+779:3222
+779:3223
+779:3225
+779:3226
+779:3227
+779:3228
+779:3229
+779:3230
+779:3231
+780:3169
+781:3170
+782:3171
+783:3172
+786:3176
+786:3177
+786:3178
+786:3179
+788:3181
+788:3183
+788:3184
+788:3185
+788:3186
+788:3187
+788:3188
+788:3189
+788:3190
+788:3191
+788:3192
+788:3194
+788:3195
+788:3196
+789:3197
+789:3206
+789:3208
+789:3209
+789:3210
+790:3199
+791:3200
+792:3201
+793:3202
+794:3203
+795:3204
+798:3212
+798:3213
+798:3214
+798:3215
+798:3217
+798:3218
+798:3219
+801:3346
+801:3348
+801:3349
+801:3350
+801:3357
+801:3358
+801:3461
+801:3462
+801:3463
+801:3464
+801:3465
+801:3466
+801:3467
+801:3468
+801:3469
+801:3470
+801:3471
+801:3472
+802:3354
+805:3351
+805:3359
+805:3360
+805:3361
+805:3362
+805:3363
+805:3364
+805:3365
+805:3366
+805:3367
+805:3368
+806:3370
+806:3371
+806:3372
+806:3373
+806:3374
+806:3375
+806:3376
+806:3377
+806:3378
+806:3379
+806:3381
+806:3382
+806:3400
+806:3401
+806:3402
+806:3403
+806:3404
+807:3406
+807:3409
+807:3410
+807:3411
+807:3412
+807:3413
+808:3352
+808:3416
+808:3417
+808:3418
+808:3419
+808:3420
+809:3422
+809:3423
+809:3424
+809:3425
+809:3426
+809:3427
+809:3428
+809:3429
+809:3430
+809:3431
+809:3433
+809:3434
+809:3452
+809:3453
+809:3454
+809:3455
+809:3456
+810:3458
+812:3474
+812:3476
+812:3477
+812:3478
+812:3494
+812:3495
+812:3595
+812:3596
+812:3597
+812:3598
+812:3599
+812:3600
+812:3601
+812:3602
+812:3603
+812:3604
+812:3605
+812:3606
+813:3491
+816:3479
+816:3480
+816:3496
+816:3497
+816:3498
+816:3499
+816:3500
+816:3501
+817:3481
+817:3482
+817:3505
+817:3506
+817:3507
+817:3508
+817:3509
+817:3510
+817:3512
+817:3513
+817:3514
+817:3515
+817:3516
+817:3517
+817:3518
+817:3539
+817:3540
+817:3541
+817:3542
+817:3543
+818:3483
+818:3484
+818:3502
+818:3545
+818:3548
+818:3549
+818:3550
+818:3551
+818:3552
+818:3553
+819:3485
+819:3486
+819:3557
+819:3558
+819:3559
+819:3560
+819:3561
+819:3562
+820:3487
+820:3488
+820:3489
+820:3554
+820:3566
+820:3567
+820:3568
+820:3569
+820:3570
+820:3571
+820:3572
+821:3563
+821:3573
+821:3580
+821:3582
+821:3583
+822:3575
+823:3576
+824:3577
+825:3578
+827:3586
+827:3587
+827:3588
+827:3589
+827:3590
+827:3591
+829:3592
+830:3608
+830:3610
+830:3611
+830:3612
+830:3619
+830:3671
+830:3672
+830:3673
+830:3674
+830:3675
+830:3676
+830:3677
+831:3615
+832:3616
+835:3613
+835:3620
+835:3621
+836:3623
+836:3624
+836:3625
+836:3626
+836:3627
+836:3628
+836:3629
+836:3630
+836:3631
+836:3633
+836:3634
+836:3663
+836:3664
+836:3665
+836:3666
+836:3667
+837:3637
+837:3638
+837:3639
+837:3640
+837:3641
+837:3642
+837:3643
+837:3644
+837:3646
+837:3647
+838:3650
+838:3651
+838:3652
+838:3653
+838:3654
+838:3655
+838:3656
+838:3657
+838:3659
+838:3660
+840:3669
+840:3670
+843:3679
+843:3681
+843:3682
+843:3683
+843:3685
+843:3707
+843:3708
+843:3709
+843:3710
+843:3711
+843:3712
+843:3713
+844:3686
+844:3687
+844:3688
+844:3689
+845:3690
+845:3691
+845:3692
+845:3693
+845:3694
+845:3695
+845:3696
+845:3697
+845:3698
+845:3699
+845:3700
+845:3701
+845:3703
+845:3704
+846:3705
+848:3706
+860:3266
+860:3277
+860:3279
+860:3280
+860:3281
+860:3282
+860:3284
+860:3285
+860:3302
+860:3328
+860:3333
+860:3334
+860:3335
+860:3336
+860:3337
+860:3338
+860:3339
+860:3340
+860:3341
+860:3342
+860:3343
+860:3344
+861:3267
+862:3268
+863:3269
+864:3270
+864:3303
+864:3304
+864:3305
+864:3306
+865:3308
+865:3309
+865:3310
+865:3311
+865:3314
+865:3315
+865:3316
+865:3319
+865:3320
+865:3321
+865:3322
+865:3323
+866:3272
+866:3325
+866:3326
+867:3273
+868:3274
+868:3327
+869:3275
+869:3329
+869:3330
+869:3331
+869:3332
+870:3276
+872:3286
+872:3287
+872:3288
+872:3289
+872:3290
+875:3293
+875:3294
+875:3295
+875:3296
+875:3297
+875:3298
+877:3299
+879:3715
+879:3716
+879:3718
+879:3719
+879:3720
+879:3724
+879:3736
+879:3737
+879:3738
+879:3739
+879:3740
+879:3741
+879:3742
+880:3721
+880:3722
+880:3725
+880:3726
+880:3727
+880:3728
+880:3729
+880:3730
+880:3731
+881:3732
+881:3733
+882:3734
+884:3735
+886:3744
+886:3745
+886:3747
+886:3748
+886:3749
+886:3753
+886:3769
+886:3770
+886:3771
+886:3772
+886:3773
+886:3774
+886:3775
+887:3750
+887:3751
+887:3754
+887:3755
+887:3756
+887:3757
+887:3758
+888:3759
+888:3760
+888:3761
+888:3762
+889:3763
+890:3764
+890:3765
+890:3766
+890:3767
+892:3768
+893:3777
+893:3779
+893:3780
+893:3781
+893:3783
+893:3784
+893:3824
+893:3825
+893:3826
+893:3827
+893:3828
+893:3829
+893:3830
+893:3831
+893:3832
+893:3833
+893:3834
+893:3835
+894:3785
+894:3786
+894:3787
+894:3788
+894:3789
+894:3790
+894:3791
+894:3792
+894:3793
+894:3794
+894:3795
+894:3796
+894:3797
+894:3798
+894:3799
+894:3800
+894:3802
+894:3803
+895:3804
+895:3805
+895:3806
+895:3808
+895:3809
+895:3810
+896:3813
+896:3814
+896:3815
+896:3816
+896:3817
+896:3819
+896:3820
+896:3821
+899:3837
+899:3839
+899:3840
+899:3841
+899:3845
+899:3858
+899:3859
+899:3860
+899:3861
+899:3862
+899:3863
+899:3864
+900:3842
+900:3843
+900:3846
+900:3847
+900:3848
+900:3849
+900:3850
+900:3851
+900:3852
+900:3853
+900:3854
+900:3855
+900:3856
+902:3857
+*E
diff --git a/src/org/antlr/tool/ANTLRTokenTypes.java b/src/org/antlr/tool/ANTLRTokenTypes.java
new file mode 100644
index 0000000..692c15a
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRTokenTypes.java
@@ -0,0 +1,133 @@
+// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRLexer.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import java.io.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import antlr.*;
+
+public interface ANTLRTokenTypes {
+	int EOF = 1;
+	int NULL_TREE_LOOKAHEAD = 3;
+	int OPTIONS = 4;
+	int TOKENS = 5;
+	int PARSER = 6;
+	int LEXER = 7;
+	int RULE = 8;
+	int BLOCK = 9;
+	int OPTIONAL = 10;
+	int CLOSURE = 11;
+	int POSITIVE_CLOSURE = 12;
+	int SYNPRED = 13;
+	int RANGE = 14;
+	int CHAR_RANGE = 15;
+	int EPSILON = 16;
+	int ALT = 17;
+	int EOR = 18;
+	int EOB = 19;
+	int EOA = 20;
+	int ID = 21;
+	int ARG = 22;
+	int ARGLIST = 23;
+	int RET = 24;
+	int LEXER_GRAMMAR = 25;
+	int PARSER_GRAMMAR = 26;
+	int TREE_GRAMMAR = 27;
+	int COMBINED_GRAMMAR = 28;
+	int INITACTION = 29;
+	int LABEL = 30;
+	int TEMPLATE = 31;
+	int SCOPE = 32;
+	int GATED_SEMPRED = 33;
+	int SYN_SEMPRED = 34;
+	int BACKTRACK_SEMPRED = 35;
+	int FRAGMENT = 36;
+	int ACTION = 37;
+	int DOC_COMMENT = 38;
+	int SEMI = 39;
+	int LITERAL_lexer = 40;
+	int LITERAL_tree = 41;
+	int LITERAL_grammar = 42;
+	int AMPERSAND = 43;
+	int COLON = 44;
+	int RCURLY = 45;
+	int ASSIGN = 46;
+	int STRING_LITERAL = 47;
+	int CHAR_LITERAL = 48;
+	int INT = 49;
+	int STAR = 50;
+	int TOKEN_REF = 51;
+	int LITERAL_protected = 52;
+	int LITERAL_public = 53;
+	int LITERAL_private = 54;
+	int BANG = 55;
+	int ARG_ACTION = 56;
+	int LITERAL_returns = 57;
+	int LITERAL_throws = 58;
+	int COMMA = 59;
+	int LPAREN = 60;
+	int OR = 61;
+	int RPAREN = 62;
+	int LITERAL_catch = 63;
+	int LITERAL_finally = 64;
+	int PLUS_ASSIGN = 65;
+	int SEMPRED = 66;
+	int IMPLIES = 67;
+	int ROOT = 68;
+	int RULE_REF = 69;
+	int NOT = 70;
+	int TREE_BEGIN = 71;
+	int QUESTION = 72;
+	int PLUS = 73;
+	int WILDCARD = 74;
+	int REWRITE = 75;
+	int DOLLAR = 76;
+	int DOUBLE_QUOTE_STRING_LITERAL = 77;
+	int DOUBLE_ANGLE_STRING_LITERAL = 78;
+	int WS = 79;
+	int COMMENT = 80;
+	int SL_COMMENT = 81;
+	int ML_COMMENT = 82;
+	int OPEN_ELEMENT_OPTION = 83;
+	int CLOSE_ELEMENT_OPTION = 84;
+	int ESC = 85;
+	int DIGIT = 86;
+	int XDIGIT = 87;
+	int NESTED_ARG_ACTION = 88;
+	int NESTED_ACTION = 89;
+	int ACTION_CHAR_LITERAL = 90;
+	int ACTION_STRING_LITERAL = 91;
+	int ACTION_ESC = 92;
+	int WS_LOOP = 93;
+	int INTERNAL_RULE_REF = 94;
+	int WS_OPT = 95;
+	int SRC = 96;
+}
diff --git a/src/org/antlr/tool/ANTLRTokenTypes.txt b/src/org/antlr/tool/ANTLRTokenTypes.txt
new file mode 100644
index 0000000..27eaa78
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRTokenTypes.txt
@@ -0,0 +1,95 @@
+// $ANTLR 2.7.7 (2006-01-29): antlr.g -> ANTLRTokenTypes.txt$
+ANTLR    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
diff --git a/src/org/antlr/tool/ANTLRTreePrinter.java b/src/org/antlr/tool/ANTLRTreePrinter.java
new file mode 100644
index 0000000..4b64428
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRTreePrinter.java
@@ -0,0 +1,2295 @@
+// $ANTLR 2.7.7 (2006-01-29): "antlr.print.g" -> "ANTLRTreePrinter.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+
+import antlr.TreeParser;
+import antlr.Token;
+import antlr.collections.AST;
+import antlr.RecognitionException;
+import antlr.ANTLRException;
+import antlr.NoViableAltException;
+import antlr.MismatchedTokenException;
+import antlr.SemanticException;
+import antlr.collections.impl.BitSet;
+import antlr.ASTPair;
+import antlr.collections.impl.ASTArray;
+
+
+/** Print out a grammar (no pretty printing).
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  August 19, 2003
+ */
+public class ANTLRTreePrinter extends antlr.TreeParser       implements ANTLRTreePrinterTokenTypes
+ {
+
+	protected Grammar grammar;
+	protected boolean showActions;
+    protected StringBuffer buf = new StringBuffer(300);
+
+    public void out(String s) {
+        buf.append(s);
+    }
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "antlr.print: "+ex.toString(),
+            ex);
+    }
+
+	/** Normalize a grammar print out by removing all double spaces
+	 *  and trailing/beginning stuff.  FOr example, convert
+	 *
+	 *  ( A  |  B  |  C )*
+	 *
+	 *  to
+	 *
+	 *  ( A | B | C )*
+	 */
+	public static String normalize(String g) {
+	    StringTokenizer st = new StringTokenizer(g, " ", false);
+		StringBuffer buf = new StringBuffer();
+		while ( st.hasMoreTokens() ) {
+			String w = st.nextToken();
+			buf.append(w);
+			buf.append(" ");
+		}
+		return buf.toString().trim();
+	}
+public ANTLRTreePrinter() {
+	tokenNames = _tokenNames;
+}
+
+/** Call this to figure out how to print */
+	public final String  toString(AST _t,
+		Grammar g, boolean showActions
+	) throws RecognitionException {
+		String s=null;
+		
+		GrammarAST toString_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		grammar = g;
+		this.showActions = showActions;
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LEXER_GRAMMAR:
+			case PARSER_GRAMMAR:
+			case TREE_GRAMMAR:
+			case COMBINED_GRAMMAR:
+			{
+				grammar(_t);
+				_t = _retTree;
+				break;
+			}
+			case RULE:
+			{
+				rule(_t);
+				_t = _retTree;
+				break;
+			}
+			case ALT:
+			{
+				alternative(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			case SYNPRED:
+			case RANGE:
+			case CHAR_RANGE:
+			case EPSILON:
+			case LABEL:
+			case GATED_SEMPRED:
+			case SYN_SEMPRED:
+			case BACKTRACK_SEMPRED:
+			case ACTION:
+			case ASSIGN:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case BANG:
+			case PLUS_ASSIGN:
+			case SEMPRED:
+			case ROOT:
+			case RULE_REF:
+			case NOT:
+			case TREE_BEGIN:
+			case WILDCARD:
+			{
+				element(_t);
+				_t = _retTree;
+				break;
+			}
+			case REWRITE:
+			{
+				single_rewrite(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				GrammarAST tmp1_AST_in = (GrammarAST)_t;
+				match(_t,EOR);
+				_t = _t.getNextSibling();
+				s="EOR";
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			return normalize(buf.toString());
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return s;
+	}
+	
+	public final void grammar(AST _t) throws RecognitionException {
+		
+		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LEXER_GRAMMAR:
+			{
+				AST __t5 = _t;
+				GrammarAST tmp2_AST_in = (GrammarAST)_t;
+				match(_t,LEXER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t,"lexer " );
+				_t = _retTree;
+				_t = __t5;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PARSER_GRAMMAR:
+			{
+				AST __t6 = _t;
+				GrammarAST tmp3_AST_in = (GrammarAST)_t;
+				match(_t,PARSER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t,"parser ");
+				_t = _retTree;
+				_t = __t6;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TREE_GRAMMAR:
+			{
+				AST __t7 = _t;
+				GrammarAST tmp4_AST_in = (GrammarAST)_t;
+				match(_t,TREE_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t,"tree ");
+				_t = _retTree;
+				_t = __t7;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case COMBINED_GRAMMAR:
+			{
+				AST __t8 = _t;
+				GrammarAST tmp5_AST_in = (GrammarAST)_t;
+				match(_t,COMBINED_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t,"");
+				_t = _retTree;
+				_t = __t8;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rule(AST _t) throws RecognitionException {
+		
+		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST arg = null;
+		GrammarAST ret = null;
+		GrammarAST b = null;
+		
+		try {      // for error handling
+			AST __t42 = _t;
+			GrammarAST tmp6_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			out(id.getText());
+			AST __t44 = _t;
+			GrammarAST tmp7_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				arg = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				out("["+arg.getText()+"]");
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t44;
+			_t = _t.getNextSibling();
+			AST __t46 = _t;
+			GrammarAST tmp8_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				ret = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				out(" returns ["+ret.getText()+"]");
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t46;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop51:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					ruleAction(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop51;
+				}
+				
+			} while (true);
+			}
+			out(" : ");
+			b = _t==ASTNULL ? null : (GrammarAST)_t;
+			block(_t,false);
+			_t = _retTree;
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp9_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			out(";\n");
+			_t = __t42;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void alternative(AST _t) throws RecognitionException {
+		
+		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t74 = _t;
+			GrammarAST tmp10_AST_in = (GrammarAST)_t;
+			match(_t,ALT);
+			_t = _t.getFirstChild();
+			{
+			int _cnt76=0;
+			_loop76:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==LABEL||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.get [...]
+					element(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt76>=1 ) { break _loop76; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt76++;
+			} while (true);
+			}
+			GrammarAST tmp11_AST_in = (GrammarAST)_t;
+			match(_t,EOA);
+			_t = _t.getNextSibling();
+			_t = __t74;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void element(AST _t) throws RecognitionException {
+		
+		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST id2 = null;
+		GrammarAST a = null;
+		GrammarAST pred = null;
+		GrammarAST spred = null;
+		GrammarAST gpred = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				AST __t101 = _t;
+				GrammarAST tmp12_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t101;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				AST __t102 = _t;
+				GrammarAST tmp13_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t102;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LABEL:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case WILDCARD:
+			{
+				atom(_t);
+				_t = _retTree;
+				break;
+			}
+			case NOT:
+			{
+				AST __t103 = _t;
+				GrammarAST tmp14_AST_in = (GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				out("~");
+				element(_t);
+				_t = _retTree;
+				_t = __t103;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RANGE:
+			{
+				AST __t104 = _t;
+				GrammarAST tmp15_AST_in = (GrammarAST)_t;
+				match(_t,RANGE);
+				_t = _t.getFirstChild();
+				atom(_t);
+				_t = _retTree;
+				out("..");
+				atom(_t);
+				_t = _retTree;
+				_t = __t104;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t105 = _t;
+				GrammarAST tmp16_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				atom(_t);
+				_t = _retTree;
+				out("..");
+				atom(_t);
+				_t = _retTree;
+				_t = __t105;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t106 = _t;
+				GrammarAST tmp17_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				id = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				out(id.getText()+"=");
+				element(_t);
+				_t = _retTree;
+				_t = __t106;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PLUS_ASSIGN:
+			{
+				AST __t107 = _t;
+				GrammarAST tmp18_AST_in = (GrammarAST)_t;
+				match(_t,PLUS_ASSIGN);
+				_t = _t.getFirstChild();
+				id2 = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				out(id2.getText()+"+=");
+				element(_t);
+				_t = _retTree;
+				_t = __t107;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			{
+				ebnf(_t);
+				_t = _retTree;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				tree(_t);
+				_t = _retTree;
+				break;
+			}
+			case SYNPRED:
+			{
+				AST __t108 = _t;
+				GrammarAST tmp19_AST_in = (GrammarAST)_t;
+				match(_t,SYNPRED);
+				_t = _t.getFirstChild();
+				block(_t,true);
+				_t = _retTree;
+				_t = __t108;
+				_t = _t.getNextSibling();
+				out("=>");
+				break;
+			}
+			case ACTION:
+			{
+				a = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( showActions ) {out("{"); out(a.getText()); out("}");}
+				break;
+			}
+			case SEMPRED:
+			{
+				pred = (GrammarAST)_t;
+				match(_t,SEMPRED);
+				_t = _t.getNextSibling();
+				
+					if ( showActions ) {out("{"); out(pred.getText()); out("}?");}
+					else {out("{...}?");}
+					
+				break;
+			}
+			case SYN_SEMPRED:
+			{
+				spred = (GrammarAST)_t;
+				match(_t,SYN_SEMPRED);
+				_t = _t.getNextSibling();
+				
+					  String name = spred.getText();
+					  GrammarAST predAST=grammar.getSyntacticPredicate(name);
+					  block(predAST, true);
+					  out("=>");
+					
+				break;
+			}
+			case BACKTRACK_SEMPRED:
+			{
+				GrammarAST tmp20_AST_in = (GrammarAST)_t;
+				match(_t,BACKTRACK_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case GATED_SEMPRED:
+			{
+				gpred = (GrammarAST)_t;
+				match(_t,GATED_SEMPRED);
+				_t = _t.getNextSibling();
+				
+					if ( showActions ) {out("{"); out(gpred.getText()); out("}? =>");}
+					else {out("{...}? =>");}
+					
+				break;
+			}
+			case EPSILON:
+			{
+				GrammarAST tmp21_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void single_rewrite(AST _t) throws RecognitionException {
+		
+		GrammarAST single_rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t86 = _t;
+			GrammarAST tmp22_AST_in = (GrammarAST)_t;
+			match(_t,REWRITE);
+			_t = _t.getFirstChild();
+			out(" ->");
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SEMPRED:
+			{
+				GrammarAST tmp23_AST_in = (GrammarAST)_t;
+				match(_t,SEMPRED);
+				_t = _t.getNextSibling();
+				out(" {"+tmp23_AST_in.getText()+"}?");
+				break;
+			}
+			case ALT:
+			case TEMPLATE:
+			case ACTION:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ALT:
+			{
+				alternative(_t);
+				_t = _retTree;
+				break;
+			}
+			case TEMPLATE:
+			{
+				rewrite_template(_t);
+				_t = _retTree;
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp24_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				out(" {"+tmp24_AST_in.getText()+"}");
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t86;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void grammarSpec(AST _t,
+		String gtype
+	) throws RecognitionException {
+		
+		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST cmt = null;
+		
+		try {      // for error handling
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			out(gtype+"grammar "+id.getText());
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case DOC_COMMENT:
+			{
+				cmt = (GrammarAST)_t;
+				match(_t,DOC_COMMENT);
+				_t = _t.getNextSibling();
+				out(cmt.getText()+"\n");
+				break;
+			}
+			case OPTIONS:
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			out(";\n");
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKENS:
+			{
+				tokensSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop16:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==SCOPE)) {
+					attrScope(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop16;
+				}
+				
+			} while (true);
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case AMPERSAND:
+			{
+				actions(_t);
+				_t = _retTree;
+				break;
+			}
+			case RULE:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			rules(_t);
+			_t = _retTree;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void attrScope(AST _t) throws RecognitionException {
+		
+		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t10 = _t;
+			GrammarAST tmp25_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			GrammarAST tmp26_AST_in = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			GrammarAST tmp27_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t10;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void optionsSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST optionsSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t25 = _t;
+			GrammarAST tmp28_AST_in = (GrammarAST)_t;
+			match(_t,OPTIONS);
+			_t = _t.getFirstChild();
+			out(" options {");
+			{
+			int _cnt27=0;
+			_loop27:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ASSIGN)) {
+					option(_t);
+					_t = _retTree;
+					out("; ");
+				}
+				else {
+					if ( _cnt27>=1 ) { break _loop27; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt27++;
+			} while (true);
+			}
+			out("} ");
+			_t = __t25;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tokensSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST tokensSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t32 = _t;
+			GrammarAST tmp29_AST_in = (GrammarAST)_t;
+			match(_t,TOKENS);
+			_t = _t.getFirstChild();
+			{
+			int _cnt34=0;
+			_loop34:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ASSIGN||_t.getType()==TOKEN_REF)) {
+					tokenSpec(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt34>=1 ) { break _loop34; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt34++;
+			} while (true);
+			}
+			_t = __t32;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void actions(AST _t) throws RecognitionException {
+		
+		GrammarAST actions_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			int _cnt20=0;
+			_loop20:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					action(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt20>=1 ) { break _loop20; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt20++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rules(AST _t) throws RecognitionException {
+		
+		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			int _cnt40=0;
+			_loop40:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==RULE)) {
+					rule(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt40>=1 ) { break _loop40; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt40++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void action(AST _t) throws RecognitionException {
+		
+		GrammarAST action_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id1 = null;
+		GrammarAST id2 = null;
+		GrammarAST a1 = null;
+		GrammarAST a2 = null;
+		
+		String scope=null, name=null;
+		String action=null;
+		
+		
+		try {      // for error handling
+			AST __t22 = _t;
+			GrammarAST tmp30_AST_in = (GrammarAST)_t;
+			match(_t,AMPERSAND);
+			_t = _t.getFirstChild();
+			id1 = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ID:
+			{
+				id2 = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				a1 = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				scope=id1.getText(); name=a1.getText(); action=a1.getText();
+				break;
+			}
+			case ACTION:
+			{
+				a2 = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				scope=null; name=id1.getText(); action=a2.getText();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t22;
+			_t = _t.getNextSibling();
+			
+					 if ( showActions ) {
+					 	out("@"+(scope!=null?scope+"::":"")+name+action);
+					 }
+					
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void option(AST _t) throws RecognitionException {
+		
+		GrammarAST option_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		
+		try {      // for error handling
+			AST __t29 = _t;
+			GrammarAST tmp31_AST_in = (GrammarAST)_t;
+			match(_t,ASSIGN);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			out(id.getText()+"=");
+			optionValue(_t);
+			_t = _retTree;
+			_t = __t29;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void optionValue(AST _t) throws RecognitionException {
+		
+		GrammarAST optionValue_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST s = null;
+		GrammarAST c = null;
+		GrammarAST i = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ID:
+			{
+				id = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				out(id.getText());
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				out(s.getText());
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				out(c.getText());
+				break;
+			}
+			case INT:
+			{
+				i = (GrammarAST)_t;
+				match(_t,INT);
+				_t = _t.getNextSibling();
+				out(i.getText());
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tokenSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST tokenSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKEN_REF:
+			{
+				GrammarAST tmp32_AST_in = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t36 = _t;
+				GrammarAST tmp33_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				GrammarAST tmp34_AST_in = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case STRING_LITERAL:
+				{
+					GrammarAST tmp35_AST_in = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					GrammarAST tmp36_AST_in = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t36;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void modifier(AST _t) throws RecognitionException {
+		
+		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		out(modifier_AST_in.getText()); out(" ");
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_protected:
+			{
+				GrammarAST tmp37_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_protected);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_public:
+			{
+				GrammarAST tmp38_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_public);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_private:
+			{
+				GrammarAST tmp39_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_private);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case FRAGMENT:
+			{
+				GrammarAST tmp40_AST_in = (GrammarAST)_t;
+				match(_t,FRAGMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ruleScopeSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t57 = _t;
+			GrammarAST tmp41_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ACTION:
+			{
+				GrammarAST tmp42_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			case ID:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop60:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ID)) {
+					GrammarAST tmp43_AST_in = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop60;
+				}
+				
+			} while (true);
+			}
+			_t = __t57;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ruleAction(AST _t) throws RecognitionException {
+		
+		GrammarAST ruleAction_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST a = null;
+		
+		try {      // for error handling
+			AST __t54 = _t;
+			GrammarAST tmp44_AST_in = (GrammarAST)_t;
+			match(_t,AMPERSAND);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			a = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t54;
+			_t = _t.getNextSibling();
+			if ( showActions ) out("@"+id.getText()+"{"+a.getText()+"}");
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void block(AST _t,
+		boolean forceParens
+	) throws RecognitionException {
+		
+		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		int numAlts = countAltsForBlock(block_AST_in);
+		
+		
+		try {      // for error handling
+			AST __t62 = _t;
+			GrammarAST tmp45_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			if ( forceParens||numAlts>1 ) out(" (");
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				out(" : ");
+				break;
+			}
+			case ALT:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			alternative(_t);
+			_t = _retTree;
+			rewrite(_t);
+			_t = _retTree;
+			{
+			_loop65:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					out(" | ");
+					alternative(_t);
+					_t = _retTree;
+					rewrite(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop65;
+				}
+				
+			} while (true);
+			}
+			GrammarAST tmp46_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			if ( forceParens||numAlts>1 ) out(")");
+			_t = __t62;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void exceptionGroup(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			{
+				{
+				int _cnt79=0;
+				_loop79:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==LITERAL_catch)) {
+						exceptionHandler(_t);
+						_t = _retTree;
+					}
+					else {
+						if ( _cnt79>=1 ) { break _loop79; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt79++;
+				} while (true);
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case LITERAL_finally:
+				{
+					finallyClause(_t);
+					_t = _retTree;
+					break;
+				}
+				case EOR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				break;
+			}
+			case LITERAL_finally:
+			{
+				finallyClause(_t);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			_loop99:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==REWRITE)) {
+					single_rewrite(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop99;
+				}
+				
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final int  countAltsForBlock(AST _t) throws RecognitionException {
+		int n=0;
+		
+		GrammarAST countAltsForBlock_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t67 = _t;
+			GrammarAST tmp47_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp48_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ALT:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			int _cnt72=0;
+			_loop72:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					GrammarAST tmp49_AST_in = (GrammarAST)_t;
+					match(_t,ALT);
+					_t = _t.getNextSibling();
+					{
+					_loop71:
+					do {
+						if (_t==null) _t=ASTNULL;
+						if ((_t.getType()==REWRITE)) {
+							GrammarAST tmp50_AST_in = (GrammarAST)_t;
+							match(_t,REWRITE);
+							_t = _t.getNextSibling();
+						}
+						else {
+							break _loop71;
+						}
+						
+					} while (true);
+					}
+					n++;
+				}
+				else {
+					if ( _cnt72>=1 ) { break _loop72; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt72++;
+			} while (true);
+			}
+			GrammarAST tmp51_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t67;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return n;
+	}
+	
+	public final void exceptionHandler(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t82 = _t;
+			GrammarAST tmp52_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_catch);
+			_t = _t.getFirstChild();
+			GrammarAST tmp53_AST_in = (GrammarAST)_t;
+			match(_t,ARG_ACTION);
+			_t = _t.getNextSibling();
+			GrammarAST tmp54_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t82;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void finallyClause(AST _t) throws RecognitionException {
+		
+		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t84 = _t;
+			GrammarAST tmp55_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_finally);
+			_t = _t.getFirstChild();
+			GrammarAST tmp56_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t84;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_template(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_template_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST ind = null;
+		GrammarAST arg = null;
+		GrammarAST a = null;
+		
+		try {      // for error handling
+			AST __t90 = _t;
+			GrammarAST tmp57_AST_in = (GrammarAST)_t;
+			match(_t,TEMPLATE);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ID:
+			{
+				id = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				out(" "+id.getText());
+				break;
+			}
+			case ACTION:
+			{
+				ind = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				out(" ({"+ind.getText()+"})");
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			AST __t92 = _t;
+			GrammarAST tmp58_AST_in = (GrammarAST)_t;
+			match(_t,ARGLIST);
+			_t = _t.getFirstChild();
+			out("(");
+			{
+			_loop95:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ARG)) {
+					AST __t94 = _t;
+					GrammarAST tmp59_AST_in = (GrammarAST)_t;
+					match(_t,ARG);
+					_t = _t.getFirstChild();
+					arg = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+					out(arg.getText()+"=");
+					a = (GrammarAST)_t;
+					match(_t,ACTION);
+					_t = _t.getNextSibling();
+					out(a.getText());
+					_t = __t94;
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop95;
+				}
+				
+			} while (true);
+			}
+			out(")");
+			_t = __t92;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case DOUBLE_QUOTE_STRING_LITERAL:
+			{
+				GrammarAST tmp60_AST_in = (GrammarAST)_t;
+				match(_t,DOUBLE_QUOTE_STRING_LITERAL);
+				_t = _t.getNextSibling();
+				out(" "+tmp60_AST_in.getText());
+				break;
+			}
+			case DOUBLE_ANGLE_STRING_LITERAL:
+			{
+				GrammarAST tmp61_AST_in = (GrammarAST)_t;
+				match(_t,DOUBLE_ANGLE_STRING_LITERAL);
+				_t = _t.getNextSibling();
+				out(" "+tmp61_AST_in.getText());
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t90;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void atom(AST _t) throws RecognitionException {
+		
+		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST rarg = null;
+		GrammarAST targ = null;
+		out(" ");
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case WILDCARD:
+			{
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case RULE_REF:
+				{
+					AST __t119 = _t;
+					GrammarAST tmp62_AST_in = (GrammarAST)_t;
+					match(_t,RULE_REF);
+					_t = _t.getFirstChild();
+					out(atom_AST_in.toString());
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case ARG_ACTION:
+					{
+						rarg = (GrammarAST)_t;
+						match(_t,ARG_ACTION);
+						_t = _t.getNextSibling();
+						out("["+rarg.toString()+"]");
+						break;
+					}
+					case 3:
+					case BANG:
+					case ROOT:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t119;
+					_t = _t.getNextSibling();
+					break;
+				}
+				case TOKEN_REF:
+				{
+					AST __t122 = _t;
+					GrammarAST tmp63_AST_in = (GrammarAST)_t;
+					match(_t,TOKEN_REF);
+					_t = _t.getFirstChild();
+					out(atom_AST_in.toString());
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case ARG_ACTION:
+					{
+						targ = (GrammarAST)_t;
+						match(_t,ARG_ACTION);
+						_t = _t.getNextSibling();
+						out("["+targ.toString()+"]");
+						break;
+					}
+					case 3:
+					case BANG:
+					case ROOT:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t122;
+					_t = _t.getNextSibling();
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					AST __t125 = _t;
+					GrammarAST tmp64_AST_in = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getFirstChild();
+					out(atom_AST_in.toString());
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t125;
+					_t = _t.getNextSibling();
+					break;
+				}
+				case STRING_LITERAL:
+				{
+					AST __t127 = _t;
+					GrammarAST tmp65_AST_in = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getFirstChild();
+					out(atom_AST_in.toString());
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t127;
+					_t = _t.getNextSibling();
+					break;
+				}
+				case WILDCARD:
+				{
+					AST __t129 = _t;
+					GrammarAST tmp66_AST_in = (GrammarAST)_t;
+					match(_t,WILDCARD);
+					_t = _t.getFirstChild();
+					out(atom_AST_in.toString());
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t129;
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				out(" ");
+				break;
+			}
+			case LABEL:
+			{
+				GrammarAST tmp67_AST_in = (GrammarAST)_t;
+				match(_t,LABEL);
+				_t = _t.getNextSibling();
+				out(" $"+tmp67_AST_in.getText());
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ebnf(AST _t) throws RecognitionException {
+		
+		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case BLOCK:
+			{
+				block(_t,true);
+				_t = _retTree;
+				out(" ");
+				break;
+			}
+			case OPTIONAL:
+			{
+				AST __t110 = _t;
+				GrammarAST tmp68_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				block(_t,true);
+				_t = _retTree;
+				_t = __t110;
+				_t = _t.getNextSibling();
+				out("? ");
+				break;
+			}
+			case CLOSURE:
+			{
+				AST __t111 = _t;
+				GrammarAST tmp69_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				block(_t,true);
+				_t = _retTree;
+				_t = __t111;
+				_t = _t.getNextSibling();
+				out("* ");
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				AST __t112 = _t;
+				GrammarAST tmp70_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				block(_t,true);
+				_t = _retTree;
+				_t = __t112;
+				_t = _t.getNextSibling();
+				out("+ ");
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tree(AST _t) throws RecognitionException {
+		
+		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t114 = _t;
+			GrammarAST tmp71_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			out(" ^(");
+			element(_t);
+			_t = _retTree;
+			{
+			_loop116:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==LABEL||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.get [...]
+					element(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop116;
+				}
+				
+			} while (true);
+			}
+			out(") ");
+			_t = __t114;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ast_suffix(AST _t) throws RecognitionException {
+		
+		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				GrammarAST tmp72_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getNextSibling();
+				out("^");
+				break;
+			}
+			case BANG:
+			{
+				GrammarAST tmp73_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getNextSibling();
+				out("!");
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	
+	public static final String[] _tokenNames = {
+		"<0>",
+		"EOF",
+		"<2>",
+		"NULL_TREE_LOOKAHEAD",
+		"\"options\"",
+		"\"tokens\"",
+		"\"parser\"",
+		"LEXER",
+		"RULE",
+		"BLOCK",
+		"OPTIONAL",
+		"CLOSURE",
+		"POSITIVE_CLOSURE",
+		"SYNPRED",
+		"RANGE",
+		"CHAR_RANGE",
+		"EPSILON",
+		"ALT",
+		"EOR",
+		"EOB",
+		"EOA",
+		"ID",
+		"ARG",
+		"ARGLIST",
+		"RET",
+		"LEXER_GRAMMAR",
+		"PARSER_GRAMMAR",
+		"TREE_GRAMMAR",
+		"COMBINED_GRAMMAR",
+		"INITACTION",
+		"LABEL",
+		"TEMPLATE",
+		"\"scope\"",
+		"GATED_SEMPRED",
+		"SYN_SEMPRED",
+		"BACKTRACK_SEMPRED",
+		"\"fragment\"",
+		"ACTION",
+		"DOC_COMMENT",
+		"SEMI",
+		"\"lexer\"",
+		"\"tree\"",
+		"\"grammar\"",
+		"AMPERSAND",
+		"COLON",
+		"RCURLY",
+		"ASSIGN",
+		"STRING_LITERAL",
+		"CHAR_LITERAL",
+		"INT",
+		"STAR",
+		"TOKEN_REF",
+		"\"protected\"",
+		"\"public\"",
+		"\"private\"",
+		"BANG",
+		"ARG_ACTION",
+		"\"returns\"",
+		"\"throws\"",
+		"COMMA",
+		"LPAREN",
+		"OR",
+		"RPAREN",
+		"\"catch\"",
+		"\"finally\"",
+		"PLUS_ASSIGN",
+		"SEMPRED",
+		"IMPLIES",
+		"ROOT",
+		"RULE_REF",
+		"NOT",
+		"TREE_BEGIN",
+		"QUESTION",
+		"PLUS",
+		"WILDCARD",
+		"REWRITE",
+		"DOLLAR",
+		"DOUBLE_QUOTE_STRING_LITERAL",
+		"DOUBLE_ANGLE_STRING_LITERAL",
+		"WS",
+		"COMMENT",
+		"SL_COMMENT",
+		"ML_COMMENT",
+		"OPEN_ELEMENT_OPTION",
+		"CLOSE_ELEMENT_OPTION",
+		"ESC",
+		"DIGIT",
+		"XDIGIT",
+		"NESTED_ARG_ACTION",
+		"NESTED_ACTION",
+		"ACTION_CHAR_LITERAL",
+		"ACTION_STRING_LITERAL",
+		"ACTION_ESC",
+		"WS_LOOP",
+		"INTERNAL_RULE_REF",
+		"WS_OPT",
+		"SRC"
+	};
+	
+	}
+	
diff --git a/src/org/antlr/tool/ANTLRTreePrinter.smap b/src/org/antlr/tool/ANTLRTreePrinter.smap
new file mode 100644
index 0000000..1cc3b71
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRTreePrinter.smap
@@ -0,0 +1,1670 @@
+SMAP
+ANTLRTreePrinter.java
+G
+*S G
+*F
++ 0 antlr.print.g
+antlr.print.g
+*L
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+48:55
+49:56
+50:57
+52:59
+53:60
+54:61
+56:63
+57:64
+58:65
+59:66
+60:67
+61:68
+62:69
+63:70
+64:71
+65:72
+66:73
+67:74
+68:75
+69:76
+70:77
+72:79
+73:80
+74:81
+75:82
+76:83
+77:84
+78:85
+79:86
+80:87
+81:88
+82:89
+83:90
+84:91
+85:92
+86:93
+87:94
+88:95
+89:96
+90:97
+94:102
+94:103
+94:104
+94:105
+94:106
+94:114
+94:190
+94:191
+94:192
+94:193
+94:194
+94:195
+94:196
+94:197
+95:110
+96:111
+99:116
+99:117
+99:118
+99:119
+99:120
+99:121
+99:122
+99:123
+99:124
+99:183
+99:184
+99:185
+99:186
+99:187
+100:127
+100:128
+100:129
+100:130
+101:133
+101:134
+101:135
+101:136
+102:139
+102:140
+102:141
+102:142
+102:143
+102:144
+102:145
+102:146
+102:147
+102:148
+102:149
+102:150
+102:151
+102:152
+102:153
+102:154
+102:155
+102:156
+102:157
+102:158
+102:159
+102:160
+102:161
+102:162
+102:163
+102:164
+102:165
+102:166
+103:169
+103:170
+103:171
+103:172
+104:175
+104:176
+104:177
+104:178
+104:179
+104:180
+106:189
+111:199
+111:203
+111:261
+111:262
+111:263
+111:264
+111:265
+111:266
+111:267
+112:205
+112:206
+112:207
+112:208
+112:209
+112:210
+112:211
+112:212
+112:213
+112:214
+112:215
+112:216
+112:255
+112:256
+112:257
+112:258
+112:259
+113:219
+113:220
+113:221
+113:222
+113:223
+113:224
+113:225
+113:226
+113:227
+113:228
+114:231
+114:232
+114:233
+114:234
+114:235
+114:236
+114:237
+114:238
+114:239
+114:240
+115:243
+115:244
+115:245
+115:246
+115:247
+115:248
+115:249
+115:250
+115:251
+115:252
+119:911
+119:915
+119:928
+119:929
+119:930
+119:931
+119:932
+119:933
+119:934
+120:916
+120:917
+120:918
+120:919
+120:920
+120:921
+120:922
+120:923
+120:924
+120:925
+120:926
+120:927
+123:786
+123:787
+123:788
+123:794
+123:903
+123:904
+123:905
+123:906
+123:907
+123:908
+123:909
+124:791
+124:795
+124:796
+124:797
+124:798
+125:792
+125:800
+125:801
+125:802
+125:803
+125:804
+125:805
+125:806
+125:807
+125:818
+125:819
+125:820
+125:821
+125:822
+126:825
+126:826
+126:827
+126:828
+126:829
+126:830
+126:840
+126:841
+126:842
+126:843
+126:844
+126:846
+127:848
+127:849
+127:850
+127:851
+127:852
+127:853
+127:862
+127:863
+127:864
+127:865
+127:866
+128:868
+128:869
+128:870
+128:871
+128:872
+128:873
+128:874
+128:875
+128:876
+128:877
+128:878
+128:880
+128:881
+129:883
+129:884
+129:885
+129:886
+129:887
+129:888
+129:895
+129:896
+129:897
+129:898
+129:899
+130:901
+130:902
+133:1009
+133:1013
+133:1030
+133:1031
+133:1032
+133:1033
+133:1034
+133:1035
+133:1036
+134:1015
+134:1016
+134:1017
+134:1018
+134:1019
+134:1020
+134:1021
+134:1022
+134:1023
+134:1024
+134:1025
+134:1027
+134:1028
+134:1029
+137:1067
+137:1079
+137:1122
+137:1123
+137:1124
+137:1125
+137:1126
+137:1127
+137:1128
+138:1075
+139:1076
+142:1070
+142:1080
+142:1081
+142:1082
+142:1083
+142:1084
+142:1085
+142:1086
+142:1115
+142:1116
+143:1071
+143:1072
+143:1088
+143:1089
+143:1090
+143:1091
+143:1092
+143:1093
+143:1094
+143:1095
+143:1096
+143:1097
+143:1109
+143:1110
+143:1111
+143:1112
+143:1113
+144:1098
+145:1073
+145:1101
+145:1102
+145:1103
+145:1104
+145:1105
+146:1106
+150:1118
+151:1119
+152:1120
+156:936
+156:940
+156:966
+156:967
+156:968
+156:969
+156:970
+156:971
+156:972
+157:941
+157:942
+157:943
+157:944
+157:945
+157:964
+157:965
+158:947
+158:948
+158:949
+158:950
+158:951
+158:952
+158:953
+158:954
+158:955
+158:956
+158:957
+158:958
+158:960
+158:961
+158:962
+159:963
+163:1130
+163:1135
+163:1148
+163:1149
+163:1150
+163:1151
+163:1152
+163:1153
+163:1154
+164:1133
+164:1136
+164:1137
+164:1138
+164:1139
+164:1140
+164:1141
+164:1142
+164:1143
+164:1144
+164:1145
+164:1146
+164:1147
+167:1156
+167:1164
+167:1165
+167:1166
+167:1199
+167:1200
+167:1201
+167:1202
+167:1203
+167:1204
+167:1205
+167:1206
+167:1207
+167:1208
+167:1209
+167:1210
+168:1159
+168:1167
+168:1168
+168:1169
+168:1170
+168:1171
+168:1172
+169:1160
+169:1175
+169:1176
+169:1177
+169:1178
+169:1179
+169:1180
+170:1161
+170:1183
+170:1184
+170:1185
+170:1186
+170:1187
+170:1188
+171:1162
+171:1191
+171:1192
+171:1193
+171:1194
+171:1195
+171:1196
+187:974
+187:978
+187:1001
+187:1002
+187:1003
+187:1004
+187:1005
+187:1006
+187:1007
+188:979
+188:980
+188:981
+188:982
+188:984
+188:985
+188:986
+188:987
+188:988
+188:989
+188:990
+188:991
+188:992
+188:993
+188:994
+188:996
+188:997
+188:998
+188:999
+188:1000
+191:1212
+191:1216
+191:1217
+191:1218
+191:1262
+191:1263
+191:1264
+191:1265
+191:1266
+191:1267
+191:1268
+191:1269
+191:1270
+191:1271
+191:1272
+191:1273
+192:1219
+192:1220
+192:1221
+192:1222
+192:1223
+193:1226
+193:1227
+193:1228
+193:1229
+193:1230
+193:1231
+193:1232
+193:1233
+193:1234
+193:1236
+193:1237
+193:1238
+193:1239
+193:1240
+193:1241
+193:1242
+193:1245
+193:1246
+193:1247
+193:1248
+193:1249
+193:1252
+193:1253
+193:1254
+193:1255
+193:1256
+193:1258
+193:1259
+196:1038
+196:1042
+196:1059
+196:1060
+196:1061
+196:1062
+196:1063
+196:1064
+196:1065
+197:1044
+197:1045
+197:1046
+197:1047
+197:1048
+197:1049
+197:1050
+197:1051
+197:1052
+197:1053
+197:1054
+197:1056
+197:1057
+197:1058
+200:269
+200:277
+200:447
+200:448
+200:449
+200:450
+200:451
+200:452
+200:453
+201:272
+201:278
+201:279
+201:280
+201:281
+201:282
+201:283
+201:284
+201:445
+201:446
+202:286
+202:287
+202:288
+202:289
+202:290
+202:291
+202:292
+202:293
+202:294
+202:301
+202:302
+202:303
+202:304
+202:305
+203:307
+204:273
+204:308
+204:309
+204:310
+204:311
+204:313
+204:314
+204:315
+204:316
+204:317
+204:318
+204:319
+204:320
+204:327
+204:328
+204:329
+204:330
+204:331
+204:333
+204:334
+205:274
+205:335
+205:336
+205:337
+205:338
+205:340
+205:341
+205:342
+205:343
+205:344
+205:345
+205:346
+205:347
+205:354
+205:355
+205:356
+205:357
+205:358
+205:360
+205:361
+206:363
+206:364
+206:365
+206:366
+206:367
+206:368
+206:377
+206:378
+206:379
+206:380
+206:381
+207:384
+207:385
+207:386
+207:387
+207:388
+207:389
+207:397
+207:398
+207:399
+207:400
+207:401
+208:403
+208:404
+208:405
+208:406
+208:407
+208:408
+208:409
+208:410
+208:411
+208:412
+208:413
+208:415
+208:416
+209:417
+210:275
+210:418
+210:419
+210:420
+211:422
+211:423
+211:424
+211:425
+211:426
+211:427
+211:428
+211:435
+211:436
+211:437
+211:438
+211:439
+212:441
+212:442
+212:443
+212:444
+216:1379
+216:1385
+216:1399
+216:1400
+216:1401
+216:1402
+216:1403
+216:1404
+216:1405
+217:1382
+217:1383
+217:1386
+217:1387
+217:1388
+217:1389
+217:1390
+217:1391
+217:1392
+217:1393
+217:1394
+217:1395
+217:1396
+217:1397
+218:1398
+221:1275
+221:1278
+221:1280
+221:1281
+221:1282
+221:1311
+221:1312
+221:1313
+221:1314
+221:1315
+221:1316
+221:1317
+221:1318
+221:1319
+221:1320
+221:1321
+221:1322
+223:1283
+223:1284
+223:1285
+223:1286
+223:1287
+224:1290
+224:1291
+224:1292
+224:1293
+224:1294
+225:1297
+225:1298
+225:1299
+225:1300
+225:1301
+226:1304
+226:1305
+226:1306
+226:1307
+226:1308
+229:1324
+229:1328
+229:1371
+229:1372
+229:1373
+229:1374
+229:1375
+229:1376
+229:1377
+230:1329
+230:1330
+230:1331
+230:1332
+230:1334
+230:1335
+230:1336
+230:1337
+230:1338
+230:1339
+230:1340
+230:1348
+230:1349
+230:1350
+230:1351
+230:1352
+230:1354
+230:1355
+230:1356
+230:1357
+230:1358
+230:1359
+230:1360
+230:1361
+230:1362
+230:1363
+230:1364
+230:1365
+230:1367
+230:1368
+230:1369
+230:1370
+233:1407
+233:1408
+233:1409
+233:1416
+233:1469
+233:1470
+233:1471
+233:1472
+233:1473
+233:1474
+233:1475
+234:1413
+237:1417
+237:1418
+237:1419
+237:1420
+237:1421
+237:1467
+237:1468
+238:1423
+238:1424
+238:1425
+238:1426
+238:1427
+238:1428
+238:1429
+238:1436
+238:1437
+238:1438
+238:1439
+238:1440
+239:1442
+239:1443
+239:1444
+239:1445
+239:1446
+239:1447
+239:1448
+239:1449
+239:1450
+239:1451
+239:1452
+239:1453
+239:1454
+239:1455
+239:1456
+239:1457
+239:1458
+239:1459
+239:1461
+239:1462
+240:1463
+240:1464
+240:1465
+240:1466
+244:1569
+244:1570
+244:1574
+244:1637
+244:1638
+244:1639
+244:1640
+244:1641
+244:1642
+244:1643
+244:1644
+245:1575
+245:1576
+245:1577
+245:1578
+245:1580
+245:1581
+245:1582
+245:1583
+245:1584
+245:1585
+245:1586
+245:1593
+245:1594
+245:1595
+245:1596
+245:1597
+245:1600
+245:1601
+245:1602
+245:1603
+245:1604
+245:1605
+245:1606
+245:1607
+245:1608
+245:1609
+245:1610
+245:1611
+245:1612
+245:1613
+245:1614
+245:1615
+245:1616
+245:1617
+245:1618
+245:1619
+245:1621
+245:1622
+245:1623
+245:1624
+245:1625
+245:1626
+245:1627
+245:1629
+245:1630
+245:1631
+245:1632
+245:1633
+245:1634
+245:1635
+245:1636
+248:455
+248:459
+248:485
+248:486
+248:487
+248:488
+248:489
+248:490
+248:491
+249:460
+249:461
+249:462
+249:463
+249:465
+249:466
+249:467
+249:468
+249:469
+249:470
+249:471
+249:472
+249:473
+249:474
+249:475
+249:477
+249:478
+249:479
+249:480
+249:481
+249:482
+249:483
+249:484
+252:1477
+252:1481
+252:1482
+252:1483
+252:1529
+252:1530
+252:1531
+252:1532
+252:1533
+252:1534
+252:1535
+252:1536
+252:1537
+252:1538
+252:1539
+252:1540
+253:1484
+253:1485
+253:1487
+253:1488
+253:1489
+253:1490
+253:1491
+253:1492
+253:1493
+253:1494
+253:1495
+253:1496
+253:1497
+253:1499
+253:1500
+253:1501
+253:1503
+253:1504
+253:1505
+253:1506
+253:1507
+253:1508
+253:1515
+253:1516
+253:1517
+253:1518
+253:1519
+254:1523
+254:1524
+254:1525
+254:1526
+257:1646
+257:1650
+257:1663
+257:1664
+257:1665
+257:1666
+257:1667
+257:1668
+257:1669
+258:1651
+258:1652
+258:1653
+258:1654
+258:1655
+258:1656
+258:1657
+258:1658
+258:1659
+258:1660
+258:1661
+258:1662
+261:1671
+261:1675
+261:1685
+261:1686
+261:1687
+261:1688
+261:1689
+261:1690
+261:1691
+262:1676
+262:1677
+262:1678
+262:1679
+262:1680
+262:1681
+262:1682
+262:1683
+262:1684
+265:714
+265:718
+265:778
+265:779
+265:780
+265:781
+265:782
+265:783
+265:784
+266:719
+266:720
+266:721
+266:722
+266:723
+266:725
+266:726
+266:727
+266:728
+266:729
+266:730
+266:731
+266:732
+266:741
+266:742
+266:743
+266:744
+266:745
+266:776
+266:777
+267:748
+267:749
+267:750
+267:751
+267:752
+267:753
+267:756
+267:757
+267:758
+267:759
+267:762
+267:763
+267:764
+267:765
+267:766
+267:767
+267:770
+267:771
+267:772
+267:773
+267:774
+271:1693
+271:1701
+271:1796
+271:1797
+271:1798
+271:1799
+271:1800
+271:1801
+271:1802
+272:1702
+272:1703
+272:1704
+272:1705
+272:1794
+272:1795
+273:1696
+273:1697
+273:1707
+273:1708
+273:1709
+273:1710
+273:1711
+273:1712
+273:1713
+273:1714
+273:1717
+273:1718
+273:1719
+273:1720
+273:1721
+273:1722
+273:1725
+273:1726
+273:1727
+273:1728
+273:1729
+274:1731
+274:1732
+274:1733
+274:1734
+274:1763
+274:1764
+275:1735
+276:1698
+276:1736
+276:1737
+276:1738
+276:1739
+276:1740
+276:1741
+276:1742
+276:1743
+276:1744
+276:1745
+276:1746
+276:1747
+276:1748
+276:1753
+276:1754
+276:1755
+276:1756
+276:1757
+276:1758
+276:1760
+276:1761
+277:1699
+277:1749
+277:1750
+277:1751
+277:1752
+280:1762
+282:1766
+282:1767
+282:1768
+282:1769
+282:1770
+282:1771
+282:1772
+282:1773
+282:1788
+282:1789
+282:1790
+282:1791
+282:1792
+283:1776
+283:1777
+283:1778
+283:1779
+283:1780
+283:1781
+288:1542
+288:1546
+288:1561
+288:1562
+288:1563
+288:1564
+288:1565
+288:1566
+288:1567
+289:1547
+289:1548
+289:1549
+289:1550
+289:1551
+289:1552
+289:1553
+289:1554
+289:1555
+289:1556
+289:1557
+289:1559
+289:1560
+292:493
+292:503
+292:504
+292:505
+292:701
+292:702
+292:703
+292:704
+292:705
+292:706
+292:707
+292:708
+292:709
+292:710
+292:711
+292:712
+293:506
+293:507
+293:508
+293:509
+293:510
+293:511
+293:512
+293:513
+293:514
+293:515
+294:518
+294:519
+294:520
+294:521
+294:522
+294:523
+294:524
+294:525
+294:526
+294:527
+295:530
+295:531
+295:532
+295:533
+295:534
+295:535
+295:536
+295:537
+295:538
+296:541
+296:542
+296:543
+296:544
+296:545
+296:546
+296:547
+296:548
+296:549
+296:550
+296:551
+297:554
+297:555
+297:556
+297:557
+297:558
+297:559
+297:560
+297:561
+297:562
+297:563
+297:564
+297:565
+297:566
+298:569
+298:570
+298:571
+298:572
+298:573
+298:574
+298:575
+298:576
+298:577
+298:578
+298:579
+298:580
+298:581
+299:496
+299:584
+299:585
+299:586
+299:587
+299:588
+299:589
+299:590
+299:591
+299:592
+299:593
+299:594
+299:595
+299:596
+299:597
+300:497
+300:600
+300:601
+300:602
+300:603
+300:604
+300:605
+300:606
+300:607
+300:608
+300:609
+300:610
+300:611
+300:612
+300:613
+301:616
+301:617
+301:618
+301:619
+301:620
+301:621
+301:622
+302:625
+302:626
+302:627
+302:628
+303:631
+303:632
+303:633
+303:634
+303:635
+303:636
+303:637
+303:638
+303:639
+303:640
+303:641
+304:498
+304:644
+304:645
+304:646
+304:647
+304:648
+304:649
+305:499
+305:652
+305:653
+305:654
+305:655
+305:656
+307:658
+308:659
+310:500
+310:663
+310:664
+310:665
+310:666
+310:667
+312:669
+313:670
+314:671
+315:672
+317:676
+317:677
+317:678
+317:679
+317:680
+318:501
+318:683
+318:684
+318:685
+318:686
+318:687
+320:689
+321:690
+323:694
+323:695
+323:696
+323:697
+323:698
+326:2054
+326:2058
+326:2059
+326:2060
+326:2061
+326:2062
+326:2063
+326:2064
+326:2065
+326:2107
+326:2108
+326:2109
+326:2110
+326:2111
+326:2112
+326:2113
+326:2114
+326:2115
+326:2116
+326:2117
+326:2118
+327:2068
+327:2069
+327:2070
+327:2071
+327:2072
+327:2073
+327:2074
+327:2075
+327:2076
+327:2077
+327:2078
+328:2081
+328:2082
+328:2083
+328:2084
+328:2085
+328:2086
+328:2087
+328:2088
+328:2089
+328:2090
+328:2091
+329:2094
+329:2095
+329:2096
+329:2097
+329:2098
+329:2099
+329:2100
+329:2101
+329:2102
+329:2103
+329:2104
+332:2120
+332:2124
+332:2125
+332:2126
+332:2127
+332:2128
+332:2129
+332:2130
+332:2131
+332:2132
+332:2133
+332:2134
+332:2135
+332:2136
+332:2137
+332:2138
+332:2139
+332:2140
+332:2141
+332:2142
+332:2144
+332:2145
+332:2146
+332:2147
+332:2148
+332:2149
+332:2150
+332:2151
+332:2152
+332:2153
+332:2154
+332:2155
+335:1804
+335:1809
+335:1811
+335:1812
+335:1813
+335:2041
+335:2042
+335:2043
+335:2044
+335:2045
+335:2046
+335:2047
+335:2048
+335:2049
+335:2050
+335:2051
+335:2052
+337:1814
+337:1815
+337:1816
+337:1817
+337:1818
+337:1819
+337:1821
+337:1822
+337:1823
+337:1824
+337:1825
+337:1826
+337:1827
+337:1828
+337:1829
+337:1873
+337:1874
+337:2024
+337:2025
+337:2026
+337:2027
+337:2028
+338:1807
+338:1831
+338:1832
+338:1833
+338:1834
+338:1835
+338:1836
+338:1837
+338:1838
+338:1847
+338:1848
+338:1849
+338:1850
+338:1851
+339:1854
+339:1855
+339:1856
+339:1857
+339:1858
+339:1859
+339:1860
+339:1867
+339:1868
+339:1869
+339:1870
+339:1871
+341:1877
+341:1878
+341:1879
+341:1880
+341:1881
+341:1882
+341:1883
+341:1927
+341:1928
+342:1808
+342:1885
+342:1886
+342:1887
+342:1888
+342:1889
+342:1890
+342:1891
+342:1892
+342:1901
+342:1902
+342:1903
+342:1904
+342:1905
+343:1908
+343:1909
+343:1910
+343:1911
+343:1912
+343:1913
+343:1914
+343:1921
+343:1922
+343:1923
+343:1924
+343:1925
+345:1931
+345:1932
+345:1933
+345:1934
+345:1935
+345:1936
+345:1937
+345:1958
+345:1959
+346:1939
+346:1940
+346:1941
+346:1942
+346:1943
+346:1944
+346:1945
+346:1952
+346:1953
+346:1954
+346:1955
+346:1956
+348:1962
+348:1963
+348:1964
+348:1965
+348:1966
+348:1967
+348:1968
+348:1989
+348:1990
+349:1970
+349:1971
+349:1972
+349:1973
+349:1974
+349:1975
+349:1976
+349:1983
+349:1984
+349:1985
+349:1986
+349:1987
+351:1993
+351:1994
+351:1995
+351:1996
+351:1997
+351:1998
+351:1999
+351:2020
+351:2021
+352:2001
+352:2002
+352:2003
+352:2004
+352:2005
+352:2006
+352:2007
+352:2014
+352:2015
+352:2016
+352:2017
+352:2018
+355:2030
+356:2033
+356:2034
+356:2035
+356:2036
+356:2037
+356:2038
+359:2157
+359:2161
+359:2162
+359:2163
+359:2180
+359:2181
+359:2182
+359:2183
+359:2184
+359:2185
+359:2186
+359:2187
+359:2188
+359:2189
+359:2190
+359:2191
+360:2164
+360:2165
+360:2166
+360:2167
+360:2168
+360:2169
+361:2172
+361:2173
+361:2174
+361:2175
+361:2176
+361:2177
+*E
diff --git a/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java b/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java
new file mode 100644
index 0000000..7e6fbcb
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java
@@ -0,0 +1,129 @@
+// $ANTLR 2.7.7 (2006-01-29): "antlr.print.g" -> "ANTLRTreePrinter.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+
+public interface ANTLRTreePrinterTokenTypes {
+	int EOF = 1;
+	int NULL_TREE_LOOKAHEAD = 3;
+	int OPTIONS = 4;
+	int TOKENS = 5;
+	int PARSER = 6;
+	int LEXER = 7;
+	int RULE = 8;
+	int BLOCK = 9;
+	int OPTIONAL = 10;
+	int CLOSURE = 11;
+	int POSITIVE_CLOSURE = 12;
+	int SYNPRED = 13;
+	int RANGE = 14;
+	int CHAR_RANGE = 15;
+	int EPSILON = 16;
+	int ALT = 17;
+	int EOR = 18;
+	int EOB = 19;
+	int EOA = 20;
+	int ID = 21;
+	int ARG = 22;
+	int ARGLIST = 23;
+	int RET = 24;
+	int LEXER_GRAMMAR = 25;
+	int PARSER_GRAMMAR = 26;
+	int TREE_GRAMMAR = 27;
+	int COMBINED_GRAMMAR = 28;
+	int INITACTION = 29;
+	int LABEL = 30;
+	int TEMPLATE = 31;
+	int SCOPE = 32;
+	int GATED_SEMPRED = 33;
+	int SYN_SEMPRED = 34;
+	int BACKTRACK_SEMPRED = 35;
+	int FRAGMENT = 36;
+	int ACTION = 37;
+	int DOC_COMMENT = 38;
+	int SEMI = 39;
+	int LITERAL_lexer = 40;
+	int LITERAL_tree = 41;
+	int LITERAL_grammar = 42;
+	int AMPERSAND = 43;
+	int COLON = 44;
+	int RCURLY = 45;
+	int ASSIGN = 46;
+	int STRING_LITERAL = 47;
+	int CHAR_LITERAL = 48;
+	int INT = 49;
+	int STAR = 50;
+	int TOKEN_REF = 51;
+	int LITERAL_protected = 52;
+	int LITERAL_public = 53;
+	int LITERAL_private = 54;
+	int BANG = 55;
+	int ARG_ACTION = 56;
+	int LITERAL_returns = 57;
+	int LITERAL_throws = 58;
+	int COMMA = 59;
+	int LPAREN = 60;
+	int OR = 61;
+	int RPAREN = 62;
+	int LITERAL_catch = 63;
+	int LITERAL_finally = 64;
+	int PLUS_ASSIGN = 65;
+	int SEMPRED = 66;
+	int IMPLIES = 67;
+	int ROOT = 68;
+	int RULE_REF = 69;
+	int NOT = 70;
+	int TREE_BEGIN = 71;
+	int QUESTION = 72;
+	int PLUS = 73;
+	int WILDCARD = 74;
+	int REWRITE = 75;
+	int DOLLAR = 76;
+	int DOUBLE_QUOTE_STRING_LITERAL = 77;
+	int DOUBLE_ANGLE_STRING_LITERAL = 78;
+	int WS = 79;
+	int COMMENT = 80;
+	int SL_COMMENT = 81;
+	int ML_COMMENT = 82;
+	int OPEN_ELEMENT_OPTION = 83;
+	int CLOSE_ELEMENT_OPTION = 84;
+	int ESC = 85;
+	int DIGIT = 86;
+	int XDIGIT = 87;
+	int NESTED_ARG_ACTION = 88;
+	int NESTED_ACTION = 89;
+	int ACTION_CHAR_LITERAL = 90;
+	int ACTION_STRING_LITERAL = 91;
+	int ACTION_ESC = 92;
+	int WS_LOOP = 93;
+	int INTERNAL_RULE_REF = 94;
+	int WS_OPT = 95;
+	int SRC = 96;
+}
diff --git a/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt b/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt
new file mode 100644
index 0000000..88d2e6a
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt
@@ -0,0 +1,95 @@
+// $ANTLR 2.7.7 (2006-01-29): antlr.print.g -> ANTLRTreePrinterTokenTypes.txt$
+ANTLRTreePrinter    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
diff --git a/src/org/antlr/tool/ANTLRv3.g b/src/org/antlr/tool/ANTLRv3.g
new file mode 100644
index 0000000..d089640
--- /dev/null
+++ b/src/org/antlr/tool/ANTLRv3.g
@@ -0,0 +1,745 @@
+/** ANTLR v3 grammar written in ANTLR v3 */
+grammar ANTLRv3;
+
+options {
+	output=AST;
+}
+
+tokens {
+	DOC_COMMENT;
+	PARSER;	
+    LEXER;
+    RULE;
+    BLOCK;
+    OPTIONAL;
+    CLOSURE;
+    POSITIVE_CLOSURE;
+    SYNPRED;
+    RANGE;
+    CHAR_RANGE;
+    EPSILON;
+    ALT;
+    EOR;
+    EOB;
+    EOA; // end of alt
+    ID;
+    ARG;
+    ARGLIST;
+    RET;
+    LEXER_GRAMMAR;
+    PARSER_GRAMMAR;
+    TREE_GRAMMAR;
+    COMBINED_GRAMMAR;
+    INITACTION;
+    LABEL; // $x used in rewrite rules
+    TEMPLATE;
+    SCOPE='scope';
+    SEMPRED;
+    GATED_SEMPRED; // {p}? =>
+    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
+    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
+    FRAGMENT='fragment';
+}
+
+ at header {
+package org.antlr.tool;
+import java.util.Map;
+import java.util.HashMap;
+}
+
+ at members {
+	Grammar grammar = null;
+	protected int gtype = 0;
+	protected String currentRuleName = null;
+	protected GrammarAST currentBlockAST = null;
+}
+
+grammarDef
+ at init {
+		for (int i=0; i<input.size(); i++) {
+			System.out.println(input.get(i));
+		}
+}
+    :   DOC_COMMENT?
+    	(	'lexer'  {gtype=LEXER_GRAMMAR;}    // pure lexer
+    	|   'parser' {gtype=PARSER_GRAMMAR;}   // pure parser
+    	|   'tree'   {gtype=TREE_GRAMMAR;}     // a tree parser
+    	|		     {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
+    	)
+    	'grammar' id ';' optionsSpec? tokensSpec? attrScope* action*
+    	rule+
+    	EOF
+    	-> ^('grammar' id DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action*)
+    ;
+
+tokensSpec
+	:	TOKENS tokenSpec+ '}' -> ^(TOKENS tokenSpec+)
+	;
+
+tokenSpec
+	:	TOKEN_REF
+		(	'=' (lit=STRING_LITERAL|lit=CHAR_LITERAL)	-> ^('=' TOKEN_REF $lit)
+		|												-> TOKEN_REF
+		)
+		';'
+	;
+
+attrScope
+	:	'scope' id ACTION -> ^('scope' id ACTION)
+	;
+
+/** Match stuff like @parser::members {int i;} */
+action
+	:	'@' (actionScopeName '::')? id ACTION
+		-> ^('@' actionScopeName? id ACTION)
+	;
+
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+actionScopeName
+	:	id
+	|	l='lexer'	-> ID[$l]
+    |   p='parser'	-> ID[$p]
+	;
+
+optionsSpec returns [Map opts]
+ at init {
+	$opts=new HashMap();
+}
+	:	OPTIONS (option[$opts] ';')+ '}'
+		-> ^(OPTIONS option+)
+	;
+
+option[Map opts]
+    :   id '=' v=optionValue {$opts.put($id.text, $v.value);}
+    	-> ^('=' id optionValue)
+ 	;
+ 	
+optionValue returns [Object value]
+ at init {$value=null;}
+    :   id			 	{$value = $id.text;}
+    |   STRING_LITERAL	{String vs = $STRING_LITERAL.text;
+                         $value=vs.substring(1,vs.length()-1);}
+    |   CHAR_LITERAL	{String vs = $CHAR_LITERAL.text;
+                         $value=vs.substring(1,vs.length()-1);}
+    |   INT				{$value = new Integer($INT.text);}
+    |	s='*'			{$value = '*';} -> STRING_LITERAL[$s]  // used for k=*
+    ;
+
+rule
+ at init {
+GrammarAST modifier=null, blk=null, blkRoot=null, eob=null;
+int start = ((TokenWithIndex)LT(1)).getIndex();
+int startLine = LT(1).getLine();
+GrammarAST opt = null;
+Map opts = null;
+}
+	:	DOC_COMMENT?
+		{modifier=input.LT(1);}
+		(	'protected'
+		|	'public'
+		|	'private'
+		|	frag='fragment'
+		)?
+		ruleName=id
+		{
+		currentRuleName=$ruleName.text;
+	    if ( gtype==LEXER_GRAMMAR && $frag==null ) {
+	        lexerRuleNames.add(currentRuleName);
+		}
+		}
+		'!'?
+		( arg=ARG_ACTION )?
+		( 'returns' rt=ARG_ACTION  )?
+		throwsSpec?
+	    optionsSpec?
+		ruleScopeSpec
+		ruleAction+
+		':'
+		b=altList[opts]
+		semi=';'
+		exceptionGroup?
+	    {
+	    /*
+	    int stop = ((TokenWithIndex)LT(1)).getIndex()-1; // point at the semi or exception thingie
+		eob.setLine(semi.getLine());
+		eob.setColumn(semi.getColumn());
+	    GrammarAST eor = #[EOR,'<end-of-rule>'];
+	   	eor.setEnclosingRule($ruleName.text);
+		eor.setLine(semi.getLine());
+		eor.setColumn(semi.getColumn());
+		GrammarAST root = #[RULE,'rule'];
+		root.ruleStartTokenIndex = start;
+		root.ruleStopTokenIndex = stop;
+		root.setLine(startLine);
+		root.options = opts;
+	    #rule = #(root,
+	              #ruleName,modifier,#(#[ARG,'ARG'],#aa),#(#[RET,'RET'],#rt),
+	              opt,#scopes,#a,blk,ex,eor);
+	              */
+		currentRuleName=null;
+	    }
+	    -> ^( RULE $ruleName {modifier} ^(ARG $arg)? ^(RET $rt)?
+	    	  optionsSpec? ruleScopeSpec? ruleAction+
+	    	  altList
+	    	  exceptionGroup?
+	    	  EOR["<end-of-rule>"]
+	    	)	    	  
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	'@' id ACTION -> ^('@' id ACTION)
+	;
+
+throwsSpec
+	:	'throws' id ( ',' id )* -> ^('throws' id+)
+	;
+
+ruleScopeSpec
+ at init {
+}
+	:	( 'scope' ACTION )?
+		( 'scope' id+ ';' )*
+		-> ^('scope' ACTION? id+)
+	;
+
+/** Build #(BLOCK ( #(ALT ...) EOB )+ ) */
+block
+ at init {
+GrammarAST save = currentBlockAST;
+Map opts=null;
+}
+ at after {
+$block.tree.setOptions(grammar,opts);
+}
+    :   lp='('
+		( (opts=optionsSpec)? ':' )?
+		{currentBlockAST = lp;}
+		a1=alternative rewrite
+		{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a1);}
+		( '|' a2=alternative rewrite
+		  {if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a2);}
+		)*
+        rp=')'
+        {
+		currentBlockAST = save;
+		}
+        -> ^( BLOCK[$lp] optionsSpec? alternative+ EOB[$rp] )
+    ;
+
+altList[Map opts]
+ at init {
+	GrammarAST blkRoot = #[BLOCK,'BLOCK'];
+	blkRoot.setLine(LT(1).getLine());
+	blkRoot.setColumn(LT(1).getColumn());
+	GrammarAST save = currentBlockAST;
+	currentBlockAST = #blkRoot;
+}
+    :   a1=alternative rewrite
+		{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a1.tree);}
+    	( '|' a2=alternative rewrite
+    	  {if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a2.tree);}
+    	)*
+        {
+        currentBlockAST = save;
+        }
+		-> ^( {blkRoot} (alternative rewrite)+ EOB["<end-of-block>"] )
+    ;
+
+alternative
+ at init {
+	Token firstToken = input.LT(1);
+}
+    :   ( el=element )+ -> ^(ALT[firstToken] element+ EOA["<end-of-alt>"])
+    |   -> ^(ALT[input.LT(1)] EPSILON[input.LT(-1)] EOA["<end-of-alt>"])
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ ( finallyClause )?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    'catch'^ ARG_ACTION ACTION
+    ;
+
+finallyClause
+    :    'finally'^ ACTION
+    ;
+
+element
+	:	elementNoOptionSpec
+	;
+
+elementNoOptionSpec
+ at init {
+    IntSet elements=null;
+    GrammarAST sub, sub2;
+}
+	:	id ('='^|'+='^) (atom|block)
+        ( sub=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub;} )?
+    |   atom
+        ( sub2=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub2;} )?
+    |	ebnf
+	|   ACTION
+	|   p=SEMPRED ( '=>' ! {#p.setType(GATED_SEMPRED);} )?
+		{
+		#p.setEnclosingRule(currentRuleName);
+		grammar.blocksWithSemPreds.add(currentBlockAST);
+		}
+	|   t3=treeSpec
+	;
+
+atom:   range ('^'^|'!'^)?
+    |   terminal
+    |	notSet ('^'^|'!'^)?
+    |   rr=RULE_REF^
+		( ARG_ACTION )?
+		('^'^|'!'^)?
+    ;
+
+notSet
+ at init {
+    int line = LT(1).getLine();
+    int col = LT(1).getColumn();
+    GrammarAST subrule=null;
+}
+	:	n='~'^
+		(	notTerminal
+        |   block
+		)
+        {#notSet.setLine(line); #notSet.setColumn(col);}
+	;
+
+treeSpec :
+	'^('^
+        element ( element )+
+    ')'!
+	;
+
+/** matches ENBF blocks (and sets via block rule) */
+ebnf
+ at init {
+    int line = LT(1).getLine();
+    int col = LT(1).getColumn();
+}
+	:	b=block
+		(	'?'    {#ebnf=#([OPTIONAL,'?'],#b);}
+		|	'*'	    {#ebnf=#([CLOSURE,'*'],#b);}
+		|	'+'	    {#ebnf=#([POSITIVE_CLOSURE,'+'],#b);}
+		|   '=>'! // syntactic predicate
+			{
+			if ( gtype==COMBINED_GRAMMAR &&
+			     Character.isUpperCase(currentRuleName.charAt(0)) )
+		    {
+                // ignore for lexer rules in combined
+		    	#ebnf = #(#[SYNPRED,'=>'],#b); 
+		    }
+		    else {
+		    	// create manually specified (...)=> predicate;
+                // convert to sempred
+		    	#ebnf = createSynSemPredFromBlock(#b, SYN_SEMPRED);
+			}
+			}
+		|   '^' {#ebnf = #(#ROOT, #b);}
+		|   '!' {#ebnf = #(#BANG, #b);}
+        |   {#ebnf = #b;}
+		)
+		{#ebnf.setLine(line); #ebnf.setColumn(col);}
+	;
+
+range!
+ at init {
+GrammarAST subrule=null, root=null;
+}
+	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL
+		{
+		GrammarAST r = #[CHAR_RANGE,".."];
+		r.setLine(c1.getLine());
+		r.setColumn(c1.getColumn());
+		#range = #(r, #c1, #c2);
+		root = #range;
+		}
+//    	(subrule=ebnfSuffix[root,false] {#range=subrule;})?
+	;
+
+terminal
+ at init {
+GrammarAST ebnfRoot=null, subrule=null;
+}
+    :   CHAR_LITERAL^ ('^'^|'!'^)?
+
+	|   TOKEN_REF^
+			( ARG_ACTION )? // Args are only valid for lexer rules
+            ('^'^|'!'^)?
+
+	|   STRING_LITERAL ('^'^|'!'^)?
+
+	|   '.' ('^'^|'!'^)?
+	;
+
+ebnfSuffix[GrammarAST elemAST, boolean inRewrite] returns [GrammarAST subrule=null]
+ at init {
+GrammarAST ebnfRoot=null;
+// bang on alt
+}
+	:	(	'?'	{ebnfRoot = #[OPTIONAL,'?'];}
+   		|	'*' {ebnfRoot = #[CLOSURE,'*'];}
+   		|	'+' {ebnfRoot = #[POSITIVE_CLOSURE,'+'];}
+   		)
+    	{
+		GrammarAST save = currentBlockAST;
+       	ebnfRoot.setLine(elemAST.getLine());
+       	ebnfRoot.setColumn(elemAST.getColumn());
+    	GrammarAST blkRoot = #[BLOCK,"BLOCK"];
+    	currentBlockAST = blkRoot;
+       	GrammarAST eob = #[EOB,'<end-of-block>'];
+		eob.setLine(elemAST.getLine());
+		eob.setColumn(elemAST.getColumn());
+		GrammarAST alt = #(#[ALT,'ALT'],elemAST,#[EOA,"<end-of-alt>"]);
+    	if ( !inRewrite ) {
+    		prefixWithSynPred(alt);
+    	}
+  		subrule =
+  		     #(ebnfRoot,
+  		       #(blkRoot,alt,eob)
+  		      );
+  		currentBlockAST = save;
+   		}
+    ;
+
+notTerminal
+	:   CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+
+
+// R E W R I T E  S Y N T A X
+
+rewrite
+ at init {
+    GrammarAST root = new GrammarAST();
+    // bang on alt
+}
+	:
+		( rew='->' pred=SEMPRED alt=rewrite_alternative
+	      {root.addChild( #(#rew, #pred, #alt) );}
+		  {
+          #pred.setEnclosingRule(currentRuleName);
+          #rew.setEnclosingRule(currentRuleName);
+          }
+	    )*
+		rew2='->' alt2=rewrite_alternative
+        {
+        root.addChild( #(#rew2, #alt2) );
+        #rewrite = (GrammarAST)root.getFirstChild();
+        }
+	|
+	;
+
+rewrite_block
+    :   lp='('^ {#lp.setType(BLOCK); #lp.setText('BLOCK');}
+		rewrite_alternative
+        ')'!
+        {
+        GrammarAST eob = #[EOB,"<end-of-block>"];
+        eob.setLine(lp.getLine());
+        eob.setColumn(lp.getColumn());
+        #rewrite_block.addChild(eob);
+        }
+    ;
+
+rewrite_alternative
+ at init {
+    GrammarAST eoa = #[EOA, "<end-of-alt>"];
+    GrammarAST altRoot = #[ALT,"ALT"];
+    altRoot.setLine(LT(1).getLine());
+    altRoot.setColumn(LT(1).getColumn());
+}
+    :	{grammar.buildTemplate()}? rewrite_template
+
+    |	{grammar.buildAST()}? ( rewrite_element )+
+        {
+            if ( #rewrite_alternative==null ) {
+                #rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);
+            }
+            else {
+                #rewrite_alternative = #(altRoot, #rewrite_alternative,eoa);
+            }
+        }
+
+   	|   {#rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);}
+    ;
+
+rewrite_element
+ at init {
+GrammarAST subrule=null;
+}
+	:	t=rewrite_atom
+    	( subrule=ebnfSuffix[#t,true] {#rewrite_element=subrule;} )?
+	|   rewrite_ebnf
+	|   tr=rewrite_tree
+    	( subrule=ebnfSuffix[#tr,true] {#rewrite_element=subrule;} )?
+	;
+
+rewrite_atom
+ at init {
+GrammarAST subrule=null;
+}
+    :   CHAR_LITERAL
+	|   TOKEN_REF^ (ARG_ACTION)? // for imaginary nodes
+    |   RULE_REF
+	|   STRING_LITERAL
+	|   // bang on this alt
+		d='$' i=id // reference to a label in a rewrite rule
+		{
+		#rewrite_atom = #[LABEL,i_AST.getText()];
+		#rewrite_atom.setLine(#d.getLine());
+		#rewrite_atom.setColumn(#d.getColumn());
+        #rewrite_atom.setEnclosingRule(currentRuleName);
+		}
+	|	ACTION
+	;
+
+rewrite_ebnf!
+ at init {
+    int line = LT(1).getLine();
+    int col = LT(1).getColumn();
+}
+	:	b=rewrite_block
+		(	'?'   {#rewrite_ebnf=#([OPTIONAL,'?'],#b);}
+		|	'*'	  {#rewrite_ebnf=#([CLOSURE,'*'],#b);}
+		|	'+'	  {#rewrite_ebnf=#([POSITIVE_CLOSURE,'+'],#b);}
+		)
+		{#rewrite_ebnf.setLine(line); #rewrite_ebnf.setColumn(col);}
+	;
+
+rewrite_tree :
+	'^(' rewrite_atom rewrite_element* ')' -> ^('^(' rewrite_atom rewrite_element* )
+	;
+
+/** Build a tree for a template rewrite:
+      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+    where ARGLIST is always there even if no args exist.
+    ID can be "template" keyword.  If first child is ACTION then it's
+    an indirect template ref
+
+    -> foo(a={...}, b={...})
+    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
+    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+rewrite_template
+ at init {Token st=null;}
+	:   // -> template(a={...},...) "..."
+		{LT(1).getText().equals('template')}? // inline
+		rewrite_template_head {st=LT(1);}
+		( DOUBLE_QUOTE_STRING_LITERAL! | DOUBLE_ANGLE_STRING_LITERAL! )
+		{#rewrite_template.addChild(#[st]);}
+
+	|	// -> foo(a={...}, ...)
+		rewrite_template_head
+
+	|	// -> ({expr})(a={...}, ...)
+		rewrite_indirect_template_head
+
+	|	// -> {...}
+		ACTION
+	;
+
+/** -> foo(a={...}, ...) */
+rewrite_template_head
+	:	id lp='('^ {#lp.setType(TEMPLATE); #lp.setText('TEMPLATE');}
+		rewrite_template_args
+		')'!
+	;
+
+/** -> ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	lp='('^ {#lp.setType(TEMPLATE); #lp.setText('TEMPLATE');}
+		ACTION
+		')'!
+		'('! rewrite_template_args ')'!
+	;
+
+rewrite_template_args
+	:	rewrite_template_arg (','! rewrite_template_arg)*
+		{#rewrite_template_args = #(#[ARGLIST,"ARGLIST"], rewrite_template_args);}
+	|	{#rewrite_template_args = #[ARGLIST,"ARGLIST"];}
+	;
+
+rewrite_template_arg
+	:   id a='=' ACTION -> ^(ARG[$a] id ACTION)
+	;
+
+idList
+	:	id+
+	;
+
+id	:	TOKEN_REF -> ID[$TOKEN_REF]
+	|	RULE_REF  -> ID[$RULE_REF]
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//'
+ 	 	(	' $ANTLR ' SRC // src directive
+ 		|	.*
+		)
+		'\r'? '\n'
+		{$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {if (input.LA(1)=='*') $type=DOC_COMMENT; else $channel=HIDDEN;} .* '*/'
+	;
+
+CHAR_LITERAL
+	:	'\'' LITERAL_CHAR '\''
+	;
+
+STRING_LITERAL
+	:	'\'' LITERAL_CHAR LITERAL_CHAR* '\''
+	;
+
+fragment
+LITERAL_CHAR
+	:	ESC
+	|	~('\''|'\\')
+	;
+
+DOUBLE_QUOTE_STRING_LITERAL
+	:	'"' LITERAL_CHAR* '"'
+	;
+
+DOUBLE_ANGLE_STRING_LITERAL
+	:	'<<' .* '>>'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+INT	:	'0'..'9'+
+	;
+
+ARG_ACTION
+	:	NESTED_ARG_ACTION
+	;
+
+fragment
+NESTED_ARG_ACTION :
+	'['!
+	(	options {greedy=false; k=1;}
+	:	NESTED_ARG_ACTION
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	']'!
+	;
+
+ACTION
+	:	NESTED_ACTION ( '?' {$type = SEMPRED;} )?
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=3;}
+	:	NESTED_ACTION
+//	|	DOC_COMMENT
+	|	SL_COMMENT
+	|	ML_COMMENT
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	'}'
+   ;
+
+fragment
+ACTION_CHAR_LITERAL
+	:	'\'' (ACTION_ESC|.) '\''
+	;
+
+fragment
+ACTION_STRING_LITERAL
+	:	'"' (ACTION_ESC|.) (ACTION_ESC|.)* '"'
+	;
+
+fragment
+ACTION_ESC
+	:	'\\\''
+	|	'\\"'
+	|	'\\' ~('\''|'"')
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+	
+OPTIONS
+	:	'options' WS_LOOP '{' {$channel=0;} // reset after WS call
+	;
+	
+TOKENS
+	:	'tokens' WS_LOOP '{' {$channel=0;} // reset after WS call
+	;
+
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ */
+protected
+SRC	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
+		{
+		//setFilename($file.text.substring(1,$file.text.length()-1));
+		//setLine(Integer.parseInt($line.text)-1);  // -1 because SL_COMMENT will increment the line no. KR
+		$channel=HIDDEN;
+		}
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
+
+fragment
+WS_LOOP
+	:	(	WS
+		|	SL_COMMENT
+		|	ML_COMMENT
+		)*
+		{$channel=HIDDEN;}
+	;
+
diff --git a/src/org/antlr/tool/ActionAnalysis.g b/src/org/antlr/tool/ActionAnalysis.g
new file mode 100644
index 0000000..c07308a
--- /dev/null
+++ b/src/org/antlr/tool/ActionAnalysis.g
@@ -0,0 +1,129 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** We need to set Rule.referencedPredefinedRuleAttributes before
+ *  code generation.  This filter looks at an action in context of
+ *  its rule and outer alternative number and figures out which
+ *  rules have predefined prefs referenced.  I need this so I can
+ *  remove unusued labels.  This also tracks, for labeled rules,
+ *  which are referenced by actions.
+ */
+lexer grammar ActionAnalysis;
+options {
+  filter=true;  // try all non-fragment rules in order specified
+}
+
+ at header {
+package org.antlr.codegen;
+import org.antlr.runtime.*;
+import org.antlr.tool.*;
+}
+
+ at members {
+Rule enclosingRule;
+Grammar grammar;
+antlr.Token actionToken;
+int outerAltNum = 0;
+
+	public ActionAnalysisLexer(Grammar grammar, String ruleName, GrammarAST actionAST)
+	{
+		this(new ANTLRStringStream(actionAST.token.getText()));
+		this.grammar = grammar;
+	    this.enclosingRule = grammar.getRule(ruleName);
+	    this.actionToken = actionAST.token;
+	    this.outerAltNum = actionAST.outerAltNum;
+	}
+
+public void analyze() {
+	// System.out.println("###\naction="+actionToken);
+	Token t;
+	do {
+		t = nextToken();
+	} while ( t.getType()!= Token.EOF );
+}
+}
+
+/**	$x.y	x is enclosing rule or rule ref or rule label
+ *			y is a return value, parameter, or predefined property.
+ */
+X_Y :	'$' x=ID '.' y=ID {enclosingRule!=null}?
+		{
+		AttributeScope scope = null;
+		String refdRuleName = null;
+		if ( $x.text.equals(enclosingRule.name) ) {
+			// ref to enclosing rule.
+			refdRuleName = $x.text;
+			scope = enclosingRule.getLocalAttributeScope($y.text);
+		}
+		else if ( enclosingRule.getRuleLabel($x.text)!=null ) {
+			// ref to rule label
+			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel($x.text);
+			pair.actionReferencesLabel = true;
+			refdRuleName = pair.referencedRuleName;
+			Rule refdRule = grammar.getRule(refdRuleName);
+			scope = refdRule.getLocalAttributeScope($y.text);
+		}
+		else if ( enclosingRule.getRuleRefsInAlt(x.getText(), outerAltNum)!=null ) {
+			// ref to rule referenced in this alt
+			refdRuleName = $x.text;
+			Rule refdRule = grammar.getRule(refdRuleName);
+			scope = refdRule.getLocalAttributeScope($y.text);
+		}
+		if ( scope!=null &&
+			 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
+		{
+			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+			//System.out.println("referenceRuleLabelPredefinedAttribute for "+refdRuleName);
+		}
+		}
+	;
+
+/** $x	x is an isolated rule label.  Just record that the label was referenced */
+X	:	'$' x=ID {enclosingRule!=null && enclosingRule.getRuleLabel($x.text)!=null}?
+		{
+			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel($x.text);
+			pair.actionReferencesLabel = true;
+		}
+	;
+	
+/** $y	y is a return value, parameter, or predefined property of current rule */
+Y	:	'$' ID {enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null}?
+		{
+			AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
+			if ( scope!=null &&
+				 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
+			{
+				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+				//System.out.println("referenceRuleLabelPredefinedAttribute for "+$ID.text);
+			}
+		}
+	;
+	
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
diff --git a/src/org/antlr/tool/ActionAnalysis.tokens b/src/org/antlr/tool/ActionAnalysis.tokens
new file mode 100644
index 0000000..a788b35
--- /dev/null
+++ b/src/org/antlr/tool/ActionAnalysis.tokens
@@ -0,0 +1,5 @@
+X_Y=5
+Tokens=8
+Y=7
+ID=4
+X=6
diff --git a/src/org/antlr/tool/ActionAnalysisLexer.java b/src/org/antlr/tool/ActionAnalysisLexer.java
new file mode 100644
index 0000000..e7ece4b
--- /dev/null
+++ b/src/org/antlr/tool/ActionAnalysisLexer.java
@@ -0,0 +1,400 @@
+// $ANTLR 3.0b7 ActionAnalysis.g 2007-04-03 12:25:48
+
+package org.antlr.tool;
+import org.antlr.runtime.*;
+import org.antlr.tool.AttributeScope;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+import org.antlr.tool.Rule;
+
+import java.util.HashMap;
+/** We need to set Rule.referencedPredefinedRuleAttributes before
+ *  code generation.  This filter looks at an action in context of
+ *  its rule and outer alternative number and figures out which
+ *  rules have predefined prefs referenced.  I need this so I can
+ *  remove unusued labels.
+ */
+public class ActionAnalysisLexer extends Lexer {
+    public static final int X_Y=5;
+    public static final int EOF=-1;
+    public static final int Tokens=8;
+    public static final int Y=7;
+    public static final int ID=4;
+    public static final int X=6;
+
+    Rule enclosingRule;
+    Grammar grammar;
+    antlr.Token actionToken;
+    int outerAltNum = 0;
+
+    	public ActionAnalysisLexer(Grammar grammar, String ruleName, GrammarAST actionAST)
+    	{
+    		this(new ANTLRStringStream(actionAST.token.getText()));
+    		this.grammar = grammar;
+    	    this.enclosingRule = grammar.getRule(ruleName);
+    	    this.actionToken = actionAST.token;
+    	    this.outerAltNum = actionAST.outerAltNum;
+    	}
+
+    public void analyze() {
+    	// System.out.println("###\naction="+actionToken);
+    	Token t;
+    	do {
+    		t = nextToken();
+    	} while ( t.getType()!= Token.EOF );
+    }
+
+    public ActionAnalysisLexer() {;}
+    public ActionAnalysisLexer(CharStream input) {
+        super(input);
+        ruleMemo = new HashMap[7+1];
+     }
+    public String getGrammarFileName() { return "ActionAnalysis.g"; }
+
+    public Token nextToken() {
+        while (true) {
+            if ( input.LA(1)==CharStream.EOF ) {
+                return Token.EOF_TOKEN;
+            }
+            token = null;
+    	channel = Token.DEFAULT_CHANNEL;
+            tokenStartCharIndex = input.index();
+            tokenStartCharPositionInLine = input.getCharPositionInLine();
+            tokenStartLine = input.getLine();
+    	text = null;
+            try {
+                int m = input.mark();
+                backtracking=1;
+                failed=false;
+                mTokens();
+                backtracking=0;
+
+                if ( failed ) {
+                    input.rewind(m);
+                    input.consume();
+                }
+                else {
+                    emit();
+                    return token;
+                }
+            }
+            catch (RecognitionException re) {
+                // shouldn't happen in backtracking mode, but...
+                reportError(re);
+                recover(re);
+            }
+        }
+    }
+
+    public void memoize(IntStream input,
+    		int ruleIndex,
+    		int ruleStartIndex)
+    {
+    if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+    }
+
+    public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+    if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+    return false;
+    }// $ANTLR start X_Y
+    public final void mX_Y() throws RecognitionException {
+        try {
+            int _type = X_Y;
+            // ActionAnalysis.g:73:7: ( '$' x= ID '.' y= ID {...}?)
+            // ActionAnalysis.g:73:7: '$' x= ID '.' y= ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            match('.'); if (failed) return ;
+            int yStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
+            if ( !(enclosingRule!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "X_Y", "enclosingRule!=null");
+            }
+            if ( backtracking==1 ) {
+
+              		AttributeScope scope = null;
+              		String refdRuleName = null;
+              		if ( x.getText().equals(enclosingRule.name) ) {
+              			// ref to enclosing rule.
+              			refdRuleName = x.getText();
+              			scope = enclosingRule.getLocalAttributeScope(y.getText());
+              		}
+              		else if ( enclosingRule.getRuleLabel(x.getText())!=null ) {
+              			// ref to rule label
+              			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel(x.getText());
+              			pair.actionReferencesLabel = true;
+              			refdRuleName = pair.referencedRuleName;
+              			Rule refdRule = grammar.getRule(refdRuleName);
+              			scope = refdRule.getLocalAttributeScope(y.getText());
+              		}
+              		else if ( enclosingRule.getRuleRefsInAlt(x.getText(), outerAltNum)!=null ) {
+              			// ref to rule referenced in this alt
+              			refdRuleName = x.getText();
+              			Rule refdRule = grammar.getRule(refdRuleName);
+              			scope = refdRule.getLocalAttributeScope(y.getText());
+              		}
+              		if ( scope!=null &&
+              			 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
+              		{
+              			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+              			//System.out.println("referenceRuleLabelPredefinedAttribute for "+refdRuleName);
+              		}
+
+            }
+
+            }
+
+            this.type = _type;
+        }
+        finally {
+        }
+    }
+    // $ANTLR end X_Y
+
+    // $ANTLR start X
+    public final void mX() throws RecognitionException {
+        try {
+            int _type = X;
+            // ActionAnalysis.g:106:5: ( '$' x= ID {...}?)
+            // ActionAnalysis.g:106:5: '$' x= ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int xStart = getCharIndex();
+            mID(); if (failed) return ;
+            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
+            if ( !(enclosingRule!=null && enclosingRule.getRuleLabel(x.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "X", "enclosingRule!=null && enclosingRule.getRuleLabel($x.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel(x.getText());
+              			pair.actionReferencesLabel = true;
+
+            }
+
+            }
+
+            this.type = _type;
+        }
+        finally {
+        }
+    }
+    // $ANTLR end X
+
+    // $ANTLR start Y
+    public final void mY() throws RecognitionException {
+        try {
+            int _type = Y;
+            // ActionAnalysis.g:114:5: ( '$' ID {...}?)
+            // ActionAnalysis.g:114:5: '$' ID {...}?
+            {
+            match('$'); if (failed) return ;
+            int ID1Start = getCharIndex();
+            mID(); if (failed) return ;
+            Token ID1 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID1Start, getCharIndex()-1);
+            if ( !(enclosingRule!=null && enclosingRule.getLocalAttributeScope(ID1.getText())!=null) ) {
+                if (backtracking>0) {failed=true; return ;}
+                throw new FailedPredicateException(input, "Y", "enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null");
+            }
+            if ( backtracking==1 ) {
+
+              			AttributeScope scope = enclosingRule.getLocalAttributeScope(ID1.getText());
+              			if ( scope!=null &&
+              				 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
+              			{
+              				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+              				//System.out.println("referenceRuleLabelPredefinedAttribute for "+ID1.getText());
+              			}
+
+            }
+
+            }
+
+            this.type = _type;
+        }
+        finally {
+        }
+    }
+    // $ANTLR end Y
+
+    // $ANTLR start ID
+    public final void mID() throws RecognitionException {
+        try {
+            // ActionAnalysis.g:127:9: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* )
+            // ActionAnalysis.g:127:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
+            {
+            if ( (input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
+                input.consume();
+            failed=false;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                MismatchedSetException mse =
+                    new MismatchedSetException(null,input);
+                recover(mse);    throw mse;
+            }
+
+            // ActionAnalysis.g:127:33: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
+            loop1:
+            do {
+                int alt1=2;
+                int LA1_0 = input.LA(1);
+
+                if ( ((LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||LA1_0=='_'||(LA1_0>='a' && LA1_0<='z')) ) {
+                    alt1=1;
+                }
+
+
+                switch (alt1) {
+            	case 1 :
+            	    // ActionAnalysis.g:
+            	    {
+            	    if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
+            	        input.consume();
+            	    failed=false;
+            	    }
+            	    else {
+            	        if (backtracking>0) {failed=true; return ;}
+            	        MismatchedSetException mse =
+            	            new MismatchedSetException(null,input);
+            	        recover(mse);    throw mse;
+            	    }
+
+
+            	    }
+            	    break;
+
+            	default :
+            	    break loop1;
+                }
+            } while (true);
+
+
+            }
+
+        }
+        finally {
+        }
+    }
+    // $ANTLR end ID
+
+    public void mTokens() throws RecognitionException {
+        // ActionAnalysis.g:1:41: ( X_Y | X | Y )
+        int alt2=3;
+        int LA2_0 = input.LA(1);
+
+        if ( (LA2_0=='$') ) {
+            int LA2_1 = input.LA(2);
+
+            if ( (synpred1()) ) {
+                alt2=1;
+            }
+            else if ( (synpred2()) ) {
+                alt2=2;
+            }
+            else if ( (true) ) {
+                alt2=3;
+            }
+            else {
+                if (backtracking>0) {failed=true; return ;}
+                NoViableAltException nvae =
+                    new NoViableAltException("1:1: Tokens options {k=1; backtrack=true; } : ( X_Y | X | Y );", 2, 1, input);
+
+                throw nvae;
+            }
+        }
+        else {
+            if (backtracking>0) {failed=true; return ;}
+            NoViableAltException nvae =
+                new NoViableAltException("1:1: Tokens options {k=1; backtrack=true; } : ( X_Y | X | Y );", 2, 0, input);
+
+            throw nvae;
+        }
+        switch (alt2) {
+            case 1 :
+                // ActionAnalysis.g:1:41: X_Y
+                {
+                mX_Y(); if (failed) return ;
+
+                }
+                break;
+            case 2 :
+                // ActionAnalysis.g:1:45: X
+                {
+                mX(); if (failed) return ;
+
+                }
+                break;
+            case 3 :
+                // ActionAnalysis.g:1:47: Y
+                {
+                mY(); if (failed) return ;
+
+                }
+                break;
+
+        }
+
+    }
+
+    // $ANTLR start synpred1
+    public final void synpred1_fragment() throws RecognitionException {
+        // ActionAnalysis.g:1:41: ( X_Y )
+        // ActionAnalysis.g:1:41: X_Y
+        {
+        mX_Y(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred1
+
+    // $ANTLR start synpred2
+    public final void synpred2_fragment() throws RecognitionException {
+        // ActionAnalysis.g:1:45: ( X )
+        // ActionAnalysis.g:1:45: X
+        {
+        mX(); if (failed) return ;
+
+        }
+    }
+    // $ANTLR end synpred2
+
+    public final boolean synpred2() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred2_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+    public final boolean synpred1() {
+        backtracking++;
+        int start = input.mark();
+        try {
+            synpred1_fragment(); // can never throw exception
+        } catch (RecognitionException re) {
+            System.err.println("impossible: "+re);
+        }
+        boolean success = !failed;
+        input.rewind(start);
+        backtracking--;
+        failed=false;
+        return success;
+    }
+
+
+ 
+
+}
\ No newline at end of file
diff --git a/src/org/antlr/tool/AssignTokenTypesWalker.java b/src/org/antlr/tool/AssignTokenTypesWalker.java
new file mode 100644
index 0000000..fb747da
--- /dev/null
+++ b/src/org/antlr/tool/AssignTokenTypesWalker.java
@@ -0,0 +1,1949 @@
+// $ANTLR 2.7.7 (2006-01-29): "assign.types.g" -> "AssignTokenTypesWalker.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.analysis.*;
+	import org.antlr.misc.*;
+	import java.io.*;
+
+import antlr.TreeParser;
+import antlr.Token;
+import antlr.collections.AST;
+import antlr.RecognitionException;
+import antlr.ANTLRException;
+import antlr.NoViableAltException;
+import antlr.MismatchedTokenException;
+import antlr.SemanticException;
+import antlr.collections.impl.BitSet;
+import antlr.ASTPair;
+import antlr.collections.impl.ASTArray;
+
+
+/** [Warning: TJP says that this is probably out of date as of 11/19/2005,
+ *   but since it's probably still useful, I'll leave in.  Don't have energy
+ *   to update at the moment.]
+ *
+ *  Compute the token types for all literals and rules etc..  There are
+ *  a few different cases to consider for grammar types and a few situations
+ *  within.
+ *
+ *  CASE 1 : pure parser grammar
+ *	a) Any reference to a token gets a token type.
+ *  b) The tokens section may alias a token name to a string or char
+ *
+ *  CASE 2 : pure lexer grammar
+ *  a) Import token vocabulary if available. Set token types for any new tokens
+ *     to values above last imported token type
+ *  b) token rule definitions get token types if not already defined
+ *  c) literals do NOT get token types
+ *
+ *  CASE 3 : merged parser / lexer grammar
+ *	a) Any char or string literal gets a token type in a parser rule
+ *  b) Any reference to a token gets a token type if not referencing
+ *     a fragment lexer rule
+ *  c) The tokens section may alias a token name to a string or char
+ *     which must add a rule to the lexer
+ *  d) token rule definitions get token types if not already defined
+ *  e) token rule definitions may also alias a token name to a literal.
+ *     E.g., Rule 'FOR : "for";' will alias FOR to "for" in the sense that
+ *     references to either in the parser grammar will yield the token type
+ *
+ *  What this pass does:
+ *
+ *  0. Collects basic info about the grammar like grammar name and type;
+ *     Oh, I have go get the options in case they affect the token types.
+ *     E.g., tokenVocab option.
+ *     Imports any token vocab name/type pairs into a local hashtable.
+ *  1. Finds a list of all literals and token names.
+ *  2. Finds a list of all token name rule definitions;
+ *     no token rules implies pure parser.
+ *  3. Finds a list of all simple token rule defs of form "<NAME> : <literal>;"
+ *     and aliases them.
+ *  4. Walks token names table and assign types to any unassigned
+ *  5. Walks aliases and assign types to referenced literals
+ *  6. Walks literals, assigning types if untyped
+ *  4. Informs the Grammar object of the type definitions such as:
+ *     g.defineToken(<charliteral>, ttype);
+ *     g.defineToken(<stringliteral>, ttype);
+ *     g.defineToken(<tokenID>, ttype);
+ *     where some of the ttype values will be the same for aliases tokens.
+ */
+public class AssignTokenTypesWalker extends antlr.TreeParser       implements AssignTokenTypesWalkerTokenTypes
+ {
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "assign.types: "+ex.toString(),
+            ex);
+    }
+
+protected GrammarAST stringAlias;
+protected GrammarAST charAlias;
+protected GrammarAST stringAlias2;
+protected GrammarAST charAlias2;
+
+protected Grammar grammar;
+protected Map stringLiterals = new LinkedHashMap(); // Map<literal,Integer>
+protected Map tokens = new LinkedHashMap();         // Map<name,Integer>
+/** Track actual lexer rule defs so we don't get repeated token defs in 
+ *  generated lexer.
+ */
+protected Set tokenRuleDefs = new HashSet();        // Set<name>
+protected Map aliases = new LinkedHashMap();        // Map<name,literal>
+protected String currentRuleName;
+protected static final Integer UNASSIGNED = Utils.integer(-1);
+protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
+
+/** Track string literals in any non-lexer rule (could be in tokens{} section) */
+protected void trackString(GrammarAST t) {
+	// if lexer, don't allow aliasing in tokens section
+	if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
+		ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
+								  grammar,
+								  t.token,
+								  t.getText());
+		return;
+	}
+	// in a plain parser grammar rule, cannot reference literals
+	// (unless defined previously via tokenVocab option)
+	if ( grammar.type==Grammar.PARSER &&
+	     grammar.getTokenType(t.getText())==Label.INVALID )
+    {
+		ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
+								  grammar,
+								  t.token,
+								  t.getText());
+	}
+	// otherwise add literal to token types if referenced from parser rule
+	// or in the tokens{} section
+	if ( (currentRuleName==null ||
+         Character.isLowerCase(currentRuleName.charAt(0))) &&
+         grammar.getTokenType(t.getText())==Label.INVALID )
+	{
+		stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
+	}
+}
+
+protected void trackToken(GrammarAST t) {
+	// imported token names might exist, only add if new
+	if ( grammar.getTokenType(t.getText())==Label.INVALID ) {
+		tokens.put(t.getText(), UNASSIGNED);
+	}
+}
+
+protected void trackTokenRule(GrammarAST t,
+							  GrammarAST modifier,
+							  GrammarAST block)
+{
+	// imported token names might exist, only add if new
+	if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
+		if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
+			return;
+		}
+		int existing = grammar.getTokenType(t.getText());
+		if ( existing==Label.INVALID ) {
+			tokens.put(t.getText(), UNASSIGNED);
+		}
+		// look for "<TOKEN> : <literal> ;" pattern
+        // (can have optional action last)
+		if ( block.hasSameTreeStructure(charAlias) ||
+             block.hasSameTreeStructure(stringAlias) ||
+             block.hasSameTreeStructure(charAlias2) ||
+             block.hasSameTreeStructure(stringAlias2) )
+        {
+			alias(t, (GrammarAST)block.getFirstChild().getFirstChild());
+			tokenRuleDefs.add(t.getText());
+		}
+	}
+	// else error
+}
+
+protected void alias(GrammarAST t, GrammarAST s) {
+	aliases.put(t.getText(), s.getText());
+}
+
+protected void assignTypes() {
+	/*
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+	*/
+
+	assignTokenIDTypes();
+
+	aliasTokenIDsAndLiterals();
+
+	assignStringTypes();
+
+	/*
+	System.out.println("AFTER:");
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+	*/
+
+	notifyGrammarObject();
+}
+
+	protected void assignStringTypes() {
+		// walk string literals assigning types to unassigned ones
+		Set s = stringLiterals.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String lit = (String) it.next();
+			Integer oldTypeI = (Integer)stringLiterals.get(lit);
+			int oldType = oldTypeI.intValue();
+			if ( oldType<Label.MIN_TOKEN_TYPE ) {
+				Integer typeI = Utils.integer(grammar.getNewTokenType());
+				stringLiterals.put(lit, typeI);
+				// if string referenced in combined grammar parser rule,
+				// automatically define in the generated lexer
+				grammar.defineLexerRuleForStringLiteral(lit, typeI.intValue());
+			}
+		}
+	}
+
+	protected void aliasTokenIDsAndLiterals() {
+		if ( grammar.type==Grammar.LEXER ) {
+			return; // strings/chars are never token types in LEXER
+		}
+		// walk aliases if any and assign types to aliased literals if literal
+		// was referenced
+		Set s = aliases.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			String literal = (String)aliases.get(tokenID);
+			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
+				stringLiterals.put(literal, tokens.get(tokenID));
+				// an alias still means you need a lexer rule for it
+				Integer typeI = (Integer)tokens.get(tokenID);
+				if ( !tokenRuleDefs.contains(tokenID) ) {
+					grammar.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI.intValue());
+				}
+			}
+		}
+	}
+
+	protected void assignTokenIDTypes() {
+		// walk token names, assigning values if unassigned
+		Set s = tokens.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			if ( tokens.get(tokenID)==UNASSIGNED ) {
+				tokens.put(tokenID, Utils.integer(grammar.getNewTokenType()));
+			}
+		}
+	}
+
+	protected void notifyGrammarObject() {
+		Set s = tokens.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			int ttype = ((Integer)tokens.get(tokenID)).intValue();
+			grammar.defineToken(tokenID, ttype);
+		}
+		s = stringLiterals.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String lit = (String) it.next();
+			int ttype = ((Integer)stringLiterals.get(lit)).intValue();
+			grammar.defineToken(lit, ttype);
+		}
+	}
+
+	protected void init(Grammar g) {
+		this.grammar = g;
+        stringAlias = 
+            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(STRING_LITERAL)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
+        charAlias =
+            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(CHAR_LITERAL)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
+        stringAlias2 =
+            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(4)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(STRING_LITERAL)).add((GrammarAST)astFactory.create(ACTION)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
+        charAlias2 = 
+            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(4)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(CHAR_LITERAL)).add((GrammarAST)astFactory.create(ACTION)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
+	}
+public AssignTokenTypesWalker() {
+	tokenNames = _tokenNames;
+}
+
+	public final void grammar(AST _t,
+		Grammar g
+	) throws RecognitionException {
+		
+		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+			init(g);
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LEXER_GRAMMAR:
+			{
+				AST __t3 = _t;
+				GrammarAST tmp1_AST_in = (GrammarAST)_t;
+				match(_t,LEXER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammar.type = Grammar.LEXER;
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t3;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PARSER_GRAMMAR:
+			{
+				AST __t4 = _t;
+				GrammarAST tmp2_AST_in = (GrammarAST)_t;
+				match(_t,PARSER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammar.type = Grammar.PARSER;
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t4;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TREE_GRAMMAR:
+			{
+				AST __t5 = _t;
+				GrammarAST tmp3_AST_in = (GrammarAST)_t;
+				match(_t,TREE_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammar.type = Grammar.TREE_PARSER;
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t5;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case COMBINED_GRAMMAR:
+			{
+				AST __t6 = _t;
+				GrammarAST tmp4_AST_in = (GrammarAST)_t;
+				match(_t,COMBINED_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammar.type = Grammar.COMBINED;
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t6;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			assignTypes();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void grammarSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST cmt = null;
+		Map opts=null;
+		
+		try {      // for error handling
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			grammar.setName(id.getText());
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case DOC_COMMENT:
+			{
+				cmt = (GrammarAST)_t;
+				match(_t,DOC_COMMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case OPTIONS:
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKENS:
+			{
+				tokensSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop12:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==SCOPE)) {
+					attrScope(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop12;
+				}
+				
+			} while (true);
+			}
+			{
+			_loop14:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp5_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop14;
+				}
+				
+			} while (true);
+			}
+			rules(_t);
+			_t = _retTree;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final Map  optionsSpec(AST _t) throws RecognitionException {
+		Map opts=new HashMap();
+		
+		GrammarAST optionsSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t18 = _t;
+			GrammarAST tmp6_AST_in = (GrammarAST)_t;
+			match(_t,OPTIONS);
+			_t = _t.getFirstChild();
+			{
+			int _cnt20=0;
+			_loop20:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ASSIGN)) {
+					option(_t,opts);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt20>=1 ) { break _loop20; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt20++;
+			} while (true);
+			}
+			_t = __t18;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return opts;
+	}
+	
+	public final void tokensSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST tokensSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t30 = _t;
+			GrammarAST tmp7_AST_in = (GrammarAST)_t;
+			match(_t,TOKENS);
+			_t = _t.getFirstChild();
+			{
+			int _cnt32=0;
+			_loop32:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ASSIGN||_t.getType()==TOKEN_REF)) {
+					tokenSpec(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt32>=1 ) { break _loop32; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt32++;
+			} while (true);
+			}
+			_t = __t30;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void attrScope(AST _t) throws RecognitionException {
+		
+		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t16 = _t;
+			GrammarAST tmp8_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			GrammarAST tmp9_AST_in = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			GrammarAST tmp10_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t16;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rules(AST _t) throws RecognitionException {
+		
+		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			int _cnt38=0;
+			_loop38:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==RULE)) {
+					rule(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt38>=1 ) { break _loop38; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt38++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void option(AST _t,
+		Map opts
+	) throws RecognitionException {
+		
+		GrammarAST option_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		
+		String key=null;
+		Object value=null;
+		
+		
+		try {      // for error handling
+			AST __t22 = _t;
+			GrammarAST tmp11_AST_in = (GrammarAST)_t;
+			match(_t,ASSIGN);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			key=id.getText();
+			value=optionValue(_t);
+			_t = _retTree;
+			_t = __t22;
+			_t = _t.getNextSibling();
+			
+			opts.put(key,value);
+			// check for grammar-level option to import vocabulary
+			if ( currentRuleName==null && key.equals("tokenVocab") ) {
+			grammar.importTokenVocabulary((String)value);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final Object  optionValue(AST _t) throws RecognitionException {
+		Object value=null;
+		
+		GrammarAST optionValue_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST s = null;
+		GrammarAST c = null;
+		GrammarAST i = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ID:
+			{
+				id = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				value = id.getText();
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				value = s.getText();
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				value = c.getText();
+				break;
+			}
+			case INT:
+			{
+				i = (GrammarAST)_t;
+				match(_t,INT);
+				_t = _t.getNextSibling();
+				value = new Integer(i.getText());
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return value;
+	}
+	
+	public final void charSet(AST _t) throws RecognitionException {
+		
+		GrammarAST charSet_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t25 = _t;
+			GrammarAST tmp12_AST_in = (GrammarAST)_t;
+			match(_t,CHARSET);
+			_t = _t.getFirstChild();
+			charSetElement(_t);
+			_t = _retTree;
+			_t = __t25;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void charSetElement(AST _t) throws RecognitionException {
+		
+		GrammarAST charSetElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST c = null;
+		GrammarAST c1 = null;
+		GrammarAST c2 = null;
+		GrammarAST c3 = null;
+		GrammarAST c4 = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case OR:
+			{
+				AST __t27 = _t;
+				GrammarAST tmp13_AST_in = (GrammarAST)_t;
+				match(_t,OR);
+				_t = _t.getFirstChild();
+				c1 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				c2 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t27;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RANGE:
+			{
+				AST __t28 = _t;
+				GrammarAST tmp14_AST_in = (GrammarAST)_t;
+				match(_t,RANGE);
+				_t = _t.getFirstChild();
+				c3 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				c4 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t28;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tokenSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST tokenSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST t = null;
+		GrammarAST t2 = null;
+		GrammarAST s = null;
+		GrammarAST c = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKEN_REF:
+			{
+				t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				trackToken(t);
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t34 = _t;
+				GrammarAST tmp15_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				t2 = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				trackToken(t2);
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case STRING_LITERAL:
+				{
+					s = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getNextSibling();
+					trackString(s); alias(t2,s);
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					c = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getNextSibling();
+					trackString(c); alias(t2,c);
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t34;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rule(AST _t) throws RecognitionException {
+		
+		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST m = null;
+		GrammarAST b = null;
+		
+		try {      // for error handling
+			AST __t40 = _t;
+			GrammarAST tmp16_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			currentRuleName=id.getText();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				m = _t==ASTNULL ? null : (GrammarAST)_t;
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			GrammarAST tmp17_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				GrammarAST tmp18_AST_in = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RET:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			}
+			{
+			GrammarAST tmp19_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				GrammarAST tmp20_AST_in = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case OPTIONS:
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop49:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp21_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop49;
+				}
+				
+			} while (true);
+			}
+			b = _t==ASTNULL ? null : (GrammarAST)_t;
+			block(_t);
+			_t = _retTree;
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp22_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			trackTokenRule(id,m,b);
+			_t = __t40;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void modifier(AST _t) throws RecognitionException {
+		
+		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_protected:
+			{
+				GrammarAST tmp23_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_protected);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_public:
+			{
+				GrammarAST tmp24_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_public);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_private:
+			{
+				GrammarAST tmp25_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_private);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case FRAGMENT:
+			{
+				GrammarAST tmp26_AST_in = (GrammarAST)_t;
+				match(_t,FRAGMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ruleScopeSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t53 = _t;
+			GrammarAST tmp27_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ACTION:
+			{
+				GrammarAST tmp28_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			case ID:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop56:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ID)) {
+					GrammarAST tmp29_AST_in = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop56;
+				}
+				
+			} while (true);
+			}
+			_t = __t53;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void block(AST _t) throws RecognitionException {
+		
+		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t58 = _t;
+			GrammarAST tmp30_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case ALT:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			int _cnt61=0;
+			_loop61:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					alternative(_t);
+					_t = _retTree;
+					rewrite(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt61>=1 ) { break _loop61; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt61++;
+			} while (true);
+			}
+			GrammarAST tmp31_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t58;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void exceptionGroup(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			{
+				{
+				int _cnt68=0;
+				_loop68:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==LITERAL_catch)) {
+						exceptionHandler(_t);
+						_t = _retTree;
+					}
+					else {
+						if ( _cnt68>=1 ) { break _loop68; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt68++;
+				} while (true);
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case LITERAL_finally:
+				{
+					finallyClause(_t);
+					_t = _retTree;
+					break;
+				}
+				case EOR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				break;
+			}
+			case LITERAL_finally:
+			{
+				finallyClause(_t);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void alternative(AST _t) throws RecognitionException {
+		
+		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t63 = _t;
+			GrammarAST tmp32_AST_in = (GrammarAST)_t;
+			match(_t,ALT);
+			_t = _t.getFirstChild();
+			{
+			int _cnt65=0;
+			_loop65:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
+					element(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt65>=1 ) { break _loop65; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt65++;
+			} while (true);
+			}
+			GrammarAST tmp33_AST_in = (GrammarAST)_t;
+			match(_t,EOA);
+			_t = _t.getNextSibling();
+			_t = __t63;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			_loop79:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==REWRITE)) {
+					AST __t76 = _t;
+					GrammarAST tmp34_AST_in = (GrammarAST)_t;
+					match(_t,REWRITE);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case SEMPRED:
+					{
+						GrammarAST tmp35_AST_in = (GrammarAST)_t;
+						match(_t,SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case ALT:
+					case TEMPLATE:
+					case ACTION:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case ALT:
+					{
+						GrammarAST tmp36_AST_in = (GrammarAST)_t;
+						match(_t,ALT);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case TEMPLATE:
+					{
+						GrammarAST tmp37_AST_in = (GrammarAST)_t;
+						match(_t,TEMPLATE);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case ACTION:
+					{
+						GrammarAST tmp38_AST_in = (GrammarAST)_t;
+						match(_t,ACTION);
+						_t = _t.getNextSibling();
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t76;
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop79;
+				}
+				
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void element(AST _t) throws RecognitionException {
+		
+		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				AST __t81 = _t;
+				GrammarAST tmp39_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t81;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				AST __t82 = _t;
+				GrammarAST tmp40_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t82;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case WILDCARD:
+			{
+				atom(_t);
+				_t = _retTree;
+				break;
+			}
+			case NOT:
+			{
+				AST __t83 = _t;
+				GrammarAST tmp41_AST_in = (GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t83;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RANGE:
+			{
+				AST __t84 = _t;
+				GrammarAST tmp42_AST_in = (GrammarAST)_t;
+				match(_t,RANGE);
+				_t = _t.getFirstChild();
+				atom(_t);
+				_t = _retTree;
+				atom(_t);
+				_t = _retTree;
+				_t = __t84;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t85 = _t;
+				GrammarAST tmp43_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				atom(_t);
+				_t = _retTree;
+				atom(_t);
+				_t = _retTree;
+				_t = __t85;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t86 = _t;
+				GrammarAST tmp44_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				GrammarAST tmp45_AST_in = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				element(_t);
+				_t = _retTree;
+				_t = __t86;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PLUS_ASSIGN:
+			{
+				AST __t87 = _t;
+				GrammarAST tmp46_AST_in = (GrammarAST)_t;
+				match(_t,PLUS_ASSIGN);
+				_t = _t.getFirstChild();
+				GrammarAST tmp47_AST_in = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				element(_t);
+				_t = _retTree;
+				_t = __t87;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			{
+				ebnf(_t);
+				_t = _retTree;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				tree(_t);
+				_t = _retTree;
+				break;
+			}
+			case SYNPRED:
+			{
+				AST __t88 = _t;
+				GrammarAST tmp48_AST_in = (GrammarAST)_t;
+				match(_t,SYNPRED);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t88;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp49_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case SEMPRED:
+			{
+				GrammarAST tmp50_AST_in = (GrammarAST)_t;
+				match(_t,SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case SYN_SEMPRED:
+			{
+				GrammarAST tmp51_AST_in = (GrammarAST)_t;
+				match(_t,SYN_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BACKTRACK_SEMPRED:
+			{
+				GrammarAST tmp52_AST_in = (GrammarAST)_t;
+				match(_t,BACKTRACK_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case GATED_SEMPRED:
+			{
+				GrammarAST tmp53_AST_in = (GrammarAST)_t;
+				match(_t,GATED_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case EPSILON:
+			{
+				GrammarAST tmp54_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void exceptionHandler(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t71 = _t;
+			GrammarAST tmp55_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_catch);
+			_t = _t.getFirstChild();
+			GrammarAST tmp56_AST_in = (GrammarAST)_t;
+			match(_t,ARG_ACTION);
+			_t = _t.getNextSibling();
+			GrammarAST tmp57_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t71;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void finallyClause(AST _t) throws RecognitionException {
+		
+		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t73 = _t;
+			GrammarAST tmp58_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_finally);
+			_t = _t.getFirstChild();
+			GrammarAST tmp59_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t73;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void atom(AST _t) throws RecognitionException {
+		
+		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST t = null;
+		GrammarAST c = null;
+		GrammarAST s = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case RULE_REF:
+			{
+				GrammarAST tmp60_AST_in = (GrammarAST)_t;
+				match(_t,RULE_REF);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TOKEN_REF:
+			{
+				t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				trackToken(t);
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				trackString(c);
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				trackString(s);
+				break;
+			}
+			case WILDCARD:
+			{
+				GrammarAST tmp61_AST_in = (GrammarAST)_t;
+				match(_t,WILDCARD);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ebnf(AST _t) throws RecognitionException {
+		
+		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case BLOCK:
+			{
+				block(_t);
+				_t = _retTree;
+				break;
+			}
+			case OPTIONAL:
+			{
+				AST __t90 = _t;
+				GrammarAST tmp62_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t90;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CLOSURE:
+			{
+				AST __t91 = _t;
+				GrammarAST tmp63_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t91;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				AST __t92 = _t;
+				GrammarAST tmp64_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t92;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tree(AST _t) throws RecognitionException {
+		
+		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t94 = _t;
+			GrammarAST tmp65_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			element(_t);
+			_t = _retTree;
+			{
+			_loop96:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
+					element(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop96;
+				}
+				
+			} while (true);
+			}
+			_t = __t94;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ast_suffix(AST _t) throws RecognitionException {
+		
+		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				GrammarAST tmp66_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				GrammarAST tmp67_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	
+	public static final String[] _tokenNames = {
+		"<0>",
+		"EOF",
+		"<2>",
+		"NULL_TREE_LOOKAHEAD",
+		"\"options\"",
+		"\"tokens\"",
+		"\"parser\"",
+		"LEXER",
+		"RULE",
+		"BLOCK",
+		"OPTIONAL",
+		"CLOSURE",
+		"POSITIVE_CLOSURE",
+		"SYNPRED",
+		"RANGE",
+		"CHAR_RANGE",
+		"EPSILON",
+		"ALT",
+		"EOR",
+		"EOB",
+		"EOA",
+		"ID",
+		"ARG",
+		"ARGLIST",
+		"RET",
+		"LEXER_GRAMMAR",
+		"PARSER_GRAMMAR",
+		"TREE_GRAMMAR",
+		"COMBINED_GRAMMAR",
+		"INITACTION",
+		"LABEL",
+		"TEMPLATE",
+		"\"scope\"",
+		"GATED_SEMPRED",
+		"SYN_SEMPRED",
+		"BACKTRACK_SEMPRED",
+		"\"fragment\"",
+		"ACTION",
+		"DOC_COMMENT",
+		"SEMI",
+		"\"lexer\"",
+		"\"tree\"",
+		"\"grammar\"",
+		"AMPERSAND",
+		"COLON",
+		"RCURLY",
+		"ASSIGN",
+		"STRING_LITERAL",
+		"CHAR_LITERAL",
+		"INT",
+		"STAR",
+		"TOKEN_REF",
+		"\"protected\"",
+		"\"public\"",
+		"\"private\"",
+		"BANG",
+		"ARG_ACTION",
+		"\"returns\"",
+		"\"throws\"",
+		"COMMA",
+		"LPAREN",
+		"OR",
+		"RPAREN",
+		"\"catch\"",
+		"\"finally\"",
+		"PLUS_ASSIGN",
+		"SEMPRED",
+		"IMPLIES",
+		"ROOT",
+		"RULE_REF",
+		"NOT",
+		"TREE_BEGIN",
+		"QUESTION",
+		"PLUS",
+		"WILDCARD",
+		"REWRITE",
+		"DOLLAR",
+		"DOUBLE_QUOTE_STRING_LITERAL",
+		"DOUBLE_ANGLE_STRING_LITERAL",
+		"WS",
+		"COMMENT",
+		"SL_COMMENT",
+		"ML_COMMENT",
+		"OPEN_ELEMENT_OPTION",
+		"CLOSE_ELEMENT_OPTION",
+		"ESC",
+		"DIGIT",
+		"XDIGIT",
+		"NESTED_ARG_ACTION",
+		"NESTED_ACTION",
+		"ACTION_CHAR_LITERAL",
+		"ACTION_STRING_LITERAL",
+		"ACTION_ESC",
+		"WS_LOOP",
+		"INTERNAL_RULE_REF",
+		"WS_OPT",
+		"SRC",
+		"CHARSET"
+	};
+	
+	}
+	
diff --git a/src/org/antlr/tool/AssignTokenTypesWalker.smap b/src/org/antlr/tool/AssignTokenTypesWalker.smap
new file mode 100644
index 0000000..8599d98
--- /dev/null
+++ b/src/org/antlr/tool/AssignTokenTypesWalker.smap
@@ -0,0 +1,1403 @@
+SMAP
+AssignTokenTypesWalker.java
+G
+*S G
+*F
++ 0 assign.types.g
+assign.types.g
+*L
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+1:32
+1:33
+1:34
+94:101
+95:102
+96:103
+97:104
+98:105
+99:106
+100:107
+101:108
+102:109
+103:110
+104:111
+105:112
+106:113
+107:114
+108:115
+110:117
+111:118
+112:119
+113:120
+115:122
+116:123
+117:124
+118:125
+119:126
+120:127
+121:128
+122:129
+123:130
+124:131
+125:132
+127:134
+128:135
+129:136
+130:137
+131:138
+132:139
+133:140
+134:141
+135:142
+136:143
+137:144
+138:145
+139:146
+140:147
+141:148
+142:149
+143:150
+144:151
+145:152
+146:153
+147:154
+148:155
+149:156
+150:157
+151:158
+152:159
+153:160
+154:161
+155:162
+157:164
+158:165
+159:166
+160:167
+161:168
+162:169
+164:171
+165:172
+166:173
+167:174
+168:175
+169:176
+170:177
+171:178
+172:179
+173:180
+174:181
+175:182
+176:183
+177:184
+178:185
+179:186
+180:187
+181:188
+182:189
+183:190
+184:191
+185:192
+186:193
+187:194
+188:195
+189:196
+191:198
+192:199
+193:200
+195:202
+196:203
+197:204
+198:205
+199:206
+200:207
+202:209
+204:211
+206:213
+208:215
+209:216
+210:217
+211:218
+212:219
+213:220
+215:222
+216:223
+218:225
+219:226
+220:227
+221:228
+222:229
+223:230
+224:231
+225:232
+226:233
+227:234
+228:235
+229:236
+230:237
+231:238
+232:239
+233:240
+235:242
+236:243
+237:244
+238:245
+239:246
+240:247
+241:248
+242:249
+243:250
+244:251
+245:252
+246:253
+247:254
+248:255
+249:256
+250:257
+251:258
+252:259
+253:260
+254:261
+256:263
+257:264
+258:265
+259:266
+260:267
+261:268
+262:269
+263:270
+264:271
+265:272
+267:274
+268:275
+269:276
+270:277
+271:278
+272:279
+273:280
+274:281
+275:282
+276:283
+277:284
+278:285
+279:286
+280:287
+282:289
+283:290
+284:291
+285:292
+286:293
+287:294
+288:295
+289:296
+290:297
+291:298
+292:299
+295:304
+295:305
+295:306
+295:313
+295:376
+295:377
+295:378
+295:379
+295:380
+295:381
+295:382
+296:310
+299:315
+299:316
+299:317
+299:318
+299:319
+299:320
+299:321
+299:322
+299:323
+299:324
+299:325
+299:326
+299:327
+299:369
+299:370
+299:371
+299:372
+299:373
+300:330
+300:331
+300:332
+300:333
+300:334
+300:335
+300:336
+300:337
+300:338
+300:339
+300:340
+301:343
+301:344
+301:345
+301:346
+301:347
+301:348
+301:349
+301:350
+301:351
+301:352
+301:353
+302:356
+302:357
+302:358
+302:359
+302:360
+302:361
+302:362
+302:363
+302:364
+302:365
+302:366
+304:375
+307:384
+307:389
+307:391
+307:494
+307:495
+307:496
+307:497
+307:498
+307:499
+307:500
+309:387
+309:392
+309:393
+309:394
+309:395
+310:388
+310:397
+310:398
+310:399
+310:400
+310:401
+310:402
+310:403
+310:414
+310:415
+310:416
+310:417
+310:418
+311:421
+311:422
+311:423
+311:424
+311:425
+311:426
+311:436
+311:437
+311:438
+311:439
+311:440
+312:443
+312:444
+312:445
+312:446
+312:447
+312:448
+312:457
+312:458
+312:459
+312:460
+312:461
+313:463
+313:464
+313:465
+313:466
+313:467
+313:468
+313:469
+313:470
+313:471
+313:472
+313:473
+313:475
+313:476
+314:477
+314:478
+314:479
+314:480
+314:481
+314:482
+314:483
+314:484
+314:485
+314:486
+314:487
+314:488
+314:490
+314:491
+315:492
+315:493
+318:574
+318:578
+318:591
+318:592
+318:593
+318:594
+318:595
+318:596
+318:597
+319:579
+319:580
+319:581
+319:582
+319:583
+319:584
+319:585
+319:586
+319:587
+319:588
+319:589
+319:590
+322:502
+322:503
+322:507
+322:530
+322:531
+322:532
+322:533
+322:534
+322:535
+322:536
+322:537
+323:508
+323:509
+323:510
+323:511
+323:513
+323:514
+323:515
+323:516
+323:517
+323:518
+323:519
+323:520
+323:521
+323:522
+323:523
+323:525
+323:526
+323:527
+323:528
+323:529
+326:628
+326:629
+326:630
+326:639
+326:659
+326:660
+326:661
+326:662
+326:663
+326:664
+326:665
+327:635
+328:636
+331:633
+331:640
+331:641
+331:642
+331:643
+331:644
+331:645
+331:646
+331:647
+331:648
+331:649
+331:650
+331:651
+333:653
+334:654
+335:655
+336:656
+337:657
+341:667
+341:668
+341:676
+341:677
+341:678
+341:711
+341:712
+341:713
+341:714
+341:715
+341:716
+341:717
+341:718
+341:719
+341:720
+341:721
+341:722
+341:723
+342:671
+342:679
+342:680
+342:681
+342:682
+342:683
+342:684
+343:672
+343:687
+343:688
+343:689
+343:690
+343:691
+343:692
+344:673
+344:695
+344:696
+344:697
+344:698
+344:699
+344:700
+345:674
+345:703
+345:704
+345:705
+345:706
+345:707
+345:708
+349:725
+349:729
+349:738
+349:739
+349:740
+349:741
+349:742
+349:743
+349:744
+350:730
+350:731
+350:732
+350:733
+350:734
+350:735
+350:736
+350:737
+353:746
+353:755
+353:756
+353:757
+353:797
+353:798
+353:799
+353:800
+353:801
+353:802
+353:803
+353:804
+353:805
+353:806
+353:807
+353:808
+354:749
+354:758
+354:759
+354:760
+354:761
+354:762
+355:750
+355:751
+355:765
+355:766
+355:767
+355:768
+355:769
+355:770
+355:771
+355:772
+355:773
+355:774
+355:775
+355:776
+355:777
+355:778
+356:752
+356:753
+356:781
+356:782
+356:783
+356:784
+356:785
+356:786
+356:787
+356:788
+356:789
+356:790
+356:791
+356:792
+356:793
+356:794
+359:539
+359:543
+359:566
+359:567
+359:568
+359:569
+359:570
+359:571
+359:572
+360:544
+360:545
+360:546
+360:547
+360:549
+360:550
+360:551
+360:552
+360:553
+360:554
+360:555
+360:556
+360:557
+360:558
+360:559
+360:561
+360:562
+360:563
+360:564
+360:565
+363:810
+363:818
+363:819
+363:820
+363:868
+363:869
+363:870
+363:871
+363:872
+363:873
+363:874
+363:875
+363:876
+363:877
+363:878
+363:879
+364:813
+364:821
+364:822
+364:823
+364:824
+364:825
+364:826
+365:829
+365:830
+365:831
+365:832
+365:833
+365:834
+365:864
+365:865
+366:814
+366:835
+366:836
+366:837
+366:838
+367:815
+367:840
+367:841
+367:842
+367:843
+367:844
+367:845
+367:846
+367:847
+367:858
+367:859
+367:860
+367:861
+367:862
+368:816
+368:850
+368:851
+368:852
+368:853
+368:854
+368:855
+373:599
+373:603
+373:620
+373:621
+373:622
+373:623
+373:624
+373:625
+373:626
+374:605
+374:606
+374:607
+374:608
+374:609
+374:610
+374:611
+374:612
+374:613
+374:614
+374:615
+374:617
+374:618
+374:619
+377:881
+377:888
+377:1058
+377:1059
+377:1060
+377:1061
+377:1062
+377:1063
+377:1064
+378:884
+378:889
+378:890
+378:891
+378:892
+378:893
+378:894
+378:895
+378:896
+378:1056
+378:1057
+379:885
+379:898
+379:899
+379:900
+379:901
+379:902
+379:903
+379:904
+379:905
+379:906
+379:907
+379:914
+379:915
+379:916
+379:917
+379:918
+380:921
+380:922
+380:923
+380:925
+380:926
+380:927
+380:928
+380:929
+380:930
+380:931
+380:938
+380:939
+380:940
+380:941
+380:942
+381:946
+381:947
+381:948
+381:950
+381:951
+381:952
+381:953
+381:954
+381:955
+381:956
+381:966
+381:967
+381:968
+381:969
+381:970
+382:974
+382:975
+382:976
+382:977
+382:978
+382:979
+382:988
+382:989
+382:990
+382:991
+382:992
+383:995
+383:996
+383:997
+383:998
+383:999
+383:1000
+383:1008
+383:1009
+383:1010
+383:1011
+383:1012
+384:1014
+384:1015
+384:1016
+384:1017
+384:1018
+384:1019
+384:1020
+384:1021
+384:1022
+384:1023
+384:1024
+384:1025
+384:1027
+384:1028
+385:886
+385:1029
+385:1030
+385:1031
+386:1033
+386:1034
+386:1035
+386:1036
+386:1037
+386:1038
+386:1039
+386:1046
+386:1047
+386:1048
+386:1049
+386:1050
+387:1052
+387:1053
+387:1054
+388:1055
+392:1066
+392:1070
+392:1071
+392:1072
+392:1101
+392:1102
+392:1103
+392:1104
+392:1105
+392:1106
+392:1107
+392:1108
+392:1109
+392:1110
+392:1111
+392:1112
+393:1073
+393:1074
+393:1075
+393:1076
+393:1077
+394:1080
+394:1081
+394:1082
+394:1083
+394:1084
+395:1087
+395:1088
+395:1089
+395:1090
+395:1091
+396:1094
+396:1095
+396:1096
+396:1097
+396:1098
+399:1114
+399:1118
+399:1161
+399:1162
+399:1163
+399:1164
+399:1165
+399:1166
+399:1167
+400:1119
+400:1120
+400:1121
+400:1122
+400:1124
+400:1125
+400:1126
+400:1127
+400:1128
+400:1129
+400:1130
+400:1138
+400:1139
+400:1140
+400:1141
+400:1142
+400:1144
+400:1145
+400:1146
+400:1147
+400:1148
+400:1149
+400:1150
+400:1151
+400:1152
+400:1153
+400:1154
+400:1155
+400:1157
+400:1158
+400:1159
+400:1160
+403:1169
+403:1173
+403:1220
+403:1221
+403:1222
+403:1223
+403:1224
+403:1225
+403:1226
+404:1174
+404:1175
+404:1176
+404:1177
+404:1218
+404:1219
+405:1179
+405:1180
+405:1181
+405:1182
+405:1183
+405:1184
+405:1191
+405:1192
+405:1193
+405:1194
+405:1195
+406:1198
+406:1199
+406:1200
+406:1201
+406:1202
+406:1203
+406:1204
+406:1205
+406:1206
+406:1207
+406:1208
+406:1209
+406:1210
+406:1212
+406:1213
+406:1214
+407:1215
+407:1216
+407:1217
+411:1293
+411:1297
+411:1323
+411:1324
+411:1325
+411:1326
+411:1327
+411:1328
+411:1329
+412:1298
+412:1299
+412:1300
+412:1301
+412:1303
+412:1304
+412:1305
+412:1306
+412:1307
+412:1308
+412:1309
+412:1310
+412:1311
+412:1312
+412:1313
+412:1315
+412:1316
+412:1317
+412:1318
+412:1319
+412:1320
+412:1321
+412:1322
+415:1228
+415:1232
+415:1233
+415:1234
+415:1280
+415:1281
+415:1282
+415:1283
+415:1284
+415:1285
+415:1286
+415:1287
+415:1288
+415:1289
+415:1290
+415:1291
+416:1235
+416:1236
+416:1238
+416:1239
+416:1240
+416:1241
+416:1242
+416:1243
+416:1244
+416:1245
+416:1246
+416:1247
+416:1248
+416:1250
+416:1251
+416:1252
+416:1254
+416:1255
+416:1256
+416:1257
+416:1258
+416:1259
+416:1266
+416:1267
+416:1268
+416:1269
+416:1270
+417:1274
+417:1275
+417:1276
+417:1277
+420:1607
+420:1611
+420:1624
+420:1625
+420:1626
+420:1627
+420:1628
+420:1629
+420:1630
+421:1612
+421:1613
+421:1614
+421:1615
+421:1616
+421:1617
+421:1618
+421:1619
+421:1620
+421:1621
+421:1622
+421:1623
+424:1632
+424:1636
+424:1646
+424:1647
+424:1648
+424:1649
+424:1650
+424:1651
+424:1652
+425:1637
+425:1638
+425:1639
+425:1640
+425:1641
+425:1642
+425:1643
+425:1644
+425:1645
+428:1331
+428:1335
+428:1406
+428:1407
+428:1408
+428:1409
+428:1410
+428:1411
+428:1412
+429:1336
+429:1337
+429:1338
+429:1339
+429:1340
+429:1341
+429:1342
+429:1343
+429:1344
+429:1346
+429:1347
+429:1348
+429:1349
+429:1350
+429:1351
+429:1352
+429:1361
+429:1362
+429:1363
+429:1364
+429:1365
+429:1368
+429:1369
+429:1370
+429:1371
+429:1372
+429:1373
+429:1374
+429:1377
+429:1378
+429:1379
+429:1380
+429:1381
+429:1384
+429:1385
+429:1386
+429:1387
+429:1388
+429:1391
+429:1392
+429:1393
+429:1394
+429:1395
+429:1397
+429:1398
+429:1399
+429:1400
+429:1401
+429:1402
+429:1404
+429:1405
+432:1414
+432:1418
+432:1419
+432:1420
+432:1594
+432:1595
+432:1596
+432:1597
+432:1598
+432:1599
+432:1600
+432:1601
+432:1602
+432:1603
+432:1604
+432:1605
+433:1421
+433:1422
+433:1423
+433:1424
+433:1425
+433:1426
+433:1427
+433:1428
+433:1429
+433:1430
+434:1433
+434:1434
+434:1435
+434:1436
+434:1437
+434:1438
+434:1439
+434:1440
+434:1441
+434:1442
+435:1445
+435:1446
+435:1447
+435:1448
+435:1449
+435:1450
+435:1451
+435:1452
+436:1455
+436:1456
+436:1457
+436:1458
+436:1459
+436:1460
+436:1461
+436:1462
+436:1463
+436:1464
+437:1467
+437:1468
+437:1469
+437:1470
+437:1471
+437:1472
+437:1473
+437:1474
+437:1475
+437:1476
+437:1477
+437:1478
+438:1481
+438:1482
+438:1483
+438:1484
+438:1485
+438:1486
+438:1487
+438:1488
+438:1489
+438:1490
+438:1491
+438:1492
+439:1495
+439:1496
+439:1497
+439:1498
+439:1499
+439:1500
+439:1501
+439:1502
+439:1503
+439:1504
+439:1505
+439:1506
+439:1507
+440:1510
+440:1511
+440:1512
+440:1513
+440:1514
+440:1515
+440:1516
+440:1517
+440:1518
+440:1519
+440:1520
+440:1521
+440:1522
+441:1525
+441:1526
+441:1527
+441:1528
+441:1529
+441:1530
+441:1531
+442:1534
+442:1535
+442:1536
+442:1537
+443:1540
+443:1541
+443:1542
+443:1543
+443:1544
+443:1545
+443:1546
+443:1547
+443:1548
+443:1549
+444:1552
+444:1553
+444:1554
+444:1555
+444:1556
+445:1559
+445:1560
+445:1561
+445:1562
+445:1563
+446:1566
+446:1567
+446:1568
+446:1569
+446:1570
+447:1573
+447:1574
+447:1575
+447:1576
+447:1577
+448:1580
+448:1581
+448:1582
+448:1583
+448:1584
+449:1587
+449:1588
+449:1589
+449:1590
+449:1591
+452:1715
+452:1719
+452:1720
+452:1721
+452:1722
+452:1723
+452:1724
+452:1725
+452:1764
+452:1765
+452:1766
+452:1767
+452:1768
+452:1769
+452:1770
+452:1771
+452:1772
+452:1773
+452:1774
+452:1775
+453:1728
+453:1729
+453:1730
+453:1731
+453:1732
+453:1733
+453:1734
+453:1735
+453:1736
+453:1737
+454:1740
+454:1741
+454:1742
+454:1743
+454:1744
+454:1745
+454:1746
+454:1747
+454:1748
+454:1749
+455:1752
+455:1753
+455:1754
+455:1755
+455:1756
+455:1757
+455:1758
+455:1759
+455:1760
+455:1761
+458:1777
+458:1781
+458:1782
+458:1783
+458:1784
+458:1785
+458:1786
+458:1787
+458:1788
+458:1789
+458:1790
+458:1791
+458:1792
+458:1793
+458:1794
+458:1795
+458:1796
+458:1797
+458:1798
+458:1800
+458:1801
+458:1802
+458:1803
+458:1804
+458:1805
+458:1806
+458:1807
+458:1808
+458:1809
+458:1810
+461:1654
+461:1661
+461:1662
+461:1663
+461:1702
+461:1703
+461:1704
+461:1705
+461:1706
+461:1707
+461:1708
+461:1709
+461:1710
+461:1711
+461:1712
+461:1713
+462:1664
+462:1665
+462:1666
+462:1667
+462:1668
+463:1657
+463:1671
+463:1672
+463:1673
+463:1674
+463:1675
+463:1676
+464:1658
+464:1679
+464:1680
+464:1681
+464:1682
+464:1683
+464:1684
+465:1659
+465:1687
+465:1688
+465:1689
+465:1690
+465:1691
+465:1692
+466:1695
+466:1696
+466:1697
+466:1698
+466:1699
+469:1812
+469:1816
+469:1817
+469:1818
+469:1833
+469:1834
+469:1835
+469:1836
+469:1837
+469:1838
+469:1839
+469:1840
+469:1841
+469:1842
+469:1843
+469:1844
+470:1819
+470:1820
+470:1821
+470:1822
+470:1823
+471:1826
+471:1827
+471:1828
+471:1829
+471:1830
+*E
diff --git a/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.java b/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.java
new file mode 100644
index 0000000..bbbc644
--- /dev/null
+++ b/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.java
@@ -0,0 +1,133 @@
+// $ANTLR 2.7.7 (2006-01-29): "assign.types.g" -> "AssignTokenTypesWalker.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.analysis.*;
+	import org.antlr.misc.*;
+	import java.io.*;
+
+public interface AssignTokenTypesWalkerTokenTypes {
+	int EOF = 1;
+	int NULL_TREE_LOOKAHEAD = 3;
+	int OPTIONS = 4;
+	int TOKENS = 5;
+	int PARSER = 6;
+	int LEXER = 7;
+	int RULE = 8;
+	int BLOCK = 9;
+	int OPTIONAL = 10;
+	int CLOSURE = 11;
+	int POSITIVE_CLOSURE = 12;
+	int SYNPRED = 13;
+	int RANGE = 14;
+	int CHAR_RANGE = 15;
+	int EPSILON = 16;
+	int ALT = 17;
+	int EOR = 18;
+	int EOB = 19;
+	int EOA = 20;
+	int ID = 21;
+	int ARG = 22;
+	int ARGLIST = 23;
+	int RET = 24;
+	int LEXER_GRAMMAR = 25;
+	int PARSER_GRAMMAR = 26;
+	int TREE_GRAMMAR = 27;
+	int COMBINED_GRAMMAR = 28;
+	int INITACTION = 29;
+	int LABEL = 30;
+	int TEMPLATE = 31;
+	int SCOPE = 32;
+	int GATED_SEMPRED = 33;
+	int SYN_SEMPRED = 34;
+	int BACKTRACK_SEMPRED = 35;
+	int FRAGMENT = 36;
+	int ACTION = 37;
+	int DOC_COMMENT = 38;
+	int SEMI = 39;
+	int LITERAL_lexer = 40;
+	int LITERAL_tree = 41;
+	int LITERAL_grammar = 42;
+	int AMPERSAND = 43;
+	int COLON = 44;
+	int RCURLY = 45;
+	int ASSIGN = 46;
+	int STRING_LITERAL = 47;
+	int CHAR_LITERAL = 48;
+	int INT = 49;
+	int STAR = 50;
+	int TOKEN_REF = 51;
+	int LITERAL_protected = 52;
+	int LITERAL_public = 53;
+	int LITERAL_private = 54;
+	int BANG = 55;
+	int ARG_ACTION = 56;
+	int LITERAL_returns = 57;
+	int LITERAL_throws = 58;
+	int COMMA = 59;
+	int LPAREN = 60;
+	int OR = 61;
+	int RPAREN = 62;
+	int LITERAL_catch = 63;
+	int LITERAL_finally = 64;
+	int PLUS_ASSIGN = 65;
+	int SEMPRED = 66;
+	int IMPLIES = 67;
+	int ROOT = 68;
+	int RULE_REF = 69;
+	int NOT = 70;
+	int TREE_BEGIN = 71;
+	int QUESTION = 72;
+	int PLUS = 73;
+	int WILDCARD = 74;
+	int REWRITE = 75;
+	int DOLLAR = 76;
+	int DOUBLE_QUOTE_STRING_LITERAL = 77;
+	int DOUBLE_ANGLE_STRING_LITERAL = 78;
+	int WS = 79;
+	int COMMENT = 80;
+	int SL_COMMENT = 81;
+	int ML_COMMENT = 82;
+	int OPEN_ELEMENT_OPTION = 83;
+	int CLOSE_ELEMENT_OPTION = 84;
+	int ESC = 85;
+	int DIGIT = 86;
+	int XDIGIT = 87;
+	int NESTED_ARG_ACTION = 88;
+	int NESTED_ACTION = 89;
+	int ACTION_CHAR_LITERAL = 90;
+	int ACTION_STRING_LITERAL = 91;
+	int ACTION_ESC = 92;
+	int WS_LOOP = 93;
+	int INTERNAL_RULE_REF = 94;
+	int WS_OPT = 95;
+	int SRC = 96;
+	int CHARSET = 97;
+}
diff --git a/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.txt b/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.txt
new file mode 100644
index 0000000..ff59099
--- /dev/null
+++ b/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.txt
@@ -0,0 +1,96 @@
+// $ANTLR 2.7.7 (2006-01-29): assign.types.g -> AssignTokenTypesWalkerTokenTypes.txt$
+AssignTokenTypesWalker    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
+CHARSET=97
diff --git a/src/org/antlr/tool/Attribute.java b/src/org/antlr/tool/Attribute.java
new file mode 100644
index 0000000..001b684
--- /dev/null
+++ b/src/org/antlr/tool/Attribute.java
@@ -0,0 +1,133 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+/** Track the names of attributes defined in arg lists, return values,
+ *  scope blocks etc...
+ */
+public class Attribute {
+	/** The entire declaration such as "String foo;" */
+	public String decl;
+
+	/** The type; might be empty such as for Python which has no static typing */
+	public String type;
+
+	/** The name of the attribute "foo" */
+	public String name;
+
+	/** The optional attribute initialization expression */
+	public String initValue;
+
+	public Attribute(String decl) {
+		extractAttribute(decl);
+	}
+
+	public Attribute(String name, String decl) {
+		this.name = name;
+		this.decl = decl;
+	}
+
+	/** For decls like "String foo" or "char *foo32[3]" compute the ID
+	 *  and type declarations.  Also handle "int x=3" and 'T t = new T("foo")'
+	 *  but if the separator is ',' you cannot use ',' in the initvalue.
+	 *  AttributeScope.addAttributes takes care of the separation so we are
+	 *  free here to use from '=' to end of string as the expression.
+	 *
+	 *  Set name, type, initvalue, and full decl instance vars.
+	 */
+	protected void extractAttribute(String decl) {
+		if ( decl==null ) {
+			return;
+		}
+		boolean inID = false;
+		int start = -1;
+		int rightEdgeOfDeclarator = decl.length()-1;
+		int equalsIndex = decl.indexOf('=');
+		if ( equalsIndex>0 ) {
+			// everything after the '=' is the init value
+			this.initValue = decl.substring(equalsIndex+1,decl.length());
+			rightEdgeOfDeclarator = equalsIndex-1;
+		}
+		// walk backwards looking for start of an ID
+		for (int i=rightEdgeOfDeclarator; i>=0; i--) {
+			// if we haven't entered an ID yet, keep scanning left
+			if ( !inID && Character.isLetterOrDigit(decl.charAt(i)) ) {
+			    inID = true;
+			}
+			else if ( inID &&
+				      !(Character.isLetterOrDigit(decl.charAt(i))||
+				       decl.charAt(i)=='_') ) {
+				start = i+1;
+				break;
+			}
+		}
+		if ( start<0 && inID ) {
+			start = 0;
+		}
+		if ( start<0 ) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL,decl); // NOTE(review): falls through after reporting; substring below throws if start<0
+		}
+		// walk forwards looking for end of an ID
+		int stop=-1;
+		for (int i=start; i<=rightEdgeOfDeclarator; i++) {
+			// if we haven't found the end yet, keep going
+			if ( !(Character.isLetterOrDigit(decl.charAt(i))||
+				decl.charAt(i)=='_') )
+			{
+				stop = i;
+				break;
+			}
+			if ( i==rightEdgeOfDeclarator ) {
+				stop = i+1;
+			}
+		}
+
+		// the name is the last ID
+		this.name = decl.substring(start,stop);
+
+		// the type is the decl minus the ID (could be empty)
+		this.type = decl.substring(0,start);
+		if ( stop<=rightEdgeOfDeclarator ) {
+			this.type += decl.substring(stop,rightEdgeOfDeclarator+1);
+		}
+		this.type = type.trim();
+		if ( this.type.length()==0 ) {
+			this.type = null;
+		}
+
+		this.decl = decl;
+	}
+
+	public String toString() {
+		if ( initValue!=null ) {
+			return type+" "+name+"="+initValue;
+		}
+		return type+" "+name;
+	}
+}
+
diff --git a/src/org/antlr/tool/AttributeScope.java b/src/org/antlr/tool/AttributeScope.java
new file mode 100644
index 0000000..fbc98a2
--- /dev/null
+++ b/src/org/antlr/tool/AttributeScope.java
@@ -0,0 +1,179 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.Token;
+
+import java.util.*;
+
+/** Track the attributes within a scope.  A named scope has just its list
+ *  of attributes.  Each rule has potentially 3 scopes: return values,
+ *  parameters, and an implicitly-named scope (i.e., a scope defined in a rule).
+ *  Implicitly-defined scopes are named after the rule; rules and scopes then
+ *  must live in the same name space--no collisions allowed.
+ */
+public class AttributeScope {
+
+	/** All token scopes (token labels) share the same fixed scope
+	 *  of predefined attributes.  I keep this out of the runtime.Token
+	 *  object to avoid a runtime space burden.
+	 */
+	public static AttributeScope tokenScope = new AttributeScope("Token",null);
+	static {
+		tokenScope.addAttribute("text", null);
+		tokenScope.addAttribute("type", null);
+		tokenScope.addAttribute("line", null);
+		tokenScope.addAttribute("index", null);
+		tokenScope.addAttribute("pos", null);
+		tokenScope.addAttribute("channel", null);
+		tokenScope.addAttribute("tree", null);
+	}
+
+	/** This scope is associated with which input token (for error handling)? */
+	public Token derivedFromToken;
+
+	public Grammar grammar;
+
+	/** The scope name */
+	private String name;
+
+	/** Not a rule scope, but visible to all rules "scope symbols { ...}" */
+	public boolean isDynamicGlobalScope;
+
+	/** Visible to all rules, but defined in rule "scope { int i; }" */
+	public boolean isDynamicRuleScope;
+
+	public boolean isParameterScope;
+
+	public boolean isReturnScope;
+
+	public boolean isPredefinedRuleScope;
+
+	public boolean isPredefinedLexerRuleScope;
+	
+	/** The list of Attribute objects */
+
+	protected LinkedHashMap attributes = new LinkedHashMap();
+
+	public AttributeScope(String name, Token derivedFromToken) {
+		this(null,name,derivedFromToken);
+	}
+
+	public AttributeScope(Grammar grammar, String name, Token derivedFromToken) {
+		this.grammar = grammar;
+		this.name = name;
+		this.derivedFromToken = derivedFromToken;
+	}
+
+	public String getName() {
+		if ( isParameterScope ) {
+			return name+"_parameter";
+		}
+		else if ( isReturnScope ) {
+			return name+"_return";
+		}
+		return name;
+	}
+
+	/** From a chunk of text holding the definitions of the attributes,
+	 *  pull them apart and create an Attribute for each one.  Add to
+	 *  the list of attributes for this scope.  Pass in the character
+	 *  that terminates a definition such as ',' or ';'.  For example,
+	 *
+	 *  scope symbols {
+	 *  	int n;
+	 *  	List names;
+	 *  }
+	 *
+	 *  would pass in definitions equal to the text in between {...} and
+	 *  separator=';'.  It results in two Attribute objects.
+	 */
+	public void addAttributes(String definitions, String separator) {
+        StringTokenizer st = new StringTokenizer(definitions,separator);
+		while (st.hasMoreElements()) {
+			String decl = (String) st.nextElement();
+			decl = decl.trim();
+			if ( decl.length()==0 ) {
+				break; // whitespace-only token; stop (NOTE(review): break also skips any decls after an empty one)
+			}
+			Attribute attr = new Attribute(decl);
+			if ( !isReturnScope && attr.initValue!=null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL,
+										  grammar,
+										  derivedFromToken,
+										  attr.name);
+				attr.initValue=null; // wipe it out
+			}
+			attributes.put(attr.name, attr);
+		}
+	}
+
+	public void addAttribute(String name, String decl) {
+		attributes.put(name, new Attribute(name,decl));
+	}
+
+	public Attribute getAttribute(String name) {
+		return (Attribute)attributes.get(name);
+	}
+
+	/** Used by templates to get all attributes */
+	public List getAttributes() {
+		List a = new ArrayList();
+		a.addAll(attributes.values());
+		return a;
+	}
+
+	/** Return the set of keys that collide from
+	 *  this and other.
+	 */
+	public Set intersection(AttributeScope other) {
+		if ( other==null || other.size()==0 || size()==0 ) {
+			return null;
+		}
+		Set inter = new HashSet();
+		Set thisKeys = attributes.keySet();
+		for (Iterator it = thisKeys.iterator(); it.hasNext();) {
+			String key = (String) it.next();
+			if ( other.attributes.get(key)!=null ) {
+				inter.add(key);
+			}
+		}
+		if ( inter.size()==0 ) {
+			return null;
+		}
+		return inter;
+	}
+
+	public int size() {
+		return attributes==null?0:attributes.size();
+	}
+
+	public String toString() {
+		return (isDynamicGlobalScope?"global ":"")+getName()+":"+attributes;
+	}
+}
diff --git a/src/org/antlr/tool/BuildDependencyGenerator.java b/src/org/antlr/tool/BuildDependencyGenerator.java
new file mode 100644
index 0000000..45ae500
--- /dev/null
+++ b/src/org/antlr/tool/BuildDependencyGenerator.java
@@ -0,0 +1,193 @@
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.misc.Utils;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.io.*;
+
+/** Given a grammar file, show the dependencies on .tokens etc...
+ *  Using ST, emit a simple "make compatible" list of dependencies.
+ *  For example, combined grammar T.g (no token import) generates:
+ *
+ *		TParser.java : T.g
+ * 		T.tokens : T.g
+ * 		T__.g : T.g
+ *
+ *  For tree grammar TP with import of T.tokens:
+ *
+ * 		TP.g : T.tokens
+ * 		TP.java : TP.g
+ *
+ *  If "-lib libdir" is used on command-line with -depend, then include the
+ *  path like
+ *
+ * 		TP.g : libdir/T.tokens
+ *
+ *  Pay attention to -o as well:
+ *
+ * 		outputdir/TParser.java : T.g
+ *
+ *  So this output shows what the grammar depends on *and* what it generates.
+ *
+ *  Operate on one grammar file at a time.  If given a list of .g on the
+ *  command-line with -depend, just emit the dependencies.  The grammars
+ *  may depend on each other, but the order doesn't matter.  Build tools,
+ *  reading in this output, will know how to organize it.
+ *
+ *  This is a wee bit slow probably because the code generator has to load
+ *  all of its template files in order to figure out the file extension
+ *  for the generated recognizer.
+ *
+ *  This code was obvious until I removed redundant "./" on front of files
+ *  and had to escape spaces in filenames :(
+ */
+public class BuildDependencyGenerator {
+	protected String grammarFileName;
+	protected Tool tool;
+	protected Grammar grammar;
+	protected CodeGenerator generator;
+	protected StringTemplateGroup templates;
+
+	public BuildDependencyGenerator(Tool tool, String grammarFileName)
+		throws IOException, antlr.TokenStreamException, antlr.RecognitionException
+	{
+		this.tool = tool;
+		this.grammarFileName = grammarFileName;
+		grammar = tool.getGrammar(grammarFileName);
+		String language = (String)grammar.getOption("language");
+		generator = new CodeGenerator(tool, grammar, language);
+		generator.loadTemplates(language);
+	}
+
+	/** From T.g return a list of File objects that
+	 *  names files ANTLR will emit from T.g.
+	 */
+	public List getGeneratedFileList() {
+		List files = new ArrayList();
+		File outputDir = tool.getOutputDirectory(grammarFileName);
+		if ( outputDir.getName().equals(".") ) {
+			outputDir = null;
+		}
+		else if ( outputDir.getName().indexOf(' ')>=0 ) { // has spaces?
+			String escSpaces = Utils.replace(outputDir.toString(),
+											 " ",
+											 "\\ ");
+			outputDir = new File(escSpaces);
+		}
+		// add generated recognizer; e.g., TParser.java
+		String recognizer =
+			generator.getRecognizerFileName(grammar.name, grammar.type);
+		files.add(new File(outputDir, recognizer));
+		// add output vocab file; e.g., T.tokens
+		files.add(new File(outputDir, generator.getVocabFileName()));
+		// are we generating a .h file?
+		StringTemplate headerExtST = null;
+		if ( generator.getTemplates().isDefined("headerFile") ) {
+			headerExtST = generator.getTemplates().getInstanceOf("headerFileExtension");
+			files.add(new File(outputDir,headerExtST.toString()));
+		}
+		if ( grammar.type==Grammar.COMBINED ) {
+			// add autogenerated lexer; e.g., TLexer.java TLexer.h TLexer.tokens
+			// don't add T__.g (just a temp file)
+			String lexer =
+				generator.getRecognizerFileName(grammar.name, Grammar.LEXER);
+			files.add(new File(outputDir,lexer));
+			// TLexer.h
+			String suffix = Grammar.grammarTypeToFileNameSuffix[Grammar.LEXER];
+			if ( headerExtST !=null ) {
+				String header =	 grammar.name+suffix+headerExtST.toString();
+				files.add(new File(outputDir,header));
+			}
+			// for combined, don't generate TLexer.tokens
+		}
+
+		if ( files.size()==0 ) {
+			return null;
+		}
+		return files;
+	}
+
+	/** Return a list of File objects that name files ANTLR will read
+	 *  to process T.g; for now, this can only be .tokens files and only
+	 *  if they use the tokenVocab option.
+	 */
+	public List getDependenciesFileList() {
+		List files = new ArrayList();
+		String vocabName = (String)grammar.getOption("tokenVocab");
+		if ( vocabName == null ) {
+			return null;
+		}
+		File vocabFile = grammar.getImportedVocabFileName(vocabName);
+		File outputDir = vocabFile.getParentFile();
+		if ( outputDir.getName().equals(".") ) {
+			files.add(vocabFile.getName()); // NOTE(review): adds a String here but File elsewhere; list is heterogeneous
+		}
+		else if ( outputDir.getName().indexOf(' ')>=0 ) { // has spaces?
+			String escSpaces = Utils.replace(outputDir.toString(),
+											 " ",
+											 "\\ ");
+			outputDir = new File(escSpaces);
+			files.add(new File(outputDir, vocabFile.getName()));
+		}
+		else {
+			files.add(vocabFile);
+		}
+
+		if ( files.size()==0 ) {
+			return null;
+		}
+		return files;
+	}
+
+	public StringTemplate getDependencies() {
+		loadDependencyTemplates();
+		StringTemplate dependenciesST = templates.getInstanceOf("dependencies");
+		dependenciesST.setAttribute("in", getDependenciesFileList());
+		dependenciesST.setAttribute("out", getGeneratedFileList());
+		dependenciesST.setAttribute("grammarFileName", grammar.fileName);
+		return dependenciesST;
+	}
+
+	public void loadDependencyTemplates() {
+		if ( templates!=null ) {
+			return;
+		}
+		String fileName = "org/antlr/tool/templates/depend.stg";
+		ClassLoader cl = Thread.currentThread().getContextClassLoader();
+		InputStream is = cl.getResourceAsStream(fileName);
+		if ( is==null ) {
+			cl = ErrorManager.class.getClassLoader();
+			is = cl.getResourceAsStream(fileName);
+		}
+		if ( is==null ) {
+			ErrorManager.internalError("Can't load dependency templates: "+fileName);
+			return;
+		}
+		BufferedReader br = null;
+		try {
+			br = new BufferedReader(new InputStreamReader(is));
+			templates = new StringTemplateGroup(br,
+												AngleBracketTemplateLexer.class);
+			br.close(); // NOTE(review): redundant — the finally block closes again (harmless double close)
+		}
+		catch (IOException ioe) {
+			ErrorManager.internalError("error reading dependency templates file "+fileName, ioe);
+		}
+		finally {
+			if ( br!=null ) {
+				try {
+					br.close();
+				}
+				catch (IOException ioe) {
+					ErrorManager.internalError("cannot close dependency templates file "+fileName, ioe);
+				}
+			}
+		}
+	}
+}
diff --git a/src/org/antlr/tool/DOTGenerator.java b/src/org/antlr/tool/DOTGenerator.java
new file mode 100644
index 0000000..1fd4f46
--- /dev/null
+++ b/src/org/antlr/tool/DOTGenerator.java
@@ -0,0 +1,383 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.analysis.*;
+import org.antlr.misc.Utils;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.util.*;
+
+/** The DOT (part of graphviz) generation aspect. */
+public class DOTGenerator {
+	public static final boolean STRIP_NONREDUCED_STATES = false;
+
+	protected String arrowhead="normal";
+	protected String rankdir="LR";
+
+	/** Library of output templates; use <attrname> format */
+    public static StringTemplateGroup stlib =
+            new StringTemplateGroup("toollib", AngleBracketTemplateLexer.class);
+
+    /** To prevent infinite recursion when walking state machines, record
+     *  which states we've visited.  Make a new set every time you start
+     *  walking in case you reuse this object.
+     */
+    protected Set markedStates = null;
+
+    protected Grammar grammar;
+
+    /** This aspect is associated with a grammar */
+	public DOTGenerator(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+    /** Return a String containing a DOT description that, when displayed,
+     *  will show the incoming state machine visually.  All nodes reachable
+     *  from startState will be included.
+     */
+    public String getDOT(State startState) {
+        // The output DOT graph for visualization
+		StringTemplate dot = null;
+		markedStates = new HashSet();
+        if ( startState instanceof DFAState ) {
+            dot = stlib.getInstanceOf("org/antlr/tool/templates/dot/dfa");
+			dot.setAttribute("startState",
+					Utils.integer(startState.stateNumber));
+			dot.setAttribute("useBox",
+							 Boolean.valueOf(Tool.internalOption_ShowNFConfigsInDFA));
+			walkCreatingDFADOT(dot, (DFAState)startState);
+        }
+        else {
+            dot = stlib.getInstanceOf("org/antlr/tool/templates/dot/nfa");
+			dot.setAttribute("startState",
+					Utils.integer(startState.stateNumber));
+			walkRuleNFACreatingDOT(dot, startState);
+        }
+		dot.setAttribute("rankdir", rankdir);
+        return dot.toString();
+    }
+
+    /** Return a String containing a DOT description that, when displayed,
+     *  will show the incoming state machine visually.  All nodes reachable
+     *  from startState will be included.
+    public String getRuleNFADOT(State startState) {
+        // The output DOT graph for visualization
+        StringTemplate dot = stlib.getInstanceOf("org/antlr/tool/templates/dot/nfa");
+
+        markedStates = new HashSet();
+        dot.setAttribute("startState",
+                Utils.integer(startState.stateNumber));
+        walkRuleNFACreatingDOT(dot, startState);
+        return dot.toString();
+    }
+	 */
+
+    /** Do a depth-first walk of the state machine graph and
+     *  fill a DOT description template.  Keep filling the
+     *  states and edges attributes.
+     */
+    protected void walkCreatingDFADOT(StringTemplate dot,
+									  DFAState s)
+    {
+		if ( markedStates.contains(Utils.integer(s.stateNumber)) ) {
+			return; // already visited this node
+        }
+
+		markedStates.add(Utils.integer(s.stateNumber)); // mark this node as completed.
+
+        // first add this node
+        StringTemplate st;
+        if ( s.isAcceptState() ) {
+            st = stlib.getInstanceOf("org/antlr/tool/templates/dot/stopstate");
+        }
+        else {
+            st = stlib.getInstanceOf("org/antlr/tool/templates/dot/state");
+        }
+        st.setAttribute("name", getStateLabel(s));
+        dot.setAttribute("states", st);
+
+        // make a DOT edge for each transition
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) s.transition(i);
+			/*
+			System.out.println("dfa "+s.dfa.decisionNumber+
+				" edge from s"+s.stateNumber+" ["+i+"] of "+s.getNumberOfTransitions());
+			*/
+			if ( STRIP_NONREDUCED_STATES ) {
+				if ( edge.target instanceof DFAState &&
+					((DFAState)edge.target).getAcceptStateReachable()!=DFA.REACHABLE_YES )
+				{
+					continue; // don't generate nodes for terminal states
+				}
+			}
+			st = stlib.getInstanceOf("org/antlr/tool/templates/dot/edge");
+			st.setAttribute("label", getEdgeLabel(edge));
+			st.setAttribute("src", getStateLabel(s));
+            st.setAttribute("target", getStateLabel(edge.target));
+			st.setAttribute("arrowhead", arrowhead);
+            dot.setAttribute("edges", st);
+            walkCreatingDFADOT(dot, (DFAState)edge.target); // keep walkin'
+        }
+    }
+
+    /** Do a depth-first walk of the state machine graph and
+     *  fill a DOT description template.  Keep filling the
+     *  states and edges attributes.  We know this is an NFA
+     *  for a rule so don't traverse edges to other rules and
+     *  don't go past rule end state.
+     */
+    protected void walkRuleNFACreatingDOT(StringTemplate dot,
+                                          State s)
+    {
+        if ( markedStates.contains(s) ) {
+            return; // already visited this node
+        }
+
+        markedStates.add(s); // mark this node as completed.
+
+        // first add this node
+        StringTemplate stateST;
+        if ( s.isAcceptState() ) {
+            stateST = stlib.getInstanceOf("org/antlr/tool/templates/dot/stopstate");
+        }
+        else {
+            stateST = stlib.getInstanceOf("org/antlr/tool/templates/dot/state");
+        }
+        stateST.setAttribute("name", getStateLabel(s));
+        dot.setAttribute("states", stateST);
+
+        if ( s.isAcceptState() )  {
+            return; // don't go past end of rule node to the follow states
+        }
+
+        // special case: if decision point, then line up the alt start states
+        // unless it's an end of block
+		if ( ((NFAState)s).isDecisionState() ) {
+			GrammarAST n = ((NFAState)s).getAssociatedASTNode();
+			if ( n!=null && n.getType()!=ANTLRParser.EOB ) {
+				StringTemplate rankST = stlib.getInstanceOf("org/antlr/tool/templates/dot/decision-rank");
+				NFAState alt = (NFAState)s;
+				while ( alt!=null ) {
+					rankST.setAttribute("states", getStateLabel(alt));
+					if ( alt.transition(1)!=null ) { // transition(1) presumably chains to the next alt's start state -- TODO confirm
+						alt = (NFAState)alt.transition(1).target;
+					}
+					else {
+						alt=null;
+					}
+				}
+				dot.setAttribute("decisionRanks", rankST);
+			}
+		}
+
+        // make a DOT edge for each transition
+		StringTemplate edgeST = null;
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+            Transition edge = (Transition) s.transition(i);
+            if ( edge instanceof RuleClosureTransition ) {
+                RuleClosureTransition rr = ((RuleClosureTransition)edge);
+                // don't jump to other rules, but display edge to follow node
+                edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/edge");
+                edgeST.setAttribute("label", "<"+grammar.getRuleName(rr.getRuleIndex())+">");
+                edgeST.setAttribute("src", getStateLabel(s));
+                edgeST.setAttribute("target", getStateLabel(rr.getFollowState()));
+				edgeST.setAttribute("arrowhead", arrowhead);
+                dot.setAttribute("edges", edgeST);
+                walkRuleNFACreatingDOT(dot, rr.getFollowState());
+                continue;
+            }
+			if ( edge.isEpsilon() ) {
+				edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/epsilon-edge");				
+			}
+			else {
+				edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/edge");
+			}
+			edgeST.setAttribute("label", getEdgeLabel(edge));
+            edgeST.setAttribute("src", getStateLabel(s));
+			edgeST.setAttribute("target", getStateLabel(edge.target));
+			edgeST.setAttribute("arrowhead", arrowhead);
+            dot.setAttribute("edges", edgeST);
+            walkRuleNFACreatingDOT(dot, edge.target); // keep walkin'
+        }
+    }
+
+    /*
+	public void writeDOTFilesForAllRuleNFAs() throws IOException {
+        Collection rules = grammar.getRules();
+        for (Iterator itr = rules.iterator(); itr.hasNext();) {
+			Grammar.Rule r = (Grammar.Rule) itr.next();
+            String ruleName = r.name;
+            writeDOTFile(
+                    ruleName,
+                    getRuleNFADOT(grammar.getRuleStartState(ruleName)));
+        }
+    }
+    */
+
+    /*
+	public void writeDOTFilesForAllDecisionDFAs() throws IOException {
+        // for debugging, create a DOT file for each decision in
+        // a directory named for the grammar.
+        File grammarDir = new File(grammar.name+"_DFAs");
+        grammarDir.mkdirs();
+        List decisionList = grammar.getDecisionNFAStartStateList();
+        if ( decisionList==null ) {
+            return;
+        }
+        int i = 1;
+        Iterator iter = decisionList.iterator();
+        while (iter.hasNext()) {
+            NFAState decisionState = (NFAState)iter.next();
+            DFA dfa = decisionState.getDecisionASTNode().getLookaheadDFA();
+            if ( dfa!=null ) {
+                String dot = getDOT( dfa.startState );
+                writeDOTFile(grammarDir+"/dec-"+i, dot);
+            }
+            i++;
+        }
+    }
+    */
+
+    /** Fix edge strings so they print out in DOT properly;
+	 *  generate any gated predicates on edge too.
+	 */
+    protected String getEdgeLabel(Transition edge) {
+		String label = edge.label.toString(grammar);
+		label = Utils.replace(label,"\\", "\\\\");
+		label = Utils.replace(label,"\"", "\\\"");
+        if ( label.equals(Label.EPSILON_STR) ) {
+            label = "e";
+        }
+		State target = edge.target;
+		if ( !edge.isSemanticPredicate() && target instanceof DFAState ) {
+			// look for gated predicates; don't add gated to simple sempred edges
+			SemanticContext preds =
+				((DFAState)target).getGatedPredicatesInNFAConfigurations();
+			if ( preds!=null ) {
+				String predsStr = "";
+				predsStr = "&&{"+
+					preds.genExpr(grammar.generator,
+								  grammar.generator.getTemplates(), null).toString()
+					+"}?";
+				label += predsStr;
+			}
+		}
+        return label;
+    }
+
+    protected String getStateLabel(State s) {
+        if ( s==null ) {
+            return "null";
+        }
+        String stateLabel = String.valueOf(s.stateNumber);
+		if ( s instanceof DFAState ) {
+            StringBuffer buf = new StringBuffer(250);
+			buf.append('s');
+			buf.append(s.stateNumber);
+			if ( Tool.internalOption_ShowNFConfigsInDFA ) {
+				buf.append("\\n");
+				// separate alts
+				Set alts = ((DFAState)s).getAltSet();
+				List altList = new ArrayList();
+				altList.addAll(alts);
+				Collections.sort(altList);
+				Set configurations = ((DFAState)s).getNFAConfigurations();
+				for (int altIndex = 0; altIndex < altList.size(); altIndex++) {
+					Integer altI = (Integer) altList.get(altIndex);
+					int alt = altI.intValue();
+					if ( altIndex>0 ) {
+						buf.append("\\n");
+					}
+					buf.append("alt");
+					buf.append(alt);
+					buf.append(':');
+					// get a list of configs for just this alt
+					// it will help us print better later
+					List configsInAlt = new ArrayList();
+					for (Iterator it = configurations.iterator(); it.hasNext();) {
+						NFAConfiguration c = (NFAConfiguration) it.next();
+						if ( c.alt!=alt ) continue;
+						configsInAlt.add(c);
+					}
+					int n = 0;
+					for (int cIndex = 0; cIndex < configsInAlt.size(); cIndex++) {
+						NFAConfiguration c =
+							(NFAConfiguration)configsInAlt.get(cIndex);
+						n++;
+						buf.append(c.toString(false));
+						if ( (cIndex+1)<configsInAlt.size() ) {
+							buf.append(", ");
+						}
+						if ( n%5==0 && (configsInAlt.size()-cIndex)>3 ) {
+							buf.append("\\n");
+						}
+					}
+				}
+			}
+            stateLabel = buf.toString();
+        }
+		if ( (s instanceof NFAState) && ((NFAState)s).isDecisionState() ) {
+			stateLabel = stateLabel+",d="+
+					((NFAState)s).getDecisionNumber();
+			if ( ((NFAState)s).endOfBlockStateNumber!=State.INVALID_STATE_NUMBER ) {
+				stateLabel += ",eob="+((NFAState)s).endOfBlockStateNumber;
+			}
+		}
+		else if ( (s instanceof NFAState) &&
+			((NFAState)s).endOfBlockStateNumber!=State.INVALID_STATE_NUMBER)
+		{
+			NFAState n = ((NFAState)s);
+			stateLabel = stateLabel+",eob="+n.endOfBlockStateNumber;
+		}
+        else if ( s instanceof DFAState && ((DFAState)s).isAcceptState() ) {
+            stateLabel = stateLabel+
+                    "=>"+((DFAState)s).getUniquelyPredictedAlt();
+        }
+        return '"'+stateLabel+'"';
+    }
+
+	public String getArrowheadType() {
+		return arrowhead;
+	}
+
+	public void setArrowheadType(String arrowhead) {
+		this.arrowhead = arrowhead;
+	}
+
+	public String getRankdir() {
+		return rankdir;
+	}
+
+	public void setRankdir(String rankdir) {
+		this.rankdir = rankdir;
+	}
+}
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalker.java b/src/org/antlr/tool/DefineGrammarItemsWalker.java
new file mode 100644
index 0000000..3e28ebb
--- /dev/null
+++ b/src/org/antlr/tool/DefineGrammarItemsWalker.java
@@ -0,0 +1,2995 @@
+// $ANTLR 2.7.7 (2006-01-29): "define.g" -> "DefineGrammarItemsWalker.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.misc.*;
+
+import antlr.TreeParser;
+import antlr.Token;
+import antlr.collections.AST;
+import antlr.RecognitionException;
+import antlr.ANTLRException;
+import antlr.NoViableAltException;
+import antlr.MismatchedTokenException;
+import antlr.SemanticException;
+import antlr.collections.impl.BitSet;
+import antlr.ASTPair;
+import antlr.collections.impl.ASTArray;
+
+
+public class DefineGrammarItemsWalker extends antlr.TreeParser       implements DefineGrammarItemsWalkerTokenTypes
+ {
+
+protected Grammar grammar;
+protected GrammarAST root;
+protected String currentRuleName;
+protected GrammarAST currentRewriteBlock;
+protected GrammarAST currentRewriteRule;
+protected int outerAltNum = 0;
+protected int blockLevel = 0;
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "define: "+ex.toString(),
+            ex);
+    }
+
+	protected void finish() {
+		trimGrammar();
+	}
+
+	/** Remove any lexer rules from a COMBINED; already passed to lexer */
+	protected void trimGrammar() {
+		if ( grammar.type!=Grammar.COMBINED ) {
+			return;
+		}
+		// form is (header ... ) ( grammar ID (scope ...) ... ( rule ... ) ( rule ... ) ... )
+		GrammarAST p = root;
+		// find the grammar spec
+		while ( !p.getText().equals("grammar") ) {
+			p = (GrammarAST)p.getNextSibling();
+		}
+		p = (GrammarAST)p.getFirstChild(); // jump down to first child of grammar
+		// look for first RULE def
+		GrammarAST prev = p; // points to the ID (grammar name)
+		while ( p.getType()!=RULE ) {
+			prev = p;
+			p = (GrammarAST)p.getNextSibling();
+		}
+		// prev points at last node before first rule subtree at this point
+		while ( p!=null ) {
+			String ruleName = p.getFirstChild().getText();
+			//System.out.println("rule "+ruleName+" prev="+prev.getText());
+			if ( Character.isUpperCase(ruleName.charAt(0)) ) {
+				// remove lexer rule
+				prev.setNextSibling(p.getNextSibling());
+			}
+			else {
+				prev = p; // non-lexer rule; move on
+			}
+			p = (GrammarAST)p.getNextSibling();
+		}
+		//System.out.println("root after removal is: "+root.toStringList());
+	}
+
+    protected void trackInlineAction(GrammarAST actionAST) {
+		Rule r = grammar.getRule(currentRuleName);
+        if ( r!=null ) {
+            r.trackInlineAction(actionAST);
+        }
+    }
+
+public DefineGrammarItemsWalker() {
+	tokenNames = _tokenNames;
+}
+
+	public final void grammar(AST _t,
+		Grammar g
+	) throws RecognitionException {
+		
+		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		grammar = g;
+		root = grammar_AST_in;
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LEXER_GRAMMAR:
+			{
+				AST __t3 = _t;
+				GrammarAST tmp1_AST_in = (GrammarAST)_t;
+				match(_t,LEXER_GRAMMAR);
+				_t = _t.getFirstChild();
+				if ( inputState.guessing==0 ) {
+					grammar.type = Grammar.LEXER;
+				}
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t3;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PARSER_GRAMMAR:
+			{
+				AST __t4 = _t;
+				GrammarAST tmp2_AST_in = (GrammarAST)_t;
+				match(_t,PARSER_GRAMMAR);
+				_t = _t.getFirstChild();
+				if ( inputState.guessing==0 ) {
+					grammar.type = Grammar.PARSER;
+				}
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t4;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TREE_GRAMMAR:
+			{
+				AST __t5 = _t;
+				GrammarAST tmp3_AST_in = (GrammarAST)_t;
+				match(_t,TREE_GRAMMAR);
+				_t = _t.getFirstChild();
+				if ( inputState.guessing==0 ) {
+					grammar.type = Grammar.TREE_PARSER;
+				}
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t5;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case COMBINED_GRAMMAR:
+			{
+				AST __t6 = _t;
+				GrammarAST tmp4_AST_in = (GrammarAST)_t;
+				match(_t,COMBINED_GRAMMAR);
+				_t = _t.getFirstChild();
+				if ( inputState.guessing==0 ) {
+					grammar.type = Grammar.COMBINED;
+				}
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t6;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			if ( inputState.guessing==0 ) {
+				finish();
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void grammarSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST cmt = null;
+		
+		Map opts=null;
+		Token optionsStartToken=null;
+		
+		
+		try {      // for error handling
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case DOC_COMMENT:
+			{
+				cmt = (GrammarAST)_t;
+				match(_t,DOC_COMMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case OPTIONS:
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				if ( inputState.guessing==0 ) {
+					optionsStartToken=((GrammarAST)_t).getToken();
+				}
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKENS:
+			{
+				tokensSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop14:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==SCOPE)) {
+					attrScope(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop14;
+				}
+				
+			} while (true);
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case AMPERSAND:
+			{
+				actions(_t);
+				_t = _retTree;
+				break;
+			}
+			case RULE:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			rules(_t);
+			_t = _retTree;
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void attrScope(AST _t) throws RecognitionException {
+		
+		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST name = null;
+		GrammarAST attrs = null;
+		
+		try {      // for error handling
+			AST __t8 = _t;
+			GrammarAST tmp5_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			name = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			attrs = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t8;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				
+						AttributeScope scope = grammar.defineGlobalScope(name.getText(),attrs.token);
+						scope.isDynamicGlobalScope = true;
+						scope.addAttributes(attrs.getText(), ";");
+						
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void optionsSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST optionsSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			GrammarAST tmp6_AST_in = (GrammarAST)_t;
+			match(_t,OPTIONS);
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tokensSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST tokensSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t24 = _t;
+			GrammarAST tmp7_AST_in = (GrammarAST)_t;
+			match(_t,TOKENS);
+			_t = _t.getFirstChild();
+			{
+			int _cnt26=0;
+			_loop26:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ASSIGN||_t.getType()==TOKEN_REF)) {
+					tokenSpec(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt26>=1 ) { break _loop26; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt26++;
+			} while (true);
+			}
+			_t = __t24;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void actions(AST _t) throws RecognitionException {
+		
+		GrammarAST actions_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			int _cnt18=0;
+			_loop18:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					action(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt18>=1 ) { break _loop18; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt18++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rules(AST _t) throws RecognitionException {
+		
+		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			int _cnt32=0;
+			_loop32:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==RULE)) {
+					rule(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt32>=1 ) { break _loop32; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt32++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void action(AST _t) throws RecognitionException {
+		
+		GrammarAST action_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST amp = null;
+		GrammarAST id1 = null;
+		GrammarAST id2 = null;
+		GrammarAST a1 = null;
+		GrammarAST a2 = null;
+		
+		String scope=null;
+		GrammarAST nameAST=null, actionAST=null;
+		
+		
+		try {      // for error handling
+			AST __t20 = _t;
+			amp = _t==ASTNULL ? null :(GrammarAST)_t;
+			match(_t,AMPERSAND);
+			_t = _t.getFirstChild();
+			id1 = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ID:
+			{
+				id2 = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				a1 = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					scope=id1.getText(); nameAST=id2; actionAST=a1;
+				}
+				break;
+			}
+			case ACTION:
+			{
+				a2 = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					scope=null; nameAST=id1; actionAST=a2;
+				}
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t20;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				
+						 grammar.defineNamedAction(amp,scope,nameAST,actionAST);
+						
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tokenSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST tokenSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST t = null;
+		GrammarAST t2 = null;
+		GrammarAST s = null;
+		GrammarAST c = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKEN_REF:
+			{
+				t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t28 = _t;
+				GrammarAST tmp8_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				t2 = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case STRING_LITERAL:
+				{
+					s = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					c = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t28;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rule(AST _t) throws RecognitionException {
+		
+		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST args = null;
+		GrammarAST ret = null;
+		GrammarAST b = null;
+		
+		String mod=null;
+		String name=null;
+		Map opts=null;
+		Rule r = null;
+		
+		
+		try {      // for error handling
+			AST __t34 = _t;
+			GrammarAST tmp9_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				opts = tmp9_AST_in.options;
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				mod=modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			AST __t36 = _t;
+			GrammarAST tmp10_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				args = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t36;
+			_t = _t.getNextSibling();
+			AST __t38 = _t;
+			GrammarAST tmp11_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				ret = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			_t = __t38;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			if ( inputState.guessing==0 ) {
+				
+							name = id.getText();
+							currentRuleName = name;
+							if ( Character.isUpperCase(name.charAt(0)) &&
+								 grammar.type==Grammar.COMBINED )
+							{
+								// a merged grammar spec, track lexer rules and send to another grammar
+								grammar.defineLexerRuleFoundInParser(id.getToken(), rule_AST_in);
+							}
+							else {
+								int numAlts = countAltsForRule(rule_AST_in);
+								grammar.defineRule(id.getToken(), mod, opts, rule_AST_in, args, numAlts);
+								r = grammar.getRule(name);
+								if ( args!=null ) {
+									r.parameterScope = grammar.createParameterScope(name,args.token);
+									r.parameterScope.addAttributes(args.getText(), ",");
+								}
+								if ( ret!=null ) {
+									r.returnScope = grammar.createReturnScope(name,ret.token);
+									r.returnScope.addAttributes(ret.getText(), ",");
+								}
+							}
+							
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t,r);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop43:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					ruleAction(_t,r);
+					_t = _retTree;
+				}
+				else {
+					break _loop43;
+				}
+				
+			} while (true);
+			}
+			if ( inputState.guessing==0 ) {
+				this.blockLevel=0;
+			}
+			b = _t==ASTNULL ? null : (GrammarAST)_t;
+			block(_t);
+			_t = _retTree;
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp12_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				
+				// copy rule options into the block AST, which is where
+				// the analysis will look for k option etc...
+				b.options = opts;
+				
+			}
+			_t = __t34;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final String  modifier(AST _t) throws RecognitionException {
+		String mod;
+		
+		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		mod = modifier_AST_in.getText();
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_protected:
+			{
+				GrammarAST tmp13_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_protected);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_public:
+			{
+				GrammarAST tmp14_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_public);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_private:
+			{
+				GrammarAST tmp15_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_private);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case FRAGMENT:
+			{
+				GrammarAST tmp16_AST_in = (GrammarAST)_t;
+				match(_t,FRAGMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+		return mod;
+	}
+	
+	public final void ruleScopeSpec(AST _t,
+		Rule r
+	) throws RecognitionException {
+		
+		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST attrs = null;
+		GrammarAST uses = null;
+		
+		try {      // for error handling
+			AST __t63 = _t;
+			GrammarAST tmp17_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ACTION:
+			{
+				attrs = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+						         r.ruleScope = grammar.createRuleScope(r.name,attrs.token);
+								 r.ruleScope.isDynamicRuleScope = true;
+								 r.ruleScope.addAttributes(attrs.getText(), ";");
+								
+				}
+				break;
+			}
+			case 3:
+			case ID:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop66:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ID)) {
+					uses = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+					if ( inputState.guessing==0 ) {
+						
+							         if ( grammar.getGlobalScope(uses.getText())==null ) {
+										 ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
+																   grammar,
+																   uses.token,
+																   uses.getText());
+							         }
+							         else {
+							         	if ( r.useScopes==null ) {r.useScopes=new ArrayList();}
+							         	r.useScopes.add(uses.getText());
+							         }
+							
+					}
+				}
+				else {
+					break _loop66;
+				}
+				
+			} while (true);
+			}
+			_t = __t63;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ruleAction(AST _t,
+		Rule r
+	) throws RecognitionException {
+		
+		GrammarAST ruleAction_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST amp = null;
+		GrammarAST id = null;
+		GrammarAST a = null;
+		
+		try {      // for error handling
+			AST __t60 = _t;
+			amp = _t==ASTNULL ? null :(GrammarAST)_t;
+			match(_t,AMPERSAND);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			a = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t60;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				if (r!=null) r.defineNamedAction(amp,id,a);
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void block(AST _t) throws RecognitionException {
+		
+		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		this.blockLevel++;
+		if ( this.blockLevel==1 ) {this.outerAltNum=1;}
+		
+		
+		try {      // for error handling
+			AST __t68 = _t;
+			GrammarAST tmp18_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				optionsSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case ALT:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop71:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					blockAction(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop71;
+				}
+				
+			} while (true);
+			}
+			{
+			int _cnt73=0;
+			_loop73:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					alternative(_t);
+					_t = _retTree;
+					rewrite(_t);
+					_t = _retTree;
+					if ( inputState.guessing==0 ) {
+						if ( this.blockLevel==1 ) {this.outerAltNum++;}
+					}
+				}
+				else {
+					if ( _cnt73>=1 ) { break _loop73; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt73++;
+			} while (true);
+			}
+			GrammarAST tmp19_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t68;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				this.blockLevel--;
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void exceptionGroup(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			{
+				{
+				int _cnt82=0;
+				_loop82:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==LITERAL_catch)) {
+						exceptionHandler(_t);
+						_t = _retTree;
+					}
+					else {
+						if ( _cnt82>=1 ) { break _loop82; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt82++;
+				} while (true);
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case LITERAL_finally:
+				{
+					finallyClause(_t);
+					_t = _retTree;
+					break;
+				}
+				case EOR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				break;
+			}
+			case LITERAL_finally:
+			{
+				finallyClause(_t);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final int  countAltsForRule(AST _t) throws RecognitionException {
+		int n=0;
+		
+		GrammarAST countAltsForRule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		
+		try {      // for error handling
+			AST __t46 = _t;
+			GrammarAST tmp20_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp21_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getNextSibling();
+			GrammarAST tmp22_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp23_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				GrammarAST tmp24_AST_in = (GrammarAST)_t;
+				match(_t,SCOPE);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop51:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp25_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop51;
+				}
+				
+			} while (true);
+			}
+			AST __t52 = _t;
+			GrammarAST tmp26_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp27_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ALT:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			int _cnt57=0;
+			_loop57:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					GrammarAST tmp28_AST_in = (GrammarAST)_t;
+					match(_t,ALT);
+					_t = _t.getNextSibling();
+					{
+					_loop56:
+					do {
+						if (_t==null) _t=ASTNULL;
+						if ((_t.getType()==REWRITE)) {
+							GrammarAST tmp29_AST_in = (GrammarAST)_t;
+							match(_t,REWRITE);
+							_t = _t.getNextSibling();
+						}
+						else {
+							break _loop56;
+						}
+						
+					} while (true);
+					}
+					if ( inputState.guessing==0 ) {
+						n++;
+					}
+				}
+				else {
+					if ( _cnt57>=1 ) { break _loop57; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt57++;
+			} while (true);
+			}
+			GrammarAST tmp30_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t52;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp31_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			_t = __t46;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+		return n;
+	}
+	
+	public final void blockAction(AST _t) throws RecognitionException {
+		
+		GrammarAST blockAction_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST amp = null;
+		GrammarAST id = null;
+		GrammarAST a = null;
+		
+		try {      // for error handling
+			AST __t75 = _t;
+			amp = _t==ASTNULL ? null :(GrammarAST)_t;
+			match(_t,AMPERSAND);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			a = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t75;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void alternative(AST _t) throws RecognitionException {
+		
+		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		if ( grammar.type!=Grammar.LEXER && grammar.getOption("output")!=null && blockLevel==1 ) {
+			GrammarAST aRewriteNode = alternative_AST_in.findFirstType(REWRITE);
+			if ( aRewriteNode!=null||
+				 (alternative_AST_in.getNextSibling()!=null &&
+				  alternative_AST_in.getNextSibling().getType()==REWRITE) )
+			{
+				Rule r = grammar.getRule(currentRuleName);
+				r.trackAltsWithRewrites(alternative_AST_in,this.outerAltNum);
+			}
+		}
+		
+		
+		try {      // for error handling
+			AST __t77 = _t;
+			GrammarAST tmp32_AST_in = (GrammarAST)_t;
+			match(_t,ALT);
+			_t = _t.getFirstChild();
+			{
+			int _cnt79=0;
+			_loop79:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
+					element(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt79>=1 ) { break _loop79; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt79++;
+			} while (true);
+			}
+			GrammarAST tmp33_AST_in = (GrammarAST)_t;
+			match(_t,EOA);
+			_t = _t.getNextSibling();
+			_t = __t77;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST pred = null;
+		
+		currentRewriteRule = rewrite_AST_in; // has to execute during guessing
+		if ( grammar.buildAST() ) {
+		rewrite_AST_in.rewriteRefsDeep = new HashSet<GrammarAST>();
+		}
+		
+		
+		try {      // for error handling
+			{
+			_loop124:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==REWRITE)) {
+					AST __t122 = _t;
+					GrammarAST tmp34_AST_in = (GrammarAST)_t;
+					match(_t,REWRITE);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case SEMPRED:
+					{
+						pred = (GrammarAST)_t;
+						match(_t,SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case ALT:
+					case TEMPLATE:
+					case ACTION:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					rewrite_alternative(_t);
+					_t = _retTree;
+					_t = __t122;
+					_t = _t.getNextSibling();
+					if ( inputState.guessing==0 ) {
+						
+						if ( pred!=null ) {
+						pred.outerAltNum = this.outerAltNum;
+						trackInlineAction(pred);
+						}
+						
+					}
+				}
+				else {
+					break _loop124;
+				}
+				
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void element(AST _t) throws RecognitionException {
+		
+		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST el = null;
+		GrammarAST id2 = null;
+		GrammarAST a2 = null;
+		GrammarAST act = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				AST __t89 = _t;
+				GrammarAST tmp35_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t89;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				AST __t90 = _t;
+				GrammarAST tmp36_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t90;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case WILDCARD:
+			{
+				atom(_t);
+				_t = _retTree;
+				break;
+			}
+			case NOT:
+			{
+				AST __t91 = _t;
+				GrammarAST tmp37_AST_in = (GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				element(_t);
+				_t = _retTree;
+				_t = __t91;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RANGE:
+			{
+				AST __t92 = _t;
+				GrammarAST tmp38_AST_in = (GrammarAST)_t;
+				match(_t,RANGE);
+				_t = _t.getFirstChild();
+				atom(_t);
+				_t = _retTree;
+				atom(_t);
+				_t = _retTree;
+				_t = __t92;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t93 = _t;
+				GrammarAST tmp39_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				atom(_t);
+				_t = _retTree;
+				atom(_t);
+				_t = _retTree;
+				_t = __t93;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t94 = _t;
+				GrammarAST tmp40_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				id = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				el = _t==ASTNULL ? null : (GrammarAST)_t;
+				element(_t);
+				_t = _retTree;
+				_t = __t94;
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+							if ( el.getType()==ANTLRParser.ROOT ||
+					el.getType()==ANTLRParser.BANG )
+							{
+					el = (GrammarAST)el.getFirstChild();
+					}
+						if ( el.getType()==RULE_REF) {
+							grammar.defineRuleRefLabel(currentRuleName,id.getToken(),el);
+						}
+						else {
+							grammar.defineTokenRefLabel(currentRuleName,id.getToken(),el);
+						}
+						
+				}
+				break;
+			}
+			case PLUS_ASSIGN:
+			{
+				AST __t95 = _t;
+				GrammarAST tmp41_AST_in = (GrammarAST)_t;
+				match(_t,PLUS_ASSIGN);
+				_t = _t.getFirstChild();
+				id2 = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				a2 = _t==ASTNULL ? null : (GrammarAST)_t;
+				element(_t);
+				_t = _retTree;
+				if ( inputState.guessing==0 ) {
+					
+					if ( a2.getType()==ANTLRParser.ROOT ||
+					a2.getType()==ANTLRParser.BANG )
+					{
+					a2 = (GrammarAST)a2.getFirstChild();
+					}
+						    if ( a2.getType()==RULE_REF ) {
+						    	grammar.defineRuleListLabel(currentRuleName,id2.getToken(),a2);
+						    }
+						    else {
+						    	grammar.defineTokenListLabel(currentRuleName,id2.getToken(),a2);
+						    }
+						
+				}
+				_t = __t95;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			{
+				ebnf(_t);
+				_t = _retTree;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				tree(_t);
+				_t = _retTree;
+				break;
+			}
+			case SYNPRED:
+			{
+				AST __t96 = _t;
+				GrammarAST tmp42_AST_in = (GrammarAST)_t;
+				match(_t,SYNPRED);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t96;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ACTION:
+			{
+				act = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					act.outerAltNum = this.outerAltNum;
+							trackInlineAction(act);
+					
+				}
+				break;
+			}
+			case SEMPRED:
+			{
+				GrammarAST tmp43_AST_in = (GrammarAST)_t;
+				match(_t,SEMPRED);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					tmp43_AST_in.outerAltNum = this.outerAltNum;
+					trackInlineAction(tmp43_AST_in);
+					
+				}
+				break;
+			}
+			case SYN_SEMPRED:
+			{
+				GrammarAST tmp44_AST_in = (GrammarAST)_t;
+				match(_t,SYN_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BACKTRACK_SEMPRED:
+			{
+				GrammarAST tmp45_AST_in = (GrammarAST)_t;
+				match(_t,BACKTRACK_SEMPRED);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case GATED_SEMPRED:
+			{
+				GrammarAST tmp46_AST_in = (GrammarAST)_t;
+				match(_t,GATED_SEMPRED);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					tmp46_AST_in.outerAltNum = this.outerAltNum;
+					trackInlineAction(tmp46_AST_in);
+					
+				}
+				break;
+			}
+			case EPSILON:
+			{
+				GrammarAST tmp47_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void exceptionHandler(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t85 = _t;
+			GrammarAST tmp48_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_catch);
+			_t = _t.getFirstChild();
+			GrammarAST tmp49_AST_in = (GrammarAST)_t;
+			match(_t,ARG_ACTION);
+			_t = _t.getNextSibling();
+			GrammarAST tmp50_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t85;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				trackInlineAction(tmp50_AST_in);
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void finallyClause(AST _t) throws RecognitionException {
+		
+		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t87 = _t;
+			GrammarAST tmp51_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_finally);
+			_t = _t.getFirstChild();
+			GrammarAST tmp52_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t87;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				trackInlineAction(tmp52_AST_in);
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void atom(AST _t) throws RecognitionException {
+		
+		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST rr = null;
+		GrammarAST rarg = null;
+		GrammarAST t = null;
+		GrammarAST targ = null;
+		GrammarAST c = null;
+		GrammarAST s = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case RULE_REF:
+			{
+				AST __t115 = _t;
+				rr = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,RULE_REF);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ARG_ACTION:
+				{
+					rarg = (GrammarAST)_t;
+					match(_t,ARG_ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t115;
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					grammar.altReferencesRule(currentRuleName, rr, this.outerAltNum);
+							if ( rarg!=null ) {
+					rarg.outerAltNum = this.outerAltNum;
+					trackInlineAction(rarg);
+					}
+					
+				}
+				break;
+			}
+			case TOKEN_REF:
+			{
+				AST __t117 = _t;
+				t = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ARG_ACTION:
+				{
+					targ = (GrammarAST)_t;
+					match(_t,ARG_ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t117;
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+							if ( targ!=null ) {
+					targ.outerAltNum = this.outerAltNum;
+					trackInlineAction(targ);
+					}
+						if ( grammar.type==Grammar.LEXER ) {
+							grammar.altReferencesRule(currentRuleName, t, this.outerAltNum);
+						}
+						else {
+							grammar.altReferencesTokenID(currentRuleName, t, this.outerAltNum);
+						}
+						
+				}
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+						if ( grammar.type!=Grammar.LEXER ) {
+							Rule rule = grammar.getRule(currentRuleName);
+								if ( rule!=null ) {
+									rule.trackTokenReferenceInAlt(c, outerAltNum);
+							}
+						}
+						
+				}
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+						if ( grammar.type!=Grammar.LEXER ) {
+							Rule rule = grammar.getRule(currentRuleName);
+								if ( rule!=null ) {
+									rule.trackTokenReferenceInAlt(s, outerAltNum);
+							}
+						}
+						
+				}
+				break;
+			}
+			case WILDCARD:
+			{
+				GrammarAST tmp53_AST_in = (GrammarAST)_t;
+				match(_t,WILDCARD);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ebnf(AST _t) throws RecognitionException {
+		
+		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case BLOCK:
+			{
+				block(_t);
+				_t = _retTree;
+				break;
+			}
+			case OPTIONAL:
+			{
+				AST __t100 = _t;
+				GrammarAST tmp54_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t100;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+				boolean synPredMatched99 = false;
+				if (_t==null) _t=ASTNULL;
+				if (((_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE))) {
+					AST __t99 = _t;
+					synPredMatched99 = true;
+					inputState.guessing++;
+					try {
+						{
+						dotLoop(_t);
+						_t = _retTree;
+						}
+					}
+					catch (RecognitionException pe) {
+						synPredMatched99 = false;
+					}
+					_t = __t99;
+inputState.guessing--;
+				}
+				if ( synPredMatched99 ) {
+					dotLoop(_t);
+					_t = _retTree;
+				}
+				else if ((_t.getType()==CLOSURE)) {
+					AST __t101 = _t;
+					GrammarAST tmp55_AST_in = (GrammarAST)_t;
+					match(_t,CLOSURE);
+					_t = _t.getFirstChild();
+					block(_t);
+					_t = _retTree;
+					_t = __t101;
+					_t = _t.getNextSibling();
+				}
+				else if ((_t.getType()==POSITIVE_CLOSURE)) {
+					AST __t102 = _t;
+					GrammarAST tmp56_AST_in = (GrammarAST)_t;
+					match(_t,POSITIVE_CLOSURE);
+					_t = _t.getFirstChild();
+					block(_t);
+					_t = _retTree;
+					_t = __t102;
+					_t = _t.getNextSibling();
+				}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void tree(AST _t) throws RecognitionException {
+		
+		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t111 = _t;
+			GrammarAST tmp57_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			element(_t);
+			_t = _retTree;
+			{
+			_loop113:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
+					element(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop113;
+				}
+				
+			} while (true);
+			}
+			_t = __t111;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+/** Track the .* and .+ idioms and make them nongreedy by default.
+ */
+	public final void dotLoop(AST _t) throws RecognitionException {
+		
+		GrammarAST dotLoop_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		GrammarAST block = (GrammarAST)dotLoop_AST_in.getFirstChild();
+		
+		
+		try {      // for error handling
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case CLOSURE:
+			{
+				AST __t105 = _t;
+				GrammarAST tmp58_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				dotBlock(_t);
+				_t = _retTree;
+				_t = __t105;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				AST __t106 = _t;
+				GrammarAST tmp59_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				dotBlock(_t);
+				_t = _retTree;
+				_t = __t106;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			if ( inputState.guessing==0 ) {
+				
+				Map opts=new HashMap();
+				opts.put("greedy", "false");
+				if ( grammar.type!=Grammar.LEXER ) {
+				// parser grammars assume k=1 for .* loops
+				// otherwise they (analysis?) look til EOF!
+				opts.put("k", Utils.integer(1));
+				}
+				block.setOptions(grammar,opts);
+				
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void dotBlock(AST _t) throws RecognitionException {
+		
+		GrammarAST dotBlock_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t108 = _t;
+			GrammarAST tmp60_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			AST __t109 = _t;
+			GrammarAST tmp61_AST_in = (GrammarAST)_t;
+			match(_t,ALT);
+			_t = _t.getFirstChild();
+			GrammarAST tmp62_AST_in = (GrammarAST)_t;
+			match(_t,WILDCARD);
+			_t = _t.getNextSibling();
+			GrammarAST tmp63_AST_in = (GrammarAST)_t;
+			match(_t,EOA);
+			_t = _t.getNextSibling();
+			_t = __t109;
+			_t = _t.getNextSibling();
+			GrammarAST tmp64_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t108;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ast_suffix(AST _t) throws RecognitionException {
+		
+		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				GrammarAST tmp65_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				GrammarAST tmp66_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_alternative(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST a = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			if (((_t.getType()==ALT))&&(grammar.buildAST())) {
+				AST __t128 = _t;
+				a = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,ALT);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case OPTIONAL:
+				case CLOSURE:
+				case POSITIVE_CLOSURE:
+				case LABEL:
+				case ACTION:
+				case STRING_LITERAL:
+				case CHAR_LITERAL:
+				case TOKEN_REF:
+				case RULE_REF:
+				case TREE_BEGIN:
+				{
+					{
+					int _cnt131=0;
+					_loop131:
+					do {
+						if (_t==null) _t=ASTNULL;
+						if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
+							rewrite_element(_t);
+							_t = _retTree;
+						}
+						else {
+							if ( _cnt131>=1 ) { break _loop131; } else {throw new NoViableAltException(_t);}
+						}
+						
+						_cnt131++;
+					} while (true);
+					}
+					break;
+				}
+				case EPSILON:
+				{
+					GrammarAST tmp67_AST_in = (GrammarAST)_t;
+					match(_t,EPSILON);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				GrammarAST tmp68_AST_in = (GrammarAST)_t;
+				match(_t,EOA);
+				_t = _t.getNextSibling();
+				_t = __t128;
+				_t = _t.getNextSibling();
+			}
+			else if (((_t.getType()==ALT||_t.getType()==TEMPLATE||_t.getType()==ACTION))&&(grammar.buildTemplate())) {
+				rewrite_template(_t);
+				_t = _retTree;
+			}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_block(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		GrammarAST enclosingBlock = currentRewriteBlock;
+		if ( inputState.guessing==0 ) {  // don't do if guessing
+		currentRewriteBlock=rewrite_block_AST_in; // pts to BLOCK node
+		currentRewriteBlock.rewriteRefsShallow = new HashSet<GrammarAST>();
+		currentRewriteBlock.rewriteRefsDeep = new HashSet<GrammarAST>();
+		}
+		
+		
+		try {      // for error handling
+			AST __t126 = _t;
+			GrammarAST tmp69_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			rewrite_alternative(_t);
+			_t = _retTree;
+			GrammarAST tmp70_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t126;
+			_t = _t.getNextSibling();
+			if ( inputState.guessing==0 ) {
+				
+				// copy the element refs in this block to the surrounding block
+				if ( enclosingBlock!=null ) {
+				enclosingBlock.rewriteRefsDeep
+				.addAll(currentRewriteBlock.rewriteRefsShallow);
+				}
+				currentRewriteBlock = enclosingBlock; // restore old BLOCK ptr
+				
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_element(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LABEL:
+			case ACTION:
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			{
+				rewrite_atom(_t);
+				_t = _retTree;
+				break;
+			}
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			{
+				rewrite_ebnf(_t);
+				_t = _retTree;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				rewrite_tree(_t);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_template(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_template_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		GrammarAST ind = null;
+		GrammarAST arg = null;
+		GrammarAST a = null;
+		GrammarAST act = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ALT:
+			{
+				AST __t146 = _t;
+				GrammarAST tmp71_AST_in = (GrammarAST)_t;
+				match(_t,ALT);
+				_t = _t.getFirstChild();
+				GrammarAST tmp72_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				GrammarAST tmp73_AST_in = (GrammarAST)_t;
+				match(_t,EOA);
+				_t = _t.getNextSibling();
+				_t = __t146;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TEMPLATE:
+			{
+				AST __t147 = _t;
+				GrammarAST tmp74_AST_in = (GrammarAST)_t;
+				match(_t,TEMPLATE);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ID:
+				{
+					id = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case ACTION:
+				{
+					ind = (GrammarAST)_t;
+					match(_t,ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				AST __t149 = _t;
+				GrammarAST tmp75_AST_in = (GrammarAST)_t;
+				match(_t,ARGLIST);
+				_t = _t.getFirstChild();
+				{
+				_loop152:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==ARG)) {
+						AST __t151 = _t;
+						GrammarAST tmp76_AST_in = (GrammarAST)_t;
+						match(_t,ARG);
+						_t = _t.getFirstChild();
+						arg = (GrammarAST)_t;
+						match(_t,ID);
+						_t = _t.getNextSibling();
+						a = (GrammarAST)_t;
+						match(_t,ACTION);
+						_t = _t.getNextSibling();
+						_t = __t151;
+						_t = _t.getNextSibling();
+						if ( inputState.guessing==0 ) {
+							
+							a.outerAltNum = this.outerAltNum;
+							trackInlineAction(a);
+							
+						}
+					}
+					else {
+						break _loop152;
+					}
+					
+				} while (true);
+				}
+				_t = __t149;
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					if ( ind!=null ) {
+					ind.outerAltNum = this.outerAltNum;
+					trackInlineAction(ind);
+					}
+					
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case DOUBLE_QUOTE_STRING_LITERAL:
+				{
+					GrammarAST tmp77_AST_in = (GrammarAST)_t;
+					match(_t,DOUBLE_QUOTE_STRING_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case DOUBLE_ANGLE_STRING_LITERAL:
+				{
+					GrammarAST tmp78_AST_in = (GrammarAST)_t;
+					match(_t,DOUBLE_ANGLE_STRING_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t147;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ACTION:
+			{
+				act = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					act.outerAltNum = this.outerAltNum;
+					trackInlineAction(act);
+					
+				}
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_atom(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST arg = null;
+		
+		Rule r = grammar.getRule(currentRuleName);
+		Set tokenRefsInAlt = r.getTokenRefsInAlt(outerAltNum);
+		boolean imaginary =
+		rewrite_atom_AST_in.getType()==TOKEN_REF &&
+		!tokenRefsInAlt.contains(rewrite_atom_AST_in.getText());
+		if ( !imaginary && grammar.buildAST() &&
+		(rewrite_atom_AST_in.getType()==RULE_REF ||
+		rewrite_atom_AST_in.getType()==LABEL ||
+		rewrite_atom_AST_in.getType()==TOKEN_REF ||
+		rewrite_atom_AST_in.getType()==CHAR_LITERAL ||
+		rewrite_atom_AST_in.getType()==STRING_LITERAL) )
+		{
+		// track per block and for entire rewrite rule
+		if ( currentRewriteBlock!=null ) {
+		currentRewriteBlock.rewriteRefsShallow.add(rewrite_atom_AST_in);
+		currentRewriteBlock.rewriteRefsDeep.add(rewrite_atom_AST_in);
+		}
+		currentRewriteRule.rewriteRefsDeep.add(rewrite_atom_AST_in);
+		}
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case RULE_REF:
+			{
+				GrammarAST tmp79_AST_in = (GrammarAST)_t;
+				match(_t,RULE_REF);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			{
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case TOKEN_REF:
+				{
+					AST __t143 = _t;
+					GrammarAST tmp80_AST_in = (GrammarAST)_t;
+					match(_t,TOKEN_REF);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case ARG_ACTION:
+					{
+						arg = (GrammarAST)_t;
+						match(_t,ARG_ACTION);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t143;
+					_t = _t.getNextSibling();
+					break;
+				}
+				case CHAR_LITERAL:
+				{
+					GrammarAST tmp81_AST_in = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case STRING_LITERAL:
+				{
+					GrammarAST tmp82_AST_in = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getNextSibling();
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				if ( inputState.guessing==0 ) {
+					
+					if ( arg!=null ) {
+					arg.outerAltNum = this.outerAltNum;
+					trackInlineAction(arg);
+					}
+					
+				}
+				break;
+			}
+			case LABEL:
+			{
+				GrammarAST tmp83_AST_in = (GrammarAST)_t;
+				match(_t,LABEL);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp84_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				if ( inputState.guessing==0 ) {
+					
+					tmp84_AST_in.outerAltNum = this.outerAltNum;
+					trackInlineAction(tmp84_AST_in);
+					
+				}
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_ebnf(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONAL:
+			{
+				AST __t134 = _t;
+				GrammarAST tmp85_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				rewrite_block(_t);
+				_t = _retTree;
+				_t = __t134;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case CLOSURE:
+			{
+				AST __t135 = _t;
+				GrammarAST tmp86_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				rewrite_block(_t);
+				_t = _retTree;
+				_t = __t135;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				AST __t136 = _t;
+				GrammarAST tmp87_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				rewrite_block(_t);
+				_t = _retTree;
+				_t = __t136;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rewrite_tree(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t138 = _t;
+			GrammarAST tmp88_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			rewrite_atom(_t);
+			_t = _retTree;
+			{
+			_loop140:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
+					rewrite_element(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop140;
+				}
+				
+			} while (true);
+			}
+			_t = __t138;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			if (inputState.guessing==0) {
+				reportError(ex);
+				if (_t!=null) {_t = _t.getNextSibling();}
+			} else {
+			  throw ex;
+			}
+		}
+		_retTree = _t;
+	}
+	
+	
+	public static final String[] _tokenNames = {
+		"<0>",
+		"EOF",
+		"<2>",
+		"NULL_TREE_LOOKAHEAD",
+		"\"options\"",
+		"\"tokens\"",
+		"\"parser\"",
+		"LEXER",
+		"RULE",
+		"BLOCK",
+		"OPTIONAL",
+		"CLOSURE",
+		"POSITIVE_CLOSURE",
+		"SYNPRED",
+		"RANGE",
+		"CHAR_RANGE",
+		"EPSILON",
+		"ALT",
+		"EOR",
+		"EOB",
+		"EOA",
+		"ID",
+		"ARG",
+		"ARGLIST",
+		"RET",
+		"LEXER_GRAMMAR",
+		"PARSER_GRAMMAR",
+		"TREE_GRAMMAR",
+		"COMBINED_GRAMMAR",
+		"INITACTION",
+		"LABEL",
+		"TEMPLATE",
+		"\"scope\"",
+		"GATED_SEMPRED",
+		"SYN_SEMPRED",
+		"BACKTRACK_SEMPRED",
+		"\"fragment\"",
+		"ACTION",
+		"DOC_COMMENT",
+		"SEMI",
+		"\"lexer\"",
+		"\"tree\"",
+		"\"grammar\"",
+		"AMPERSAND",
+		"COLON",
+		"RCURLY",
+		"ASSIGN",
+		"STRING_LITERAL",
+		"CHAR_LITERAL",
+		"INT",
+		"STAR",
+		"TOKEN_REF",
+		"\"protected\"",
+		"\"public\"",
+		"\"private\"",
+		"BANG",
+		"ARG_ACTION",
+		"\"returns\"",
+		"\"throws\"",
+		"COMMA",
+		"LPAREN",
+		"OR",
+		"RPAREN",
+		"\"catch\"",
+		"\"finally\"",
+		"PLUS_ASSIGN",
+		"SEMPRED",
+		"IMPLIES",
+		"ROOT",
+		"RULE_REF",
+		"NOT",
+		"TREE_BEGIN",
+		"QUESTION",
+		"PLUS",
+		"WILDCARD",
+		"REWRITE",
+		"DOLLAR",
+		"DOUBLE_QUOTE_STRING_LITERAL",
+		"DOUBLE_ANGLE_STRING_LITERAL",
+		"WS",
+		"COMMENT",
+		"SL_COMMENT",
+		"ML_COMMENT",
+		"OPEN_ELEMENT_OPTION",
+		"CLOSE_ELEMENT_OPTION",
+		"ESC",
+		"DIGIT",
+		"XDIGIT",
+		"NESTED_ARG_ACTION",
+		"NESTED_ACTION",
+		"ACTION_CHAR_LITERAL",
+		"ACTION_STRING_LITERAL",
+		"ACTION_ESC",
+		"WS_LOOP",
+		"INTERNAL_RULE_REF",
+		"WS_OPT",
+		"SRC"
+	};
+	
+	}
+	
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalker.smap b/src/org/antlr/tool/DefineGrammarItemsWalker.smap
new file mode 100644
index 0000000..5ce8cf0
--- /dev/null
+++ b/src/org/antlr/tool/DefineGrammarItemsWalker.smap
@@ -0,0 +1,2248 @@
+SMAP
+DefineGrammarItemsWalker.java
+G
+*S G
+*F
++ 0 define.g
+define.g
+*L
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+1:32
+43:50
+44:51
+45:52
+46:53
+47:54
+48:55
+49:56
+51:58
+52:59
+53:60
+54:61
+55:62
+56:63
+57:64
+58:65
+59:66
+60:67
+61:68
+62:69
+63:70
+64:71
+65:72
+67:74
+68:75
+69:76
+71:78
+72:79
+73:80
+74:81
+75:82
+76:83
+77:84
+78:85
+79:86
+80:87
+81:88
+82:89
+83:90
+84:91
+85:92
+86:93
+87:94
+88:95
+89:96
+90:97
+91:98
+92:99
+93:100
+94:101
+95:102
+96:103
+97:104
+98:105
+99:106
+100:107
+101:108
+102:109
+103:110
+105:112
+106:113
+107:114
+108:115
+109:116
+110:117
+114:123
+114:124
+114:125
+114:133
+114:206
+114:207
+114:208
+114:209
+114:210
+114:211
+114:212
+114:213
+114:214
+114:215
+114:216
+115:129
+116:130
+119:135
+119:136
+119:137
+119:138
+119:139
+119:140
+119:141
+119:142
+119:143
+119:144
+119:146
+119:147
+119:148
+119:149
+119:197
+119:198
+119:199
+119:200
+119:201
+120:152
+120:153
+120:154
+120:155
+120:156
+120:157
+120:158
+120:159
+120:161
+120:162
+120:163
+120:164
+121:167
+121:168
+121:169
+121:170
+121:171
+121:172
+121:173
+121:174
+121:176
+121:177
+121:178
+121:179
+122:182
+122:183
+122:184
+122:185
+122:186
+122:187
+122:188
+122:189
+122:191
+122:192
+122:193
+122:194
+124:203
+124:204
+127:349
+127:355
+127:375
+127:376
+127:377
+127:378
+127:379
+127:380
+127:381
+127:382
+127:383
+127:384
+127:385
+128:352
+128:353
+128:356
+128:357
+128:358
+128:359
+128:360
+128:361
+128:362
+128:363
+128:364
+128:365
+128:366
+128:367
+129:368
+130:370
+131:371
+132:372
+136:218
+136:228
+136:337
+136:338
+136:339
+136:340
+136:341
+136:342
+136:343
+136:344
+136:345
+136:346
+136:347
+137:224
+138:225
+141:221
+141:229
+141:230
+141:231
+142:222
+142:233
+142:234
+142:235
+142:236
+142:237
+142:238
+142:239
+142:250
+142:251
+142:252
+142:253
+142:254
+144:257
+144:258
+144:259
+144:260
+144:261
+144:262
+144:275
+144:276
+144:277
+144:278
+144:279
+145:264
+145:265
+147:282
+147:283
+147:284
+147:285
+147:286
+147:287
+147:296
+147:297
+147:298
+147:299
+147:300
+148:302
+148:303
+148:304
+148:305
+148:306
+148:307
+148:308
+148:309
+148:310
+148:311
+148:312
+148:314
+148:315
+149:317
+149:318
+149:319
+149:320
+149:321
+149:322
+149:329
+149:330
+149:331
+149:332
+149:333
+150:335
+150:336
+153:446
+153:450
+153:467
+153:468
+153:469
+153:470
+153:471
+153:472
+153:473
+153:474
+153:475
+153:476
+153:477
+154:452
+154:453
+154:454
+154:455
+154:456
+154:457
+154:458
+154:459
+154:460
+154:461
+154:462
+154:464
+154:465
+154:466
+157:512
+157:525
+157:572
+157:573
+157:574
+157:575
+157:576
+157:577
+157:578
+157:579
+157:580
+157:581
+157:582
+158:521
+159:522
+162:515
+162:516
+162:526
+162:527
+162:528
+162:529
+162:530
+162:531
+162:532
+162:565
+162:566
+163:517
+163:518
+163:534
+163:535
+163:536
+163:537
+163:538
+163:539
+163:540
+163:541
+163:542
+163:543
+163:559
+163:560
+163:561
+163:562
+163:563
+164:544
+164:545
+165:519
+165:549
+165:550
+165:551
+165:552
+165:553
+166:554
+166:555
+169:567
+170:569
+174:387
+174:391
+174:395
+174:396
+174:397
+174:398
+174:399
+174:400
+174:401
+174:402
+174:403
+174:404
+174:405
+175:392
+175:393
+175:394
+178:407
+178:411
+178:434
+178:435
+178:436
+178:437
+178:438
+178:439
+178:440
+178:441
+178:442
+178:443
+178:444
+179:412
+179:413
+179:414
+179:415
+179:417
+179:418
+179:419
+179:420
+179:421
+179:422
+179:423
+179:424
+179:425
+179:426
+179:427
+179:429
+179:430
+179:431
+179:432
+179:433
+182:584
+182:592
+182:593
+182:594
+182:638
+182:639
+182:640
+182:641
+182:642
+182:643
+182:644
+182:645
+182:646
+182:647
+182:648
+182:649
+182:650
+182:651
+182:652
+182:653
+183:587
+183:595
+183:596
+183:597
+183:598
+183:599
+184:602
+184:603
+184:604
+184:605
+184:606
+184:607
+184:634
+184:635
+185:588
+185:608
+185:609
+185:610
+186:589
+186:612
+186:613
+186:614
+186:615
+186:616
+186:617
+186:618
+186:628
+186:629
+186:630
+186:631
+186:632
+187:590
+187:621
+187:622
+187:623
+187:624
+187:625
+192:479
+192:483
+192:500
+192:501
+192:502
+192:503
+192:504
+192:505
+192:506
+192:507
+192:508
+192:509
+192:510
+193:485
+193:486
+193:487
+193:488
+193:489
+193:490
+193:491
+193:492
+193:493
+193:494
+193:495
+193:497
+193:498
+193:499
+196:655
+196:669
+196:872
+196:873
+196:874
+196:875
+196:876
+196:877
+196:878
+196:879
+196:880
+196:881
+196:882
+197:663
+198:664
+199:665
+200:666
+203:658
+203:670
+203:671
+203:672
+203:673
+203:674
+203:675
+203:676
+203:677
+203:678
+203:870
+203:871
+204:681
+204:682
+204:683
+204:684
+204:685
+204:686
+204:687
+204:688
+204:689
+204:696
+204:697
+204:698
+204:699
+204:700
+205:659
+205:702
+205:703
+205:704
+205:705
+205:707
+205:708
+205:709
+205:710
+205:711
+205:712
+205:713
+205:720
+205:721
+205:722
+205:723
+205:724
+205:726
+205:727
+206:660
+206:728
+206:729
+206:730
+206:731
+206:733
+206:734
+206:735
+206:736
+206:737
+206:738
+206:739
+206:746
+206:747
+206:748
+206:749
+206:750
+206:752
+206:753
+207:755
+207:756
+207:757
+207:758
+207:759
+207:760
+207:769
+207:770
+207:771
+207:772
+207:773
+208:775
+209:777
+210:778
+211:779
+212:780
+213:781
+214:782
+215:783
+216:784
+217:785
+218:786
+219:787
+220:788
+221:789
+222:790
+223:791
+224:792
+225:793
+226:794
+227:795
+228:796
+229:797
+231:801
+231:802
+231:803
+231:804
+231:805
+231:806
+231:814
+231:815
+231:816
+231:817
+231:818
+232:820
+232:821
+232:822
+232:823
+232:824
+232:825
+232:826
+232:827
+232:828
+232:829
+232:830
+232:832
+232:833
+233:834
+233:835
+234:661
+234:837
+234:838
+234:839
+235:841
+235:842
+235:843
+235:844
+235:845
+235:846
+235:847
+235:854
+235:855
+235:856
+235:857
+235:858
+236:860
+236:861
+236:862
+237:863
+238:865
+239:866
+240:867
+245:1219
+245:1220
+245:1225
+245:1408
+245:1409
+245:1410
+245:1411
+245:1412
+245:1413
+245:1414
+245:1415
+245:1416
+245:1417
+245:1418
+245:1419
+246:1223
+246:1226
+246:1227
+246:1228
+246:1229
+246:1230
+246:1231
+246:1232
+246:1234
+246:1235
+246:1236
+246:1237
+246:1238
+246:1239
+246:1240
+246:1241
+246:1242
+246:1249
+246:1250
+246:1251
+246:1252
+246:1253
+246:1255
+246:1256
+246:1257
+246:1258
+246:1259
+246:1260
+246:1262
+246:1263
+246:1264
+246:1265
+246:1266
+246:1267
+246:1268
+246:1277
+246:1278
+246:1279
+246:1280
+246:1281
+246:1284
+246:1285
+246:1286
+246:1287
+246:1288
+246:1289
+246:1290
+246:1298
+246:1299
+246:1300
+246:1301
+246:1302
+246:1304
+246:1305
+246:1306
+246:1307
+246:1308
+246:1309
+246:1310
+246:1311
+246:1312
+246:1313
+246:1314
+246:1315
+246:1317
+246:1318
+246:1406
+246:1407
+247:1319
+247:1320
+247:1321
+247:1322
+247:1324
+247:1325
+247:1326
+247:1327
+247:1328
+247:1329
+247:1330
+247:1337
+247:1338
+247:1339
+247:1340
+247:1341
+247:1344
+247:1345
+247:1346
+247:1347
+247:1348
+247:1349
+247:1350
+247:1351
+247:1352
+247:1353
+247:1354
+247:1355
+247:1356
+247:1357
+247:1358
+247:1359
+247:1360
+247:1361
+247:1362
+247:1363
+247:1365
+247:1366
+247:1367
+247:1368
+247:1370
+247:1371
+247:1372
+247:1373
+247:1375
+247:1376
+247:1377
+247:1378
+247:1379
+247:1380
+247:1381
+247:1382
+248:1384
+248:1385
+248:1386
+248:1387
+248:1388
+248:1389
+248:1390
+248:1397
+248:1398
+248:1399
+248:1400
+248:1401
+249:1403
+249:1404
+249:1405
+253:1025
+253:1026
+253:1027
+253:1034
+253:1050
+253:1051
+253:1052
+253:1053
+253:1054
+253:1055
+253:1056
+253:1057
+253:1058
+253:1059
+253:1060
+254:1030
+254:1031
+254:1032
+254:1035
+254:1036
+254:1037
+254:1038
+254:1039
+254:1040
+254:1041
+254:1042
+254:1043
+254:1044
+254:1045
+254:1046
+254:1047
+254:1048
+257:884
+257:885
+257:892
+257:893
+257:894
+257:923
+257:924
+257:925
+257:926
+257:927
+257:928
+257:929
+257:930
+257:931
+257:932
+257:933
+257:934
+257:935
+257:936
+257:937
+257:938
+257:939
+258:889
+261:895
+261:896
+261:897
+261:898
+261:899
+262:902
+262:903
+262:904
+262:905
+262:906
+263:909
+263:910
+263:911
+263:912
+263:913
+264:916
+264:917
+264:918
+264:919
+264:920
+267:941
+267:942
+267:943
+267:949
+267:1013
+267:1014
+267:1015
+267:1016
+267:1017
+267:1018
+267:1019
+267:1020
+267:1021
+267:1022
+267:1023
+268:950
+268:951
+268:952
+268:953
+268:1011
+268:1012
+269:946
+269:955
+269:956
+269:957
+269:958
+269:959
+269:960
+269:961
+269:976
+269:977
+269:978
+269:979
+269:980
+270:962
+271:964
+272:965
+273:966
+276:947
+276:982
+276:983
+276:984
+276:985
+276:986
+276:987
+276:988
+276:989
+276:1004
+276:1005
+276:1006
+276:1007
+276:1009
+276:1010
+277:990
+278:992
+279:993
+280:994
+281:995
+282:996
+283:997
+284:998
+285:999
+286:1000
+287:1001
+293:1062
+293:1070
+293:1138
+293:1139
+293:1140
+293:1141
+293:1142
+293:1143
+293:1144
+293:1145
+293:1146
+293:1147
+293:1148
+294:1066
+295:1067
+298:1071
+298:1072
+298:1073
+298:1074
+298:1133
+298:1134
+299:1076
+299:1077
+299:1078
+299:1079
+299:1080
+299:1081
+299:1089
+299:1090
+299:1091
+299:1092
+299:1093
+300:1095
+300:1096
+300:1097
+300:1098
+300:1099
+300:1100
+300:1101
+300:1102
+300:1103
+300:1104
+300:1105
+300:1107
+300:1108
+301:1110
+301:1111
+301:1112
+301:1113
+301:1114
+301:1115
+301:1116
+301:1117
+301:1118
+301:1122
+301:1123
+301:1124
+301:1125
+301:1127
+301:1128
+301:1129
+302:1119
+302:1120
+304:1130
+304:1131
+304:1132
+306:1135
+306:1136
+310:1421
+310:1428
+310:1441
+310:1442
+310:1443
+310:1444
+310:1445
+310:1446
+310:1447
+310:1448
+310:1449
+310:1450
+310:1451
+311:1424
+311:1425
+311:1426
+311:1429
+311:1430
+311:1431
+311:1432
+311:1433
+311:1434
+311:1435
+311:1436
+311:1437
+311:1438
+311:1439
+311:1440
+314:1453
+314:1469
+314:1495
+314:1496
+314:1497
+314:1498
+314:1499
+314:1500
+314:1501
+314:1502
+314:1503
+314:1504
+314:1505
+315:1457
+316:1458
+317:1459
+318:1460
+319:1461
+320:1462
+321:1463
+322:1464
+323:1465
+324:1466
+327:1470
+327:1471
+327:1472
+327:1473
+327:1475
+327:1476
+327:1477
+327:1478
+327:1479
+327:1480
+327:1481
+327:1482
+327:1483
+327:1484
+327:1485
+327:1487
+327:1488
+327:1489
+327:1490
+327:1491
+327:1492
+327:1493
+327:1494
+330:1150
+330:1154
+330:1155
+330:1156
+330:1202
+330:1203
+330:1204
+330:1205
+330:1206
+330:1207
+330:1208
+330:1209
+330:1210
+330:1211
+330:1212
+330:1213
+330:1214
+330:1215
+330:1216
+330:1217
+331:1157
+331:1158
+331:1160
+331:1161
+331:1162
+331:1163
+331:1164
+331:1165
+331:1166
+331:1167
+331:1168
+331:1169
+331:1170
+331:1172
+331:1173
+331:1174
+331:1176
+331:1177
+331:1178
+331:1179
+331:1180
+331:1181
+331:1188
+331:1189
+331:1190
+331:1191
+331:1192
+332:1196
+332:1197
+332:1198
+332:1199
+335:1833
+335:1837
+335:1853
+335:1854
+335:1855
+335:1856
+335:1857
+335:1858
+335:1859
+335:1860
+335:1861
+335:1862
+335:1863
+336:1838
+336:1839
+336:1840
+336:1841
+336:1842
+336:1843
+336:1844
+336:1845
+336:1846
+336:1847
+336:1848
+336:1849
+336:1850
+336:1851
+339:1865
+339:1869
+339:1882
+339:1883
+339:1884
+339:1885
+339:1886
+339:1887
+339:1888
+339:1889
+339:1890
+339:1891
+339:1892
+340:1870
+340:1871
+340:1872
+340:1873
+340:1874
+340:1875
+340:1876
+340:1877
+340:1878
+340:1879
+340:1880
+343:1581
+343:1590
+343:1591
+343:1592
+343:1816
+343:1817
+343:1818
+343:1819
+343:1820
+343:1821
+343:1822
+343:1823
+343:1824
+343:1825
+343:1826
+343:1827
+343:1828
+343:1829
+343:1830
+343:1831
+344:1593
+344:1594
+344:1595
+344:1596
+344:1597
+344:1598
+344:1599
+344:1600
+344:1601
+344:1602
+345:1605
+345:1606
+345:1607
+345:1608
+345:1609
+345:1610
+345:1611
+345:1612
+345:1613
+345:1614
+346:1617
+346:1618
+346:1619
+346:1620
+346:1621
+346:1622
+346:1623
+346:1624
+347:1627
+347:1628
+347:1629
+347:1630
+347:1631
+347:1632
+347:1633
+347:1634
+347:1635
+347:1636
+348:1639
+348:1640
+348:1641
+348:1642
+348:1643
+348:1644
+348:1645
+348:1646
+348:1647
+348:1648
+348:1649
+348:1650
+349:1653
+349:1654
+349:1655
+349:1656
+349:1657
+349:1658
+349:1659
+349:1660
+349:1661
+349:1662
+349:1663
+349:1664
+350:1584
+350:1585
+350:1667
+350:1668
+350:1669
+350:1670
+350:1671
+350:1672
+350:1673
+350:1674
+350:1675
+350:1676
+350:1677
+350:1678
+350:1679
+350:1680
+351:1681
+352:1683
+353:1684
+354:1685
+355:1686
+356:1687
+357:1688
+358:1689
+359:1690
+360:1691
+361:1692
+362:1693
+364:1586
+364:1587
+364:1698
+364:1699
+364:1700
+364:1701
+364:1702
+364:1703
+364:1704
+364:1705
+364:1706
+364:1707
+364:1708
+364:1709
+364:1725
+364:1726
+365:1710
+366:1712
+367:1713
+368:1714
+369:1715
+370:1716
+371:1717
+372:1718
+373:1719
+374:1720
+375:1721
+376:1722
+379:1729
+379:1730
+379:1731
+379:1732
+379:1733
+379:1734
+379:1735
+380:1738
+380:1739
+380:1740
+380:1741
+381:1744
+381:1745
+381:1746
+381:1747
+381:1748
+381:1749
+381:1750
+381:1751
+381:1752
+381:1753
+382:1588
+382:1756
+382:1757
+382:1758
+382:1759
+382:1760
+383:1761
+384:1763
+385:1764
+387:1769
+387:1770
+387:1771
+387:1772
+387:1773
+388:1774
+389:1776
+390:1777
+392:1782
+392:1783
+392:1784
+392:1785
+392:1786
+393:1789
+393:1790
+393:1791
+393:1792
+393:1793
+394:1796
+394:1797
+394:1798
+394:1799
+394:1800
+395:1801
+396:1803
+397:1804
+399:1809
+399:1810
+399:1811
+399:1812
+399:1813
+402:2048
+402:2052
+402:2053
+402:2054
+402:2073
+402:2074
+402:2075
+402:2076
+402:2077
+402:2078
+402:2079
+402:2080
+402:2082
+402:2083
+402:2085
+402:2086
+402:2087
+402:2088
+402:2089
+402:2090
+402:2091
+402:2092
+402:2093
+402:2094
+402:2095
+402:2105
+402:2115
+402:2116
+402:2117
+402:2118
+402:2119
+402:2120
+402:2121
+402:2122
+402:2123
+402:2124
+402:2125
+402:2126
+402:2127
+402:2128
+402:2129
+402:2130
+403:2055
+403:2056
+403:2057
+403:2058
+404:2061
+404:2062
+404:2063
+404:2064
+404:2065
+404:2066
+404:2067
+404:2068
+404:2069
+404:2070
+405:2096
+405:2097
+405:2098
+405:2099
+405:2100
+405:2101
+405:2102
+405:2103
+405:2104
+406:2106
+406:2107
+406:2108
+406:2109
+406:2110
+406:2111
+406:2112
+406:2113
+406:2114
+411:2171
+411:2173
+411:2180
+411:2226
+411:2227
+411:2228
+411:2229
+411:2230
+411:2231
+411:2232
+411:2233
+411:2234
+411:2235
+411:2236
+412:2172
+412:2177
+415:2182
+415:2183
+415:2184
+415:2185
+415:2186
+415:2187
+415:2188
+415:2189
+415:2190
+415:2191
+415:2192
+415:2193
+415:2208
+415:2209
+415:2210
+415:2211
+415:2212
+416:2196
+416:2197
+416:2198
+416:2199
+416:2200
+416:2201
+416:2202
+416:2203
+416:2204
+416:2205
+418:2214
+419:2216
+420:2217
+421:2218
+422:2219
+423:2220
+424:2221
+425:2222
+426:2223
+430:2238
+430:2242
+430:2264
+430:2265
+430:2266
+430:2267
+430:2268
+430:2269
+430:2270
+430:2271
+430:2272
+430:2273
+430:2274
+431:2243
+431:2244
+431:2245
+431:2246
+431:2247
+431:2248
+431:2249
+431:2250
+431:2251
+431:2252
+431:2253
+431:2254
+431:2255
+431:2256
+431:2257
+431:2258
+431:2259
+431:2260
+431:2261
+431:2262
+431:2263
+434:2132
+434:2136
+434:2137
+434:2138
+434:2139
+434:2140
+434:2141
+434:2142
+434:2143
+434:2144
+434:2145
+434:2146
+434:2147
+434:2148
+434:2149
+434:2150
+434:2151
+434:2152
+434:2153
+434:2155
+434:2156
+434:2157
+434:2158
+434:2159
+434:2160
+434:2161
+434:2162
+434:2163
+434:2164
+434:2165
+434:2166
+434:2167
+434:2168
+434:2169
+437:1894
+437:1904
+437:1905
+437:1906
+437:2031
+437:2032
+437:2033
+437:2034
+437:2035
+437:2036
+437:2037
+437:2038
+437:2039
+437:2040
+437:2041
+437:2042
+437:2043
+437:2044
+437:2045
+437:2046
+438:1897
+438:1898
+438:1907
+438:1908
+438:1909
+438:1910
+438:1911
+438:1912
+438:1914
+438:1915
+438:1916
+438:1917
+438:1918
+438:1919
+438:1920
+438:1927
+438:1928
+438:1929
+438:1930
+438:1931
+438:1933
+438:1934
+439:1935
+440:1937
+441:1938
+442:1939
+443:1940
+444:1941
+446:1899
+446:1900
+446:1946
+446:1947
+446:1948
+446:1949
+446:1950
+446:1951
+446:1953
+446:1954
+446:1955
+446:1956
+446:1957
+446:1958
+446:1959
+446:1966
+446:1967
+446:1968
+446:1969
+446:1970
+446:1972
+446:1973
+447:1974
+448:1976
+449:1977
+450:1978
+451:1979
+452:1980
+453:1981
+454:1982
+455:1983
+456:1984
+457:1985
+459:1901
+459:1990
+459:1991
+459:1992
+459:1993
+459:1994
+460:1995
+461:1997
+462:1998
+463:1999
+464:2000
+465:2001
+466:2002
+468:1902
+468:2007
+468:2008
+468:2009
+468:2010
+468:2011
+469:2012
+470:2014
+471:2015
+472:2016
+473:2017
+474:2018
+475:2019
+477:2024
+477:2025
+477:2026
+477:2027
+477:2028
+480:2276
+480:2280
+480:2281
+480:2282
+480:2297
+480:2298
+480:2299
+480:2300
+480:2301
+480:2302
+480:2303
+480:2304
+480:2305
+480:2306
+480:2307
+480:2308
+480:2309
+480:2310
+480:2311
+480:2312
+481:2283
+481:2284
+481:2285
+481:2286
+481:2287
+482:2290
+482:2291
+482:2292
+482:2293
+482:2294
+485:1507
+485:1518
+485:1569
+485:1570
+485:1571
+485:1572
+485:1573
+485:1574
+485:1575
+485:1576
+485:1577
+485:1578
+485:1579
+486:1512
+487:1513
+488:1514
+489:1515
+492:1519
+492:1520
+492:1521
+492:1562
+492:1563
+492:1564
+492:1565
+492:1567
+492:1568
+493:1510
+493:1522
+493:1523
+493:1524
+493:1525
+493:1526
+493:1527
+493:1529
+493:1530
+493:1531
+493:1532
+493:1533
+493:1534
+493:1535
+493:1544
+493:1545
+493:1546
+493:1547
+493:1548
+493:1550
+493:1551
+493:1552
+493:1553
+494:1554
+495:1556
+496:1557
+497:1558
+498:1559
+504:2397
+504:2409
+504:2431
+504:2432
+504:2433
+504:2434
+504:2435
+504:2436
+504:2437
+504:2438
+504:2439
+504:2440
+504:2441
+505:2401
+506:2402
+507:2403
+508:2404
+509:2405
+510:2406
+513:2410
+513:2411
+513:2412
+513:2413
+513:2414
+513:2415
+513:2416
+513:2417
+513:2418
+513:2419
+513:2420
+515:2421
+516:2423
+517:2424
+518:2425
+519:2426
+520:2427
+521:2428
+525:2314
+525:2319
+525:2376
+525:2380
+525:2381
+525:2382
+525:2383
+525:2385
+525:2386
+525:2387
+525:2388
+525:2389
+525:2390
+525:2391
+525:2392
+525:2393
+525:2394
+525:2395
+527:2317
+527:2320
+527:2321
+527:2322
+527:2323
+527:2324
+527:2325
+527:2327
+527:2328
+527:2329
+527:2330
+527:2331
+527:2332
+527:2333
+527:2334
+527:2335
+527:2336
+527:2337
+527:2338
+527:2339
+527:2341
+527:2342
+527:2343
+527:2344
+527:2345
+527:2346
+527:2347
+527:2348
+527:2349
+527:2350
+527:2351
+527:2353
+527:2354
+527:2355
+527:2358
+527:2359
+527:2360
+527:2361
+527:2362
+527:2365
+527:2366
+527:2367
+527:2368
+527:2369
+527:2371
+527:2372
+527:2373
+527:2374
+527:2375
+528:2377
+528:2378
+528:2379
+531:2443
+531:2447
+531:2448
+531:2449
+531:2475
+531:2476
+531:2477
+531:2478
+531:2479
+531:2480
+531:2481
+531:2482
+531:2483
+531:2484
+531:2485
+531:2486
+531:2487
+531:2488
+531:2489
+531:2490
+532:2450
+532:2451
+532:2452
+532:2453
+532:2454
+532:2455
+532:2456
+532:2457
+532:2458
+533:2461
+533:2462
+533:2463
+533:2464
+533:2465
+533:2466
+534:2469
+534:2470
+534:2471
+534:2472
+537:2794
+537:2798
+537:2799
+537:2800
+537:2837
+537:2838
+537:2839
+537:2840
+537:2841
+537:2842
+537:2843
+537:2844
+537:2845
+537:2846
+537:2847
+537:2848
+537:2849
+537:2850
+537:2851
+537:2852
+538:2801
+538:2802
+538:2803
+538:2804
+538:2805
+538:2806
+538:2807
+538:2808
+538:2809
+538:2810
+539:2813
+539:2814
+539:2815
+539:2816
+539:2817
+539:2818
+539:2819
+539:2820
+539:2821
+539:2822
+540:2825
+540:2826
+540:2827
+540:2828
+540:2829
+540:2830
+540:2831
+540:2832
+540:2833
+540:2834
+543:2854
+543:2858
+543:2881
+543:2882
+543:2883
+543:2884
+543:2885
+543:2886
+543:2887
+543:2888
+543:2889
+543:2890
+543:2891
+544:2859
+544:2860
+544:2861
+544:2862
+544:2863
+544:2864
+544:2865
+544:2866
+544:2867
+544:2868
+544:2869
+544:2870
+544:2871
+544:2872
+544:2873
+544:2874
+544:2875
+544:2877
+544:2878
+544:2879
+544:2880
+547:2654
+547:2680
+547:2681
+547:2682
+547:2777
+547:2778
+547:2779
+547:2780
+547:2781
+547:2782
+547:2783
+547:2784
+547:2785
+547:2786
+547:2787
+547:2788
+547:2789
+547:2790
+547:2791
+547:2792
+548:2659
+549:2660
+550:2661
+551:2662
+552:2663
+553:2664
+554:2665
+555:2666
+556:2667
+557:2668
+558:2669
+559:2670
+560:2671
+561:2672
+562:2673
+563:2674
+564:2675
+565:2676
+566:2677
+569:2683
+569:2684
+569:2685
+569:2686
+569:2687
+570:2657
+570:2690
+570:2691
+570:2692
+570:2693
+570:2695
+570:2696
+570:2697
+570:2698
+570:2699
+570:2700
+570:2701
+570:2702
+570:2704
+570:2705
+570:2706
+570:2707
+570:2708
+570:2709
+570:2710
+570:2717
+570:2718
+570:2719
+570:2720
+570:2721
+570:2723
+570:2724
+570:2727
+570:2728
+570:2729
+570:2730
+570:2731
+570:2734
+570:2735
+570:2736
+570:2737
+570:2738
+570:2741
+570:2742
+570:2743
+570:2744
+570:2745
+571:2747
+572:2749
+573:2750
+574:2751
+575:2752
+578:2757
+578:2758
+578:2759
+578:2760
+578:2761
+580:2764
+580:2765
+580:2766
+580:2767
+580:2768
+581:2769
+582:2771
+583:2772
+587:2492
+587:2501
+587:2502
+587:2503
+587:2637
+587:2638
+587:2639
+587:2640
+587:2641
+587:2642
+587:2643
+587:2644
+587:2645
+587:2646
+587:2647
+587:2648
+587:2649
+587:2650
+587:2651
+587:2652
+588:2504
+588:2505
+588:2506
+588:2507
+588:2508
+588:2509
+588:2510
+588:2511
+588:2512
+588:2513
+588:2514
+588:2515
+588:2516
+588:2517
+589:2495
+589:2496
+589:2520
+589:2521
+589:2522
+589:2523
+589:2524
+589:2525
+589:2527
+589:2528
+589:2529
+589:2530
+589:2531
+589:2532
+589:2533
+589:2536
+589:2537
+589:2538
+589:2539
+589:2540
+589:2543
+589:2544
+589:2545
+589:2546
+589:2547
+589:2620
+589:2621
+590:2549
+590:2550
+590:2551
+590:2552
+590:2583
+590:2584
+591:2497
+591:2498
+591:2553
+591:2554
+591:2555
+591:2556
+591:2557
+591:2558
+591:2559
+591:2560
+591:2561
+591:2562
+591:2563
+591:2564
+591:2565
+591:2566
+591:2567
+591:2568
+591:2569
+591:2576
+591:2577
+591:2578
+591:2579
+591:2581
+591:2582
+592:2570
+593:2572
+594:2573
+598:2585
+599:2587
+600:2588
+601:2589
+602:2590
+605:2594
+605:2595
+605:2596
+605:2597
+605:2598
+605:2599
+605:2600
+605:2614
+605:2615
+605:2616
+605:2617
+605:2618
+606:2603
+606:2604
+606:2605
+606:2606
+606:2607
+610:2499
+610:2624
+610:2625
+610:2626
+610:2627
+610:2628
+611:2629
+612:2631
+613:2632
+*E
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.java b/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.java
new file mode 100644
index 0000000..e4b12fa
--- /dev/null
+++ b/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.java
@@ -0,0 +1,130 @@
+// $ANTLR 2.7.7 (2006-01-29): "define.g" -> "DefineGrammarItemsWalker.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.misc.*;
+
+public interface DefineGrammarItemsWalkerTokenTypes {
+	int EOF = 1;
+	int NULL_TREE_LOOKAHEAD = 3;
+	int OPTIONS = 4;
+	int TOKENS = 5;
+	int PARSER = 6;
+	int LEXER = 7;
+	int RULE = 8;
+	int BLOCK = 9;
+	int OPTIONAL = 10;
+	int CLOSURE = 11;
+	int POSITIVE_CLOSURE = 12;
+	int SYNPRED = 13;
+	int RANGE = 14;
+	int CHAR_RANGE = 15;
+	int EPSILON = 16;
+	int ALT = 17;
+	int EOR = 18;
+	int EOB = 19;
+	int EOA = 20;
+	int ID = 21;
+	int ARG = 22;
+	int ARGLIST = 23;
+	int RET = 24;
+	int LEXER_GRAMMAR = 25;
+	int PARSER_GRAMMAR = 26;
+	int TREE_GRAMMAR = 27;
+	int COMBINED_GRAMMAR = 28;
+	int INITACTION = 29;
+	int LABEL = 30;
+	int TEMPLATE = 31;
+	int SCOPE = 32;
+	int GATED_SEMPRED = 33;
+	int SYN_SEMPRED = 34;
+	int BACKTRACK_SEMPRED = 35;
+	int FRAGMENT = 36;
+	int ACTION = 37;
+	int DOC_COMMENT = 38;
+	int SEMI = 39;
+	int LITERAL_lexer = 40;
+	int LITERAL_tree = 41;
+	int LITERAL_grammar = 42;
+	int AMPERSAND = 43;
+	int COLON = 44;
+	int RCURLY = 45;
+	int ASSIGN = 46;
+	int STRING_LITERAL = 47;
+	int CHAR_LITERAL = 48;
+	int INT = 49;
+	int STAR = 50;
+	int TOKEN_REF = 51;
+	int LITERAL_protected = 52;
+	int LITERAL_public = 53;
+	int LITERAL_private = 54;
+	int BANG = 55;
+	int ARG_ACTION = 56;
+	int LITERAL_returns = 57;
+	int LITERAL_throws = 58;
+	int COMMA = 59;
+	int LPAREN = 60;
+	int OR = 61;
+	int RPAREN = 62;
+	int LITERAL_catch = 63;
+	int LITERAL_finally = 64;
+	int PLUS_ASSIGN = 65;
+	int SEMPRED = 66;
+	int IMPLIES = 67;
+	int ROOT = 68;
+	int RULE_REF = 69;
+	int NOT = 70;
+	int TREE_BEGIN = 71;
+	int QUESTION = 72;
+	int PLUS = 73;
+	int WILDCARD = 74;
+	int REWRITE = 75;
+	int DOLLAR = 76;
+	int DOUBLE_QUOTE_STRING_LITERAL = 77;
+	int DOUBLE_ANGLE_STRING_LITERAL = 78;
+	int WS = 79;
+	int COMMENT = 80;
+	int SL_COMMENT = 81;
+	int ML_COMMENT = 82;
+	int OPEN_ELEMENT_OPTION = 83;
+	int CLOSE_ELEMENT_OPTION = 84;
+	int ESC = 85;
+	int DIGIT = 86;
+	int XDIGIT = 87;
+	int NESTED_ARG_ACTION = 88;
+	int NESTED_ACTION = 89;
+	int ACTION_CHAR_LITERAL = 90;
+	int ACTION_STRING_LITERAL = 91;
+	int ACTION_ESC = 92;
+	int WS_LOOP = 93;
+	int INTERNAL_RULE_REF = 94;
+	int WS_OPT = 95;
+	int SRC = 96;
+}
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.txt b/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.txt
new file mode 100644
index 0000000..02b9fdc
--- /dev/null
+++ b/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.txt
@@ -0,0 +1,95 @@
+// $ANTLR 2.7.7 (2006-01-29): define.g -> DefineGrammarItemsWalkerTokenTypes.txt$
+DefineGrammarItemsWalker    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
diff --git a/src/org/antlr/tool/ErrorManager.java b/src/org/antlr/tool/ErrorManager.java
new file mode 100644
index 0000000..5941864
--- /dev/null
+++ b/src/org/antlr/tool/ErrorManager.java
@@ -0,0 +1,922 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.Token;
+import org.antlr.Tool;
+import org.antlr.misc.BitSet;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateErrorListener;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.util.*;
+
+/** Defines all the errors ANTLR can generate for both the tool and for
+ *  issues with a grammar.
+ *
+ *  Here is a list of language names:
+ *
+ *  http://ftp.ics.uci.edu/pub/ietf/http/related/iso639.txt
+ *
+ *  Here is a list of country names:
+ *
+ *  http://www.chemie.fu-berlin.de/diverse/doc/ISO_3166.html
+ *
+ *  I use constants not strings to identify messages as the compiler will
+ *  find any errors/mismatches rather than leaving a mistyped string in
+ *  the code to be found randomly in the future.  Further, Intellij can
+ *  do field name expansion to save me some typing.  I have to map
+ *  int constants to template names, however, which could introduce a mismatch.
+ *  Someone could provide a .stg file that had a template name wrong.  When
+ *  I load the group, then, I must verify that all messages are there.
+ *
+ *  This is essentially the functionality of the resource bundle stuff Java
+ *  has, but I don't want to load a property file--I want to load a template
+ *  group file and this is so simple, why mess with their junk.
+ *
+ *  I use the default Locale as defined by java to compute a group file name
+ *  in the org/antlr/tool/templates/messages dir called en_US.stg and so on.
+ *
+ *  Normally we want to use the default locale, but often a message file will
+ *  not exist for it so we must fall back on the US locale.
+ *
+ *  During initialization of this class, all errors go straight to System.err.
+ *  There is no way around this.  If I have not set up the error system, how
+ *  can I do errors properly?  For example, if the string template group file
+ *  full of messages has an error, how could I print to anything but System.err?
+ *
+ *  TODO: how to map locale to a file encoding for the stringtemplate group file?
+ *  StringTemplate knows how to pay attention to the default encoding so it
+ *  should probably just work unless a GUI sets the locale to some Chinese
+ *  variation but System.getProperty("file.encoding") is US.  Hmm...
+ *
+ *  TODO: get antlr.g etc.. parsing errors to come here.
+ */
+public class ErrorManager {
+	// TOOL ERRORS
+	// file errors
+	public static final int MSG_CANNOT_WRITE_FILE = 1;
+	public static final int MSG_CANNOT_CLOSE_FILE = 2;
+	public static final int MSG_CANNOT_FIND_TOKENS_FILE = 3;
+	public static final int MSG_ERROR_READING_TOKENS_FILE = 4;
+	public static final int MSG_DIR_NOT_FOUND = 5;
+	public static final int MSG_OUTPUT_DIR_IS_FILE = 6;
+	public static final int MSG_CANNOT_OPEN_FILE = 7;
+	public static final int MSG_FILE_AND_GRAMMAR_NAME_DIFFER = 8;
+	public static final int MSG_FILENAME_EXTENSION_ERROR = 9;
+
+	public static final int MSG_INTERNAL_ERROR = 10;
+	public static final int MSG_INTERNAL_WARNING = 11;
+	public static final int MSG_ERROR_CREATING_ARTIFICIAL_RULE = 12;
+	public static final int MSG_TOKENS_FILE_SYNTAX_ERROR = 13;
+	public static final int MSG_CANNOT_GEN_DOT_FILE = 14;
+	public static final int MSG_BAD_AST_STRUCTURE = 15;
+	public static final int MSG_BAD_ACTION_AST_STRUCTURE = 16;
+
+	// code gen errors
+	public static final int MSG_MISSING_CODE_GEN_TEMPLATES = 20;
+	public static final int MSG_MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES = 21;
+	public static final int MSG_CODE_GEN_TEMPLATES_INCOMPLETE = 22;
+	public static final int MSG_CANNOT_CREATE_TARGET_GENERATOR = 23;
+	//public static final int MSG_CANNOT_COMPUTE_SAMPLE_INPUT_SEQ = 24;
+
+	// GRAMMAR ERRORS
+	public static final int MSG_SYNTAX_ERROR = 100;
+	public static final int MSG_RULE_REDEFINITION = 101;
+	public static final int MSG_LEXER_RULES_NOT_ALLOWED = 102;
+	public static final int MSG_PARSER_RULES_NOT_ALLOWED = 103;
+	public static final int MSG_CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL = 104;
+	public static final int MSG_NO_TOKEN_DEFINITION = 105;
+	public static final int MSG_UNDEFINED_RULE_REF = 106;
+	public static final int MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE = 107;
+	public static final int MSG_CANNOT_ALIAS_TOKENS_IN_LEXER = 108;
+	public static final int MSG_ATTRIBUTE_REF_NOT_IN_RULE = 111;
+	public static final int MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF = 112;
+	public static final int MSG_UNKNOWN_ATTRIBUTE_IN_SCOPE = 113;
+	public static final int MSG_UNKNOWN_SIMPLE_ATTRIBUTE = 114;
+	public static final int MSG_INVALID_RULE_PARAMETER_REF = 115;
+	public static final int MSG_UNKNOWN_RULE_ATTRIBUTE = 116;
+	public static final int MSG_ISOLATED_RULE_SCOPE = 117;
+	public static final int MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE = 118;
+	public static final int MSG_LABEL_CONFLICTS_WITH_RULE = 119;
+	public static final int MSG_LABEL_CONFLICTS_WITH_TOKEN = 120;
+	public static final int MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE = 121;
+	public static final int MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL = 122;
+	public static final int MSG_ATTRIBUTE_CONFLICTS_WITH_RULE = 123;
+	public static final int MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL = 124;
+	public static final int MSG_LABEL_TYPE_CONFLICT = 125;
+	public static final int MSG_ARG_RETVAL_CONFLICT = 126;
+	public static final int MSG_NONUNIQUE_REF = 127;
+	public static final int MSG_FORWARD_ELEMENT_REF = 128;
+	public static final int MSG_MISSING_RULE_ARGS = 129;
+	public static final int MSG_RULE_HAS_NO_ARGS = 130;
+	public static final int MSG_ARGS_ON_TOKEN_REF = 131;
+	public static final int MSG_AMBIGUOUS_RULE_SCOPE = 132;
+	public static final int MSG_ILLEGAL_OPTION = 133;
+	public static final int MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT = 134;
+	public static final int MSG_UNDEFINED_TOKEN_REF_IN_REWRITE = 135;
+	public static final int MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS = 136;
+	public static final int MSG_UNDEFINED_LABEL_REF_IN_REWRITE = 137;
+	public static final int MSG_NO_GRAMMAR_START_RULE = 138;
+	public static final int MSG_EMPTY_COMPLEMENT = 139;
+	public static final int MSG_UNKNOWN_DYNAMIC_SCOPE = 140;
+	public static final int MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE = 141;
+	public static final int MSG_ISOLATED_RULE_ATTRIBUTE = 142;
+	public static final int MSG_INVALID_ACTION_SCOPE = 143;
+	public static final int MSG_ACTION_REDEFINITION = 144;
+	public static final int MSG_DOUBLE_QUOTES_ILLEGAL = 145;
+	public static final int MSG_INVALID_TEMPLATE_ACTION = 146;
+	public static final int MSG_MISSING_ATTRIBUTE_NAME = 147;
+	public static final int MSG_ARG_INIT_VALUES_ILLEGAL = 148;
+	public static final int MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION = 149;
+	public static final int MSG_NO_RULES = 150;
+	public static final int MSG_WRITE_TO_READONLY_ATTR = 151;
+	public static final int MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR = 152;
+	public static final int MSG_REWRITE_FOR_MULTI_ELEMENT_ALT = 153;
+	public static final int MSG_RULE_INVALID_SET = 154;
+
+
+	// GRAMMAR WARNINGS
+	public static final int MSG_GRAMMAR_NONDETERMINISM = 200; // A predicts alts 1,2
+	public static final int MSG_UNREACHABLE_ALTS = 201;       // nothing predicts alt i
+	public static final int MSG_DANGLING_STATE = 202;        // no edges out of state
+	public static final int MSG_INSUFFICIENT_PREDICATES = 203;
+	public static final int MSG_DUPLICATE_SET_ENTRY = 204;    // (A|A)
+	public static final int MSG_ANALYSIS_ABORTED = 205;
+	public static final int MSG_RECURSION_OVERLOW = 206;
+	public static final int MSG_LEFT_RECURSION = 207;
+	public static final int MSG_UNREACHABLE_TOKENS = 208; // nothing predicts token
+	public static final int MSG_TOKEN_NONDETERMINISM = 209; // alts of Tokens rule
+	public static final int MSG_LEFT_RECURSION_CYCLES = 210;
+	public static final int MSG_NONREGULAR_DECISION = 211;
+
+
+	public static final int MAX_MESSAGE_NUMBER = 211;
+
+	/** Do not perform analysis and code gen if one of these happens */
+	public static final BitSet ERRORS_FORCING_NO_ANALYSIS = new BitSet() {
+		{
+			add(MSG_RULE_REDEFINITION);
+			add(MSG_UNDEFINED_RULE_REF);
+			add(MSG_LEFT_RECURSION_CYCLES);
+			add(MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION);
+			add(MSG_NO_RULES);
+			// TODO: ...
+		}
+	};
+
+	/** Do not perform code generation if one of these happens */
+	public static final BitSet ERRORS_FORCING_NO_CODEGEN = new BitSet() {
+		{
+			add(MSG_NONREGULAR_DECISION);
+			add(MSG_FILE_AND_GRAMMAR_NAME_DIFFER);
+			// TODO: ...
+		}
+	};
+
+	/** Only one error can be emitted for any entry in this table.
+	 *  Map<String,Set> where the key is a method name like danglingState.
+	 *  The set is whatever that method accepts or derives like a DFA.
+	 */
+	public static final Map emitSingleError = new HashMap() {
+		{
+			put("danglingState", new HashSet());
+		}
+	};
+
+
+	/** Messages should be sensitive to the locale. */
+	private static Locale locale;
+	private static String formatName;
+
+	/** Each thread might need its own error listener; e.g., a GUI with
+	 *  multiple window frames holding multiple grammars.
+	 */
+	private static Map threadToListenerMap = new HashMap();
+
+	static class ErrorState {
+		public int errors;
+		public int warnings;
+		public int infos;
+		/** Track all msgIDs; we use to abort later if necessary
+		 *  also used in Message to find out what type of message it is via getMessageType()
+		 */
+		public BitSet errorMsgIDs = new BitSet();
+		public BitSet warningMsgIDs = new BitSet();
+		// TODO: figure out how to do info messages. these do not have IDs...kr
+		//public BitSet infoMsgIDs = new BitSet();
+	}
+
+	/** Track the number of errors regardless of the listener but track
+	 *  per thread.
+	 */
+	private static Map threadToErrorCountMap = new HashMap();
+
+	/** Each thread has its own ptr to a Tool object, which knows how
+	 *  to panic, for example.  In a GUI, the thread might just throw an Error
+	 *  to exit rather than the suicide System.exit.
+	 */
+	private static Map threadToToolMap = new HashMap();
+
+	/** The group of templates that represent all possible ANTLR errors. */
+	private static StringTemplateGroup messages;
+	/** The group of templates that represent the current message format. */
+	private static StringTemplateGroup format;
+
+	/** From a msgID how can I get the name of the template that describes
+	 *  the error or warning?
+	 */
+	private static String[] idToMessageTemplateName = new String[MAX_MESSAGE_NUMBER+1];
+
+	static ANTLRErrorListener theDefaultErrorListener = new ANTLRErrorListener() {
+		public void info(String msg) {
+			if (formatWantsSingleLineMessage()) {
+				msg = msg.replaceAll("\n", " ");
+			}
+			System.err.println(msg);
+		}
+
+		public void error(Message msg) {
+			String outputMsg = msg.toString();
+			if (formatWantsSingleLineMessage()) {
+				outputMsg = outputMsg.replaceAll("\n", " ");
+			}
+			System.err.println(outputMsg);
+		}
+
+		public void warning(Message msg) {
+			String outputMsg = msg.toString();
+			if (formatWantsSingleLineMessage()) {
+				outputMsg = outputMsg.replaceAll("\n", " ");
+			}
+			System.err.println(outputMsg);
+		}
+
+		public void error(ToolMessage msg) {
+			String outputMsg = msg.toString();
+			if (formatWantsSingleLineMessage()) {
+				outputMsg = outputMsg.replaceAll("\n", " ");
+			}
+			System.err.println(outputMsg);
+		}
+	};
+
+	/** Handle all ST error listeners here (code gen, Grammar, and this class
+	 *  use templates).
+	 */
+	static StringTemplateErrorListener initSTListener =
+		new StringTemplateErrorListener() {
+			public void error(String s, Throwable e) {
+				System.err.println("ErrorManager init error: "+s);
+				if ( e!=null ) {
+					System.err.println("exception: "+e);
+				}
+				/*
+				if ( e!=null ) {
+					e.printStackTrace(System.err);
+				}
+				*/
+			}
+			public void warning(String s) {
+				System.err.println("ErrorManager init warning: "+s);
+			}
+			public void debug(String s) {}
+		};
+
+	/** During verification of the messages group file, don't gen errors.
+	 *  I'll handle them here.  This is used only after file has loaded ok
+	 *  and only for the messages STG.
+	 */
+	static StringTemplateErrorListener blankSTListener =
+		new StringTemplateErrorListener() {
+			public void error(String s, Throwable e) {}
+			public void warning(String s) {}
+			public void debug(String s) {}
+		};
+
+	/** Errors during initialization related to ST must all go to System.err.
+	 */
+	static StringTemplateErrorListener theDefaultSTListener =
+		new StringTemplateErrorListener() {
+		public void error(String s, Throwable e) {
+			if ( e instanceof InvocationTargetException ) {
+				e = ((InvocationTargetException)e).getTargetException();
+			}
+			ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, s, e);
+		}
+		public void warning(String s) {
+			ErrorManager.warning(ErrorManager.MSG_INTERNAL_WARNING, s);
+		}
+		public void debug(String s) {
+		}
+	};
+
+	// make sure that this class is ready to use after loading
+	static {
+		initIdToMessageNameMapping();
+		// it is inefficient to set the default locale here if another
+		// piece of code is going to set the locale, but that would
+		// require that a user call an init() function or something.  I prefer
+		// that this class be ready to go when loaded as I'm absentminded ;)
+		setLocale(Locale.getDefault());
+		// try to load the message format group
+		// the user might have specified one on the command line
+		// if not, or if the user has given an illegal value, we will fall back to "antlr"
+		setFormat("antlr");
+	}
+
+    public static StringTemplateErrorListener getStringTemplateErrorListener() {
+		return theDefaultSTListener;
+	}
+
+	/** We really only need a single locale for entire running ANTLR code
+	 *  in a single VM.  Only pay attention to the language, not the country
+	 *  so that French Canadians and French Frenchies all get the same
+	 *  template file, fr.stg.  Just easier this way.
+	 */
+	public static void setLocale(Locale locale) {
+		ErrorManager.locale = locale;
+		String language = locale.getLanguage();
+		String fileName = "org/antlr/tool/templates/messages/languages/"+language+".stg";
+		ClassLoader cl = Thread.currentThread().getContextClassLoader();
+		InputStream is = cl.getResourceAsStream(fileName);
+		if ( is==null ) {
+			cl = ErrorManager.class.getClassLoader();
+			is = cl.getResourceAsStream(fileName);
+		}
+		if ( is==null && language.equals(Locale.US.getLanguage()) ) {
+			rawError("ANTLR installation corrupted; cannot find English messages file "+fileName);
+			panic();
+		}
+		else if ( is==null ) {
+			//rawError("no such locale file "+fileName+" retrying with English locale");
+			setLocale(Locale.US); // recurse on this rule, trying the US locale
+			return;
+		}
+		BufferedReader br = null;
+		try {
+			br = new BufferedReader(new InputStreamReader(is));
+			messages = new StringTemplateGroup(br,
+											   AngleBracketTemplateLexer.class,
+											   initSTListener);
+			br.close();
+		}
+		catch (IOException ioe) {
+			rawError("error reading message file "+fileName, ioe);
+		}
+		finally {
+			if ( br!=null ) {
+				try {
+					br.close();
+				}
+				catch (IOException ioe) {
+					rawError("cannot close message file "+fileName, ioe);
+				}
+			}
+		}
+
+		messages.setErrorListener(blankSTListener);
+		boolean messagesOK = verifyMessages();
+		if ( !messagesOK && language.equals(Locale.US.getLanguage()) ) {
+			rawError("ANTLR installation corrupted; English messages file "+language+".stg incomplete");
+			panic();
+		}
+		else if ( !messagesOK ) {
+			setLocale(Locale.US); // try US to see if that will work
+		}
+	}
+
+	/** The format gets reset from the Tool if the user supplied a command line option to that effect.
+	 *  Otherwise we just use the default "antlr".
+	 */
+	public static void setFormat(String formatName) {
+		ErrorManager.formatName = formatName;
+		String fileName = "org/antlr/tool/templates/messages/formats/"+formatName+".stg";
+		ClassLoader cl = Thread.currentThread().getContextClassLoader();
+		InputStream is = cl.getResourceAsStream(fileName);
+		if ( is==null ) {
+			cl = ErrorManager.class.getClassLoader();
+			is = cl.getResourceAsStream(fileName);
+		}
+		if ( is==null && formatName.equals("antlr") ) {
+			rawError("ANTLR installation corrupted; cannot find ANTLR messages format file "+fileName);
+			panic();
+		}
+		else if ( is==null ) {
+			rawError("no such message format file "+fileName+" retrying with default ANTLR format");
+			setFormat("antlr"); // recurse on this rule, trying the default message format
+			return;
+		}
+		BufferedReader br = null;
+		try {
+			br = new BufferedReader(new InputStreamReader(is));
+			format = new StringTemplateGroup(br,
+											   AngleBracketTemplateLexer.class,
+											   initSTListener);
+		}
+		finally {
+			try {
+				if ( br!=null ) {
+					br.close();
+				}
+			}
+			catch (IOException ioe) {
+				rawError("cannot close message format file "+fileName, ioe);
+			}
+		}
+
+		format.setErrorListener(blankSTListener);
+		boolean formatOK = verifyFormat();
+		if ( !formatOK && formatName.equals("antlr") ) {
+			rawError("ANTLR installation corrupted; ANTLR messages format file "+formatName+".stg incomplete");
+			panic();
+		}
+		else if ( !formatOK ) {
+			setFormat("antlr"); // recurse on this rule, trying the default message format
+		}
+	}
+
+	/** Encodes the error handling found in setLocale, but does not trigger
+	 *  panics, which would make GUI tools die if ANTLR's installation was
+	 *  a bit screwy.  Duplicated code...ick.
+	public static Locale getLocaleForValidMessages(Locale locale) {
+		ErrorManager.locale = locale;
+		String language = locale.getLanguage();
+		String fileName = "org/antlr/tool/templates/messages/"+language+".stg";
+		ClassLoader cl = Thread.currentThread().getContextClassLoader();
+		InputStream is = cl.getResourceAsStream(fileName);
+		if ( is==null && language.equals(Locale.US.getLanguage()) ) {
+			return null;
+		}
+		else if ( is==null ) {
+			return getLocaleForValidMessages(Locale.US); // recurse on this rule, trying the US locale
+		}
+
+		boolean messagesOK = verifyMessages();
+		if ( !messagesOK && language.equals(Locale.US.getLanguage()) ) {
+			return null;
+		}
+		else if ( !messagesOK ) {
+			return getLocaleForValidMessages(Locale.US); // try US to see if that will work
+		}
+		return true;
+	}
+	 */
+
+	/** In general, you'll want all errors to go to a single spot.
+	 *  However, in a GUI, you might have two frames up with two
+	 *  different grammars.  Two threads might launch to process the
+	 *  grammars--you would want errors to go to different objects
+	 *  depending on the thread.  I store a single listener per
+	 *  thread.
+	 */
+	public static void setErrorListener(ANTLRErrorListener listener) {
+		threadToListenerMap.put(Thread.currentThread(), listener);
+	}
+
+	public static void setTool(Tool tool) {
+		threadToToolMap.put(Thread.currentThread(), tool);
+	}
+
+	/** Given a message ID, return a StringTemplate that somebody can fill
+	 *  with data.  We need to convert the int ID to the name of a template
+	 *  in the messages ST group.
+	 */
+	public static StringTemplate getMessage(int msgID) {
+        String msgName = idToMessageTemplateName[msgID];
+		return messages.getInstanceOf(msgName);
+	}
+	public static String getMessageType(int msgID) {
+		if (getErrorState().warningMsgIDs.member(msgID)) {
+			return messages.getInstanceOf("warning").toString();
+		}
+		else if (getErrorState().errorMsgIDs.member(msgID)) {
+			return messages.getInstanceOf("error").toString();
+		}
+		assertTrue(false, "Assertion failed! Message ID " + msgID + " created but is not present in errorMsgIDs or warningMsgIDs.");
+		return "";
+	}
+
+	/** Return a StringTemplate that refers to the current format used for
+	 * emitting messages.
+	 */
+	public static StringTemplate getLocationFormat() {
+		return format.getInstanceOf("location");
+	}
+	public static StringTemplate getReportFormat() {
+		return format.getInstanceOf("report");
+	}
+	public static StringTemplate getMessageFormat() {
+		return format.getInstanceOf("message");
+	}
+	public static boolean formatWantsSingleLineMessage() {
+		return format.getInstanceOf("wantsSingleLineMessage").toString().equals("true");
+	}
+
+	public static ANTLRErrorListener getErrorListener() {
+		ANTLRErrorListener el =
+			(ANTLRErrorListener)threadToListenerMap.get(Thread.currentThread());
+		if ( el==null ) {
+			return theDefaultErrorListener;
+		}
+		return el;
+	}
+
+	public static ErrorState getErrorState() {
+		ErrorState ec =
+			(ErrorState)threadToErrorCountMap.get(Thread.currentThread());
+		if ( ec==null ) {
+			ec = new ErrorState();
+			threadToErrorCountMap.put(Thread.currentThread(), ec);
+		}
+		return ec;
+	}
+
+	public static void resetErrorState() {
+		ErrorState ec = new ErrorState();
+		threadToErrorCountMap.put(Thread.currentThread(), ec);
+	}
+
+	public static void info(String msg) {
+		getErrorState().infos++;
+		getErrorListener().info(msg);
+	}
+
+	public static void error(int msgID) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID));
+	}
+
+	public static void error(int msgID, Throwable e) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID,e));
+	}
+
+	public static void error(int msgID, Object arg) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID, arg));
+	}
+
+	public static void error(int msgID, Object arg, Object arg2) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID, arg, arg2));
+	}
+
+	public static void error(int msgID, Object arg, Throwable e) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID, arg, e));
+	}
+
+	public static void warning(int msgID, Object arg) {
+		getErrorState().warnings++;
+		getErrorState().warningMsgIDs.add(msgID);
+		getErrorListener().warning(new ToolMessage(msgID, arg));
+	}
+
+	public static void nondeterminism(DecisionProbe probe,
+									  DFAState d)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarNonDeterminismMessage(probe,d);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void danglingState(DecisionProbe probe,
+									 DFAState d)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarDanglingStateMessage(probe,d);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		Set seen = (Set)emitSingleError.get("danglingState");
+		if ( !seen.contains(d.dfa.decisionNumber+"|"+d.getAltSet()) ) {
+			getErrorListener().warning(msg);
+			// we've seen this decision and this alt set; never again
+			seen.add(d.dfa.decisionNumber+"|"+d.getAltSet());
+		}
+	}
+
+	public static void analysisAborted(DecisionProbe probe)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarAnalysisAbortedMessage(probe);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void unreachableAlts(DecisionProbe probe,
+									   List alts)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarUnreachableAltsMessage(probe,alts);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void insufficientPredicates(DecisionProbe probe,
+											  List alts)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarInsufficientPredicatesMessage(probe,alts);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void nonLLStarDecision(DecisionProbe probe) {
+		getErrorState().errors++;
+		Message msg = new NonRegularDecisionMessage(probe, probe.getNonDeterministicAlts());
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
+	}
+
+	public static void recursionOverflow(DecisionProbe probe,
+										 DFAState sampleBadState,
+										 int alt,
+										 Collection targetRules,
+										 Collection callSiteStates)
+	{
+		getErrorState().warnings++;
+		Message msg = new RecursionOverflowMessage(probe,sampleBadState, alt,
+										 targetRules, callSiteStates);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	/*
+	// TODO: we can remove I think.  All detected now with cycles check.
+	public static void leftRecursion(DecisionProbe probe,
+									 int alt,
+									 Collection targetRules,
+									 Collection callSiteStates)
+	{
+		getErrorState().warnings++;
+		Message msg = new LeftRecursionMessage(probe, alt, targetRules, callSiteStates);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+	*/
+
+	public static void leftRecursionCycles(Collection cycles) {
+		getErrorState().errors++;
+		Message msg = new LeftRecursionCyclesMessage(cycles);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void grammarError(int msgID,
+									Grammar g,
+									Token token,
+									Object arg,
+									Object arg2)
+	{
+		getErrorState().errors++;
+		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(msg);
+	}
+
+	public static void grammarError(int msgID,
+									Grammar g,
+									Token token,
+									Object arg)
+	{
+		grammarError(msgID,g,token,arg,null);
+	}
+
+	public static void grammarError(int msgID,
+									Grammar g,
+									Token token)
+	{
+		grammarError(msgID,g,token,null,null);
+	}
+
+	public static void grammarWarning(int msgID,
+									  Grammar g,
+									  Token token,
+									  Object arg,
+									  Object arg2)
+	{
+		getErrorState().errors++;
+		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
+		getErrorState().warningMsgIDs.add(msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void grammarWarning(int msgID,
+									  Grammar g,
+									  Token token,
+									  Object arg)
+	{
+		grammarWarning(msgID,g,token,arg,null);
+	}
+
+	public static void grammarWarning(int msgID,
+									  Grammar g,
+									  Token token)
+	{
+		grammarWarning(msgID,g,token,null,null);
+	}
+
+	public static void syntaxError(int msgID,
+								   Grammar grammar,
+								   Token token,
+								   Object arg,
+								   antlr.RecognitionException re)
+	{
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(
+			new GrammarSyntaxMessage(msgID,grammar,token,arg,re)
+		);
+	}
+
+	public static void internalError(Object error, Throwable e) {
+		StackTraceElement location = getLastNonErrorManagerCodeLocation(e);
+		String msg = "Exception "+e+"@"+location+": "+error;
+		error(MSG_INTERNAL_ERROR, msg);
+	}
+
+	public static void internalError(Object error) {
+		StackTraceElement location =
+			getLastNonErrorManagerCodeLocation(new Exception());
+		String msg = location+": "+error;
+		error(MSG_INTERNAL_ERROR, msg);
+	}
+
+	public static boolean doNotAttemptAnalysis() {
+		return !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_ANALYSIS).isNil();
+	}
+
+	public static boolean doNotAttemptCodeGen() {
+		return !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_CODEGEN).isNil();
+	}
+
+	/** Return first non ErrorManager code location for generating messages */
+	private static StackTraceElement getLastNonErrorManagerCodeLocation(Throwable e) {
+		StackTraceElement[] stack = e.getStackTrace();
+		int i = 0;
+		for (; i < stack.length; i++) {
+			StackTraceElement t = stack[i];
+			if ( t.toString().indexOf("ErrorManager")<0 ) {
+				break;
+			}
+		}
+		StackTraceElement location = stack[i];
+		return location;
+	}
+
+	// A S S E R T I O N  C O D E
+
+	public static void assertTrue(boolean condition, String message) {
+		if ( !condition ) {
+			internalError(message);
+		}
+	}
+
+	// S U P P O R T  C O D E
+
+	protected static boolean initIdToMessageNameMapping() {
+		// make sure a message exists, even if it's just to indicate a problem
+		for (int i = 0; i < idToMessageTemplateName.length; i++) {
+			idToMessageTemplateName[i] = "INVALID MESSAGE ID: "+i;
+		}
+		// get list of fields and use it to fill in idToMessageTemplateName mapping
+		Field[] fields = ErrorManager.class.getFields();
+		for (int i = 0; i < fields.length; i++) {
+			Field f = fields[i];
+			String fieldName = f.getName();
+			if ( !fieldName.startsWith("MSG_") ) {
+				continue;
+			}
+			String templateName =
+				fieldName.substring("MSG_".length(),fieldName.length());
+			int msgID = 0;
+			try {
+				// get the constant value from this class object
+				msgID = f.getInt(ErrorManager.class);
+			}
+			catch (IllegalAccessException iae) {
+				System.err.println("cannot get const value for "+f.getName());
+				continue;
+			}
+			if ( fieldName.startsWith("MSG_") ) {
+                idToMessageTemplateName[msgID] = templateName;
+			}
+		}
+		return true;
+	}
+
+	/** Use reflection to find list of MSG_ fields and then verify a
+	 *  template exists for each one from the locale's group.
+	 */
+	protected static boolean verifyMessages() {
+		boolean ok = true;
+		Field[] fields = ErrorManager.class.getFields();
+		for (int i = 0; i < fields.length; i++) {
+			Field f = fields[i];
+			String fieldName = f.getName();
+			String templateName =
+				fieldName.substring("MSG_".length(),fieldName.length());
+			if ( fieldName.startsWith("MSG_") ) {
+				if ( !messages.isDefined(templateName) ) {
+					System.err.println("Message "+templateName+" in locale "+
+									   locale+" not found");
+					ok = false;
+				}
+			}
+		}
+		// check for special templates
+		if (!messages.isDefined("warning")) {
+			System.err.println("Message template 'warning' not found in locale "+ locale);
+			ok = false;
+		}
+		if (!messages.isDefined("error")) {
+			System.err.println("Message template 'error' not found in locale "+ locale);
+			ok = false;
+		}
+		return ok;
+	}
+
+	/** Verify the message format template group */
+	protected static boolean verifyFormat() {
+		boolean ok = true;
+		if (!format.isDefined("location")) {
+			System.err.println("Format template 'location' not found in " + formatName);
+			ok = false;
+		}
+		if (!format.isDefined("message")) {
+			System.err.println("Format template 'message' not found in " + formatName);
+			ok = false;
+		}
+		if (!format.isDefined("report")) {
+			System.err.println("Format template 'report' not found in " + formatName);
+			ok = false;
+		}
+		return ok;
+	}
+
+	/** If there are errors during ErrorManager init, we have no choice
+	 *  but to go to System.err.
+	 */
+	static void rawError(String msg) {
+		System.err.println(msg);
+	}
+
+	static void rawError(String msg, Throwable e) {
+		rawError(msg);
+		e.printStackTrace(System.err);
+	}
+
+	/** I *think* this will allow Tool subclasses to exit gracefully
+	 *  for GUIs etc...
+	 */
+	public static void panic() {
+		Tool tool = (Tool)threadToToolMap.get(Thread.currentThread());
+		if ( tool==null ) {
+			// no tool registered, exit
+			throw new Error("ANTLR ErrorManager panic");
+		}
+		else {
+			tool.panic();
+		}
+	}
+}
diff --git a/src/org/antlr/tool/FASerializer.java b/src/org/antlr/tool/FASerializer.java
new file mode 100644
index 0000000..992b31d
--- /dev/null
+++ b/src/org/antlr/tool/FASerializer.java
@@ -0,0 +1,211 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+/** An aspect of FA (finite automata) that knows how to dump them to serialized
+ *  strings.
+ */
+public class FASerializer {
+    /** To prevent infinite recursion when walking state machines, record
+     *  which states we've visited.  Make a new set every time you start
+     *  walking in case you reuse this object.  Multiple threads will trash
+     *  this shared variable.  Use a different FASerializer per thread.
+     */
+    protected Set markedStates;
+
+    /** Each state we walk will get a new state number for serialization
+     *  purposes.  This is the variable that tracks state numbers.
+     */
+    protected int stateCounter = 0;
+
+    /** Rather than add a new instance variable to NFA and DFA just for
+     *  serializing machines, map old state numbers to new state numbers
+     *  by a State object -> Integer new state number HashMap.
+     */
+    protected Map stateNumberTranslator;
+
+    protected Grammar grammar;
+
+    /** This aspect is associated with a grammar; used to get token names */
+    public FASerializer(Grammar grammar) {
+        this.grammar = grammar;
+    }
+
+	public String serialize(State s) {
+		return serialize(s, true); // renumber states by default for a canonical form
+	}
+
+	/** Return a string representation of a state machine.  Two identical
+     *  NFAs or DFAs will have identical serialized representations.  The
+     *  state numbers inside the state are not used; instead, a new number
+     *  is computed and because the serialization will walk the two
+     *  machines using the same specific algorithm, then the state numbers
+     *  will be identical.  Accept states are distinguished from regular
+     *  states.
+     */
+    public String serialize(State s, boolean renumber) {
+        markedStates = new HashSet(); // fresh visited-set per walk (see field comment)
+        stateCounter = 0;
+		if ( renumber ) {
+			stateNumberTranslator = new HashMap();
+        	walkFANormalizingStateNumbers(s); // first pass: assign canonical state numbers
+		}
+		List lines = new ArrayList(); // one string per edge; sorted below
+        if ( s.getNumberOfTransitions()>0 ) {
+			walkSerializingFA(lines, s);
+		}
+		else {
+			// special case: s0 is an accept
+			String s0 = getStateString(0, s);
+			lines.add(s0+"\n");
+		}
+        StringBuffer buf = new StringBuffer(0);
+        // sort lines to normalize; makes states come out ordered
+        // and then ordered by edge labels then by target state number :)
+        Collections.sort(lines);
+        for (int i = 0; i < lines.size(); i++) {
+            String line = (String) lines.get(i);
+            buf.append(line);
+        }
+        return buf.toString();
+    }
+
+    /** In stateNumberTranslator, get a map from State to new, normalized
+     *  state number.  Used by walkSerializingFA to make sure any two
+     *  identical state machines will serialize the same way.
+     */
+    protected void walkFANormalizingStateNumbers(State s) {
+		if ( s==null ) {
+			ErrorManager.internalError("null state s");
+			return;
+		}
+        if ( stateNumberTranslator.get(s)!=null ) {
+            return; // already did this state
+        }
+        // assign a new state number for this node if there isn't one
+        stateNumberTranslator.put(s, Utils.integer(stateCounter));
+        stateCounter++;
+
+        // visit nodes pointed to by each transition;
+        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+            Transition edge = (Transition) s.transition(i);
+            walkFANormalizingStateNumbers(edge.target); // keep walkin'
+            // if this transition is a rule reference, the node "following" this state
+            // will not be found and appear to be not in graph.  Must explicitly jump
+            // to it, but don't "draw" an edge.
+            if ( edge instanceof RuleClosureTransition ) {
+                walkFANormalizingStateNumbers(((RuleClosureTransition)edge).getFollowState());
+            }
+        }
+    }
+
+    protected void walkSerializingFA(List lines, State s) { // second pass: emit one line per edge
+        if ( markedStates.contains(s) ) {
+            return; // already visited this node
+        }
+
+        markedStates.add(s); // mark this node as completed.
+
+		int normalizedStateNumber = s.stateNumber; // without renumbering, use the state's own number
+		if ( stateNumberTranslator!=null ) {
+	        Integer normalizedStateNumberI = (Integer)stateNumberTranslator.get(s);
+			normalizedStateNumber = normalizedStateNumberI.intValue(); // NPE here would mean pass 1 missed this state
+		}
+
+		String stateStr = getStateString(normalizedStateNumber, s);
+
+        // depth first walk each transition, printing its edge first
+        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+            Transition edge = (Transition) s.transition(i);
+            StringBuffer buf = new StringBuffer();
+            buf.append(stateStr);
+            if ( edge.isEpsilon() ) {
+                buf.append("->");
+            }
+			else if ( edge.isSemanticPredicate() ) {
+				buf.append("-{"+edge.label.getSemanticContext()+"}?->");
+			}
+			else {
+				String predsStr = "";
+				if ( edge.target instanceof DFAState ) {
+					// look for gated predicates; don't add gated to simple sempred edges
+					SemanticContext preds =
+						((DFAState)edge.target).getGatedPredicatesInNFAConfigurations();
+					if ( preds!=null ) {
+						predsStr = "&&{"+
+							preds.genExpr(grammar.generator,
+									   	  grammar.generator.getTemplates(), null).toString()
+							+"}?";
+					}
+				}
+				buf.append("-"+edge.label.toString(grammar)+predsStr+"->");
+			}
+
+			int normalizedTargetStateNumber = edge.target.stateNumber;
+			if ( stateNumberTranslator!=null ) {
+				Integer normalizedTargetStateNumberI =
+					(Integer)stateNumberTranslator.get(edge.target);
+				normalizedTargetStateNumber = normalizedTargetStateNumberI.intValue();
+			}
+			buf.append(getStateString(normalizedTargetStateNumber, edge.target));
+            buf.append("\n");
+            lines.add(buf.toString());
+
+            // walk this transition
+            walkSerializingFA(lines, edge.target);
+
+            // if this transition is a rule reference, the node "following" this state
+            // will not be found and appear to be not in graph.  Must explicitly jump
+            // to it, but don't "draw" an edge.
+            if ( edge instanceof RuleClosureTransition ) {
+                walkSerializingFA(lines, ((RuleClosureTransition)edge).getFollowState());
+            }
+        }
+
+    }
+
+    private String getStateString(int n, State s) { // ".sN" for plain states, ":sN" for accepts
+        String stateStr = ".s"+n;
+        if ( s.isAcceptState() ) {
+            if ( s instanceof DFAState ) {
+                stateStr = ":s"+n+"=>"+((DFAState)s).getUniquelyPredictedAlt(); // DFA accepts also show predicted alt
+            }
+            else {
+                stateStr = ":s"+n;
+            }
+        }
+        return stateStr;
+    }
+
+
+}
diff --git a/src/org/antlr/tool/Grammar.java b/src/org/antlr/tool/Grammar.java
new file mode 100644
index 0000000..243bb8c
--- /dev/null
+++ b/src/org/antlr/tool/Grammar.java
@@ -0,0 +1,2341 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.RecognitionException;
+import antlr.Token;
+import antlr.TokenStreamRewriteEngine;
+import antlr.TokenWithIndex;
+import antlr.collections.AST;
+import org.antlr.Tool;
+import org.antlr.analysis.*;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.Barrier;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.Utils;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.io.*;
+import java.util.*;
+
+/** Represents a grammar in memory. */
+public class Grammar {
+	public static final String SYNPRED_RULE_PREFIX = "synpred";
+
+	public static final String GRAMMAR_FILE_EXTENSION = ".g";
+
+	/** used for generating lexer temp files */
+	public static final String LEXER_GRAMMAR_FILE_EXTENSION = ".g";
+
+	public static final int INITIAL_DECISION_LIST_SIZE = 300;
+	public static final int INVALID_RULE_INDEX = -1;
+
+	// the various kinds of labels. t=type, id=ID, types+=type ids+=ID
+	public static final int RULE_LABEL = 1;
+	public static final int TOKEN_LABEL = 2;
+	public static final int RULE_LIST_LABEL = 3;
+	public static final int TOKEN_LIST_LABEL = 4;
+	public static final int CHAR_LABEL = 5; // used in lexer for x='a'
+
+	public static String[] LabelTypeToString =
+		{"<invalid>", "rule", "token", "rule-list", "token-list"};
+
+    public static final String ARTIFICIAL_TOKENS_RULENAME = "Tokens";
+	public static final String FRAGMENT_RULE_MODIFIER = "fragment";
+
+	public static final String SYNPREDGATE_ACTION_NAME = "synpredgate";
+
+	/** When converting ANTLR char and string literals, here is the
+	 *  value set of escape chars.
+	 */
+	public static int ANTLRLiteralEscapedCharValue[] = new int[255];
+
+	/** Given a char, we need to be able to show it as an ANTLR literal.
+	 */
+	public static String ANTLRLiteralCharValueEscape[] = new String[255];
+
+	static {
+		ANTLRLiteralEscapedCharValue['n'] = '\n';
+		ANTLRLiteralEscapedCharValue['r'] = '\r';
+		ANTLRLiteralEscapedCharValue['t'] = '\t';
+		ANTLRLiteralEscapedCharValue['b'] = '\b';
+		ANTLRLiteralEscapedCharValue['f'] = '\f';
+		ANTLRLiteralEscapedCharValue['\\'] = '\\';
+		ANTLRLiteralEscapedCharValue['\''] = '\'';
+		ANTLRLiteralEscapedCharValue['"'] = '"';
+		ANTLRLiteralCharValueEscape['\n'] = "\\n";
+		ANTLRLiteralCharValueEscape['\r'] = "\\r";
+		ANTLRLiteralCharValueEscape['\t'] = "\\t";
+		ANTLRLiteralCharValueEscape['\b'] = "\\b";
+		ANTLRLiteralCharValueEscape['\f'] = "\\f";
+		ANTLRLiteralCharValueEscape['\\'] = "\\\\";
+		ANTLRLiteralCharValueEscape['\''] = "\\'";
+	}
+
+    public static final int LEXER = 1;
+    public static final int PARSER = 2;
+	public static final int TREE_PARSER = 3;
+	public static final int COMBINED = 4;
+	public static final String[] grammarTypeToString = new String[] {
+		"<invalid>",
+		"lexer",
+		"parser",
+		"tree",
+		"combined"
+	};
+
+	public static final String[] grammarTypeToFileNameSuffix = new String[] {
+		"<invalid>",
+		"Lexer",
+		"Parser",
+		"", // no suffix for tree grammars
+		"Parser" // if combined grammar, gen Parser and Lexer will be done later
+	};
+
+	/** This is the buffer of *all* tokens found in the grammar file
+	 *  including whitespace tokens etc...  I use this to extract
+	 *  lexer rules from combined grammars.
+	 */
+	protected TokenStreamRewriteEngine tokenBuffer;
+	public static final String IGNORE_STRING_IN_GRAMMAR_FILE_NAME = "__";
+
+	public static class Decision {
+		public int decision;
+		public NFAState startState;
+		public GrammarAST blockAST;
+		public DFA dfa;
+	}
+
+	public class LabelElementPair {
+		public antlr.Token label;
+		public GrammarAST elementRef;
+		public String referencedRuleName;
+		/** Has an action referenced the label?  Set by ActionAnalysis.g
+		 *  Currently only set for rule labels.
+		 */
+		public boolean actionReferencesLabel;
+		public int type; // in {RULE_LABEL,TOKEN_LABEL,RULE_LIST_LABEL,TOKEN_LIST_LABEL}
+		public LabelElementPair(antlr.Token label, GrammarAST elementRef) {
+			this.label = label;
+			this.elementRef = elementRef;
+			this.referencedRuleName = elementRef.getText();
+		}
+		public Rule getReferencedRule() {
+			return getRule(referencedRuleName);
+		}
+		public String toString() {
+			return elementRef.toString();
+		}
+	}
+
+	/** What name did the user provide for this grammar? */
+	public String name;
+
+    /** What type of grammar is this: lexer, parser, tree walker */
+    public int type;
+
+    /** A list of options specified at the grammar level such as language=Java.
+     *  The value can be an AST for complicated values such as character sets.
+     *  There may be code generator specific options in here.  I do no
+     *  interpretation of the key/value pairs...they are simply available for
+     *  who wants them.
+     */
+    protected Map options;
+
+	public static final Set legalOptions =
+			new HashSet() {
+				{
+				add("language"); add("tokenVocab");
+				add("output"); add("rewrite"); add("ASTLabelType");
+				add("TokenLabelType");
+				add("superClass");
+				add("filter");
+				add("k");
+				add("backtrack");
+				add("memoize");
+				}
+			};
+
+	public static final Set doNotCopyOptionsToLexer =
+		new HashSet() {
+			{
+				add("output"); add("ASTLabelType"); add("superClass");
+			 	add("k"); add("backtrack"); add("memoize"); add("rewrite");
+			}
+		};
+
+	public static final Map defaultOptions =
+			new HashMap() {
+				{
+					put("language","Java");
+				}
+			};
+
+	/** Is there a global fixed lookahead set for this grammar?
+	 *  If 0, nothing specified.  -1 implies we have not looked at
+	 *  the options table yet to set k.
+	 */
+	protected int global_k = -1;
+
+	/** Map a scope to a map of name:action pairs.
+	 *  Map<String, Map<String,GrammarAST>>
+	 *  The code generator will use this to fill holes in the output files.
+	 *  I track the AST node for the action in case I need the line number
+	 *  for errors.
+	 */
+	protected Map actions = new HashMap();
+
+	/** The NFA that represents the grammar with edges labelled with tokens
+     *  or epsilon.  It is more suitable to analysis than an AST representation.
+     */
+    protected NFA nfa;
+
+    /** Token names and literal tokens like "void" are uniquely indexed.
+     *  with -1 implying EOF.  Characters are different; they go from
+     *  -1 (EOF) to \uFFFE.  For example, 0 could be a binary byte you
+     *  want to lex.  Labels of DFA/NFA transitions can be both tokens
+     *  and characters.  I use negative numbers for bookkeeping labels
+     *  like EPSILON. Char/String literals and token types overlap in the same
+	 *  space, however.
+     */
+    protected int maxTokenType = Label.MIN_TOKEN_TYPE-1;
+
+	/** TODO: hook this to the charVocabulary option */
+	protected IntSet charVocabulary = null;
+
+    /** Map token like ID (but not literals like "while") to its token type */
+    protected Map tokenIDToTypeMap = new HashMap();
+
+    /** Map token literals like "while" to its token type.  It may be that
+     *  WHILE="while"=35, in which case both tokenNameToTypeMap and this
+     *  field will have entries both mapped to 35.
+     */
+    protected Map stringLiteralToTypeMap = new HashMap();
+
+    /** Map a token type to its token name.
+	 *  Must subtract MIN_TOKEN_TYPE from index.
+	 */
+    protected Vector typeToTokenList = new Vector();
+
+	/** For interpreting and testing, you sometimes want to import token
+	 *  definitions from another grammar (instead of reading token defs from
+	 *  a file).
+	 */
+	protected Grammar importTokenVocabularyFromGrammar;
+
+	/** For ANTLRWorks, we want to be able to map a line:col to a specific
+	 *  decision DFA so it can display DFA.
+	 */
+	Map lineColumnToLookaheadDFAMap = new HashMap();
+
+	public Tool tool;
+
+	/** The unique set of all rule references in any rule; set of Token
+	 *  objects so two refs to same rule can exist but at different line/position.
+	 */
+	protected Set<antlr.Token> ruleRefs = new HashSet<antlr.Token>();
+
+	/** The unique set of all token ID references in any rule */
+	protected Set<antlr.Token> tokenIDRefs = new HashSet<antlr.Token>();
+
+	/** If combined or lexer grammar, track the rules; Set<String>.
+	 * 	Track lexer rules so we can warn about undefined tokens.
+ 	 */
+	protected Set<String> lexerRules = new HashSet<String>();
+
+    /** Be able to assign a number to every decision in grammar;
+     *  decisions in 1..n
+     */
+    protected int decisionNumber = 0;
+
+    /** Rules are uniquely labeled from 1..n */
+    protected int ruleIndex = 1;
+
+	/** A list of all rules that are in any left-recursive cycle.  There
+	 *  could be multiple cycles, but this is a flat list of all problematic
+	 *  rules.
+	 */
+	protected Set leftRecursiveRules;
+
+	/** An external tool requests that DFA analysis abort prematurely.  Stops
+	 *  at DFA granularity, which are limited to a DFA size and time computation
+	 *  as failsafe.
+	 */
+	protected boolean externalAnalysisAbort;
+
+	/** When we read in a grammar, we track the list of syntactic predicates
+	 *  and build faux rules for them later.  See my blog entry Dec 2, 2005:
+	 *  http://www.antlr.org/blog/antlr3/lookahead.tml
+	 *  This maps the name (we make up) for a pred to the AST grammar fragment.
+	 */
+	protected LinkedHashMap nameToSynpredASTMap;
+
+	/** Map a rule to its Rule object
+	 */
+	protected LinkedHashMap nameToRuleMap = new LinkedHashMap();
+
+	/** Track the scopes defined outside of rules and the scopes associated
+	 *  with all rules (even if empty).
+	 */
+	protected Map scopes = new HashMap();
+
+	/** Map a rule index to its name; use a Vector on purpose as new
+	 *  collections stuff won't let me setSize and make it grow.  :(
+	 *  I need a specific guaranteed index, which the Collections stuff
+	 *  won't let me have.
+	 */
+	protected Vector ruleIndexToRuleList = new Vector();
+
+    /** An AST that records entire input grammar with all rules.  A simple
+     *  grammar with one rule, "grammar t; a : A | B ;", looks like:
+     * ( grammar t ( rule a ( BLOCK ( ALT A ) ( ALT B ) ) <end-of-rule> ) )
+     */
+    protected GrammarAST grammarTree = null;
+
+    /** Each subrule/rule is a decision point and we must track them so we
+     *  can go back later and build DFA predictors for them.  This includes
+     *  all the rules, subrules, optional blocks, ()+, ()* etc...  The
+     *  elements in this list are NFAState objects.
+     */
+	protected Vector indexToDecision = new Vector(INITIAL_DECISION_LIST_SIZE);
+
+    /** If non-null, this is the code generator we will use to generate
+     *  recognizers in the target language.
+     */
+    protected CodeGenerator generator;
+
+	NameSpaceChecker nameSpaceChecker = new NameSpaceChecker(this);
+
+	/** Used during LOOK to detect computation cycles */
+	protected Set lookBusy = new HashSet();
+
+	/** The checkForLeftRecursion method needs to track what rules it has
+	 *  visited to track infinite recursion.
+	 */
+	protected Set visitedDuringRecursionCheck = null;
+
+	protected boolean watchNFAConversion = false;
+
+	/** For merged lexer/parsers, we must construct a separate lexer spec.
+	 *  This is the template for lexer; put the literals first then the
+	 *  regular rules.  We don't need to specify a token vocab import as
+	 *  I make the new grammar import from the old all in memory; don't want
+	 *  to force it to read from the disk.  Lexer grammar will have same
+	 *  name as original grammar but will be in different filename.  Foo.g
+	 *  with combined grammar will have FooParser.java generated and
+	 *  Foo__.g with again Foo inside.  It will however generate FooLexer.java
+	 *  as it's a lexer grammar.  A bit odd, but autogenerated.  Can tweak
+	 *  later if we want.
+	 */
+	protected StringTemplate lexerGrammarST =
+		new StringTemplate(
+			"lexer grammar <name>;\n" +
+			"<if(options)>" +
+			"options {\n" +
+			"  <options:{<it.name>=<it.value>;<\\n>}>\n" +
+			"}<\\n>\n" +
+			"<endif>\n" +
+			"<actionNames,actions:{n,a|@<n> {<a>}\n}>\n" +
+			"<literals:{<it.ruleName> : <it.literal> ;\n}>\n" +
+			"<rules>",
+			AngleBracketTemplateLexer.class
+		);
+
+	/** What file name holds this grammar? */
+	protected String fileName;
+
+	/** How long in ms did it take to build DFAs for this grammar?
+	 *  If this grammar is a combined grammar, it only records time for
+	 *  the parser grammar component.  This only records the time to
+	 *  do the LL(*) work; NFA->DFA conversion.
+	 */
+	public long DFACreationWallClockTimeInMS;
+
+	public int numberOfSemanticPredicates = 0;
+	public int numberOfManualLookaheadOptions = 0;
+	public Set setOfNondeterministicDecisionNumbers = new HashSet();
+	public Set setOfNondeterministicDecisionNumbersResolvedWithPredicates = new HashSet();
+	public Set setOfDFAWhoseConversionTerminatedEarly = new HashSet();
+
+	/** Track decisions with syn preds specified for reporting.
+	 *  This is the set of BLOCK type AST nodes.
+	 */
+	public Set<GrammarAST> blocksWithSynPreds = new HashSet();
+
+	/** Track decisions that actually use the syn preds in the DFA.
+	 *  Computed during NFA to DFA conversion.
+	 */
+	public Set<DFA> decisionsWhoseDFAsUsesSynPreds = new HashSet();
+
+	/** Track names of preds so we can avoid generating preds that aren't used
+	 *  Computed during NFA to DFA conversion.  Just walk accept states
+	 *  and look for synpreds because that is the only state target whose
+	 *  incident edges can have synpreds.  Same is true for
+	 *  decisionsWhoseDFAsUsesSynPreds.
+	 */
+	public Set<String> synPredNamesUsedInDFA = new HashSet();
+
+	/** Track decisions with syn preds specified for reporting.
+	 *  This is the set of BLOCK type AST nodes.
+	 */
+	public Set<GrammarAST> blocksWithSemPreds = new HashSet();
+
+	/** Track decisions that actually use the syn preds in the DFA. Set<DFA> */
+	public Set decisionsWhoseDFAsUsesSemPreds = new HashSet();
+
+	protected boolean allDecisionDFACreated = false;
+
+	/** We need a way to detect when a lexer grammar is autogenerated from
+	 *  another grammar or we are just sending in a string representing a
+	 *  grammar.  We don't want to generate a .tokens file, for example,
+	 *  in such cases.
+	 */
+	protected boolean builtFromString = false;
+
+	/** Factored out the sanity checking code; delegate to it. */
+	GrammarSanity sanity = new GrammarSanity(this);
+
+	public Grammar() { // empty in-memory grammar; callers fill content later
+		initTokenSymbolTables();
+		builtFromString = true; // no backing file; suppresses .tokens-file-style outputs (see field comment)
+	}
+
+	public Grammar(String grammarString) // parse a grammar held entirely in a String
+			throws antlr.RecognitionException, antlr.TokenStreamException
+	{
+		builtFromString = true; // in-memory grammar; no real file behind it
+		initTokenSymbolTables();
+		setFileName("<string>"); // placeholder used in error messages
+		setGrammarContent(new StringReader(grammarString));
+	}
+
+	public Grammar(String fileName, String grammarString) // grammar text with an explicit (possibly fake) file name
+			throws antlr.RecognitionException, antlr.TokenStreamException
+	{
+		this(null, fileName, new StringReader(grammarString)); // null Tool; NOTE(review): unlike Grammar(String), builtFromString is NOT set here -- confirm intended
+	}
+
+    /** Create a grammar from a Reader.  Parse the grammar, building a tree
+     *  and loading a symbol table of sorts here in Grammar.  Then create
+     *  an NFA and associated factory.  Walk the AST representing the grammar,
+     *  building the state clusters of the NFA.
+     */
+    public Grammar(Tool tool, String fileName, Reader r)
+            throws antlr.RecognitionException, antlr.TokenStreamException
+    {
+		initTokenSymbolTables();
+		setTool(tool);
+		setFileName(fileName);
+		setGrammarContent(r); // parses, builds the AST, assigns token types, defines rules
+	}
+
+	public void setFileName(String fileName) { // record which file this grammar came from
+		this.fileName = fileName;
+	}
+
+	public String getFileName() { // file this grammar was read from, or "<string>" for in-memory grammars
+		return fileName;
+	}
+
+	public void setName(String name) { // set grammar name; warns if it disagrees with the file name
+		if ( name==null ) {
+			return;
+		}
+		// don't error check autogenerated files (those with '__' in them)
+		String saneFile = fileName.replace('\\', '/'); // normalize Windows separators before splitting
+		int lastSlash = saneFile.lastIndexOf('/');
+		String onlyFileName = saneFile.substring(lastSlash+1, fileName.length()); // saneFile and fileName are the same length, so this is safe
+		if ( !builtFromString ) {
+			int lastDot = onlyFileName.lastIndexOf('.');
+			String onlyFileNameNoSuffix = null;
+			if ( lastDot < 0 ) {
+				ErrorManager.error(ErrorManager.MSG_FILENAME_EXTENSION_ERROR, fileName);
+				onlyFileNameNoSuffix = onlyFileName+GRAMMAR_FILE_EXTENSION; // NOTE(review): appends ".g" to a suffixless name before comparing -- confirm intended
+			}
+			else {
+				onlyFileNameNoSuffix = onlyFileName.substring(0,lastDot);
+			}
+			if ( !name.equals(onlyFileNameNoSuffix) ) {
+				ErrorManager.error(ErrorManager.MSG_FILE_AND_GRAMMAR_NAME_DIFFER,
+								   name,
+								   fileName);
+			}
+		}
+		this.name = name;
+	}
+
+	public void setGrammarContent(String grammarString) // convenience overload for grammar text in a String
+		throws antlr.RecognitionException, antlr.TokenStreamException
+	{
+		setGrammarContent(new StringReader(grammarString)); // delegate to the Reader-based loader
+	}
+
+	public void setGrammarContent(Reader r) // main entry: lex, parse, assign token types, define rules, sanity-check
+		throws antlr.RecognitionException, antlr.TokenStreamException
+	{
+		ErrorManager.resetErrorState(); // reset in case > 1 grammar in same thread
+
+		// BUILD AST FROM GRAMMAR
+		ANTLRLexer lexer = new ANTLRLexer(r);
+		lexer.setFilename(this.getFileName());
+		// use the rewrite engine because we want to buffer up all tokens
+		// in case they have a merged lexer/parser, send lexer rules to
+		// new grammar.
+		lexer.setTokenObjectClass("antlr.TokenWithIndex");
+		tokenBuffer = new TokenStreamRewriteEngine(lexer);
+		tokenBuffer.discard(ANTLRParser.WS); // hide whitespace/comment tokens from the parser
+		tokenBuffer.discard(ANTLRParser.ML_COMMENT);
+		tokenBuffer.discard(ANTLRParser.COMMENT);
+		tokenBuffer.discard(ANTLRParser.SL_COMMENT);
+		ANTLRParser parser = new ANTLRParser(tokenBuffer);
+		parser.getASTFactory().setASTNodeClass(GrammarAST.class);
+		parser.setFilename(this.getFileName());
+		parser.setASTNodeClass("org.antlr.tool.GrammarAST");
+		parser.grammar(this); // start rule; populates this Grammar as it parses
+		grammarTree = (GrammarAST)parser.getAST();
+		setFileName(lexer.getFilename()); // the lexer #src might change name
+		if ( grammarTree.findFirstType(ANTLRParser.RULE)==null ) {
+			ErrorManager.error(ErrorManager.MSG_NO_RULES, getFileName());
+			return; // nothing further to do for a rule-less grammar
+		}
+
+		// Get syn pred rules and add to existing tree
+		List synpredRules =
+			getArtificialRulesForSyntacticPredicates(parser,
+												 	 nameToSynpredASTMap);
+		for (int i = 0; i < synpredRules.size(); i++) {
+			GrammarAST rAST = (GrammarAST) synpredRules.get(i);
+			grammarTree.addChild(rAST); // synpreds become ordinary rules in the tree
+		}
+
+		if ( Tool.internalOption_PrintGrammarTree ) {
+			System.out.println(grammarTree.toStringList());
+		}
+
+		// ASSIGN TOKEN TYPES
+		//System.out.println("### assign types");
+		AssignTokenTypesWalker ttypesWalker = new AssignTokenTypesWalker();
+		ttypesWalker.setASTNodeClass("org.antlr.tool.GrammarAST");
+		try {
+			ttypesWalker.grammar(grammarTree, this);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   re);
+		}
+
+		// DEFINE RULES
+		//System.out.println("### define rules");
+		DefineGrammarItemsWalker defineItemsWalker = new DefineGrammarItemsWalker();
+		defineItemsWalker.setASTNodeClass("org.antlr.tool.GrammarAST");
+		try {
+			defineItemsWalker.grammar(grammarTree, this);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   re);
+		}
+
+		// ANALYZE ACTIONS, LOOKING FOR LABEL AND ATTR REFS
+		examineAllExecutableActions();
+		checkAllRulesForUselessLabels();
+
+		nameSpaceChecker.checkConflicts();
+	}
+
+	/** If the grammar is a merged grammar, return the text of the implicit
+	 *  lexer grammar.
+	 */
+	public String getLexerGrammar() {
+		if ( lexerGrammarST.getAttribute("literals")==null &&
+			 lexerGrammarST.getAttribute("rules")==null )
+		{
+			// if no rules, return nothing
+			return null;
+		}
+		lexerGrammarST.setAttribute("name", name); // lexer grammar keeps the combined grammar's name
+		// if there are any actions set for lexer, pass them in
+		if ( actions.get("lexer")!=null ) {
+			lexerGrammarST.setAttribute("actionNames",
+										((Map)actions.get("lexer")).keySet());
+			lexerGrammarST.setAttribute("actions",
+										((Map)actions.get("lexer")).values());
+		}
+		// make sure generated grammar has the same options
+		if ( options!=null ) {
+			Iterator optionNames = options.keySet().iterator();
+			while (optionNames.hasNext()) {
+				String optionName = (String) optionNames.next();
+				if ( !doNotCopyOptionsToLexer.contains(optionName) ) {
+					Object value = options.get(optionName);
+					lexerGrammarST.setAttribute("options.{name,value}", optionName, value); // ST aggregate-attribute syntax
+				}
+			}
+		}
+		return lexerGrammarST.toString(); // render the lexerGrammarST template
+	}
+
+	public String getImplicitlyGeneratedLexerFileName() { // e.g. grammar "Foo" -> "Foo__.g" (see lexerGrammarST comment)
+		return name+
+			IGNORE_STRING_IN_GRAMMAR_FILE_NAME +
+			LEXER_GRAMMAR_FILE_EXTENSION;
+	}
+
+	public File getImportedVocabFileName(String vocabName) { // locate a tokenVocab file in the tool's library dir
+		return new File(tool.getLibraryDirectory(),
+						File.separator+ // NOTE(review): leading separator is redundant with the two-arg File ctor -- harmless, but confirm
+							vocabName+
+							CodeGenerator.VOCAB_FILE_EXTENSION);
+	}
+
+	/** Parse a rule we add artificially that is a list of the other lexer
+     *  rules like this: "Tokens : ID | INT | SEMI ;"  nextToken() will invoke
+     *  this to set the current token.  Add char literals before
+     *  the rule references.
+	 *
+	 *  If in filter mode, we want every alt to backtrack and we need to
+	 *  do k=1 to force the "first token def wins" rule.  Otherwise, the
+	 *  longest-match rule comes into play with LL(*).
+	 *
+	 *  The ANTLRParser antlr.g file now invokes this when parsing a lexer
+	 *  grammar, which I think is proper even though it peeks at the info
+	 *  that later phases will compute.  It gets a list of lexer rules
+	 *  and builds a string representing the rule; then it creates a parser
+	 *  and adds the resulting tree to the grammar's tree.
+     */
+    public GrammarAST addArtificialMatchTokensRule(GrammarAST grammarAST,
+												   List ruleNames,
+												   boolean filterMode) {
+		StringTemplate matchTokenRuleST = null;
+		if ( filterMode ) {
+			matchTokenRuleST = new StringTemplate(
+					ARTIFICIAL_TOKENS_RULENAME+
+						" options {k=1; backtrack=true;} : <rules; separator=\"|\">;",
+					AngleBracketTemplateLexer.class);
+		}
+		else {
+			matchTokenRuleST = new StringTemplate(
+					ARTIFICIAL_TOKENS_RULENAME+" : <rules; separator=\"|\">;",
+					AngleBracketTemplateLexer.class);
+		}
+
+		// Now add token rule references
+		for (int i = 0; i < ruleNames.size(); i++) {
+			String rname = (String) ruleNames.get(i);
+			matchTokenRuleST.setAttribute("rules", rname); // each ref becomes one alt, joined by '|'
+		}
+		//System.out.println("tokens rule: "+matchTokenRuleST.toString());
+
+        ANTLRLexer lexer = new ANTLRLexer(new StringReader(matchTokenRuleST.toString())); // re-parse the synthesized rule text
+		lexer.setTokenObjectClass("antlr.TokenWithIndex");
+		TokenStreamRewriteEngine tokbuf =
+			new TokenStreamRewriteEngine(lexer);
+		tokbuf.discard(ANTLRParser.WS); // hide whitespace/comment tokens from the parser
+		tokbuf.discard(ANTLRParser.ML_COMMENT);
+		tokbuf.discard(ANTLRParser.COMMENT);
+		tokbuf.discard(ANTLRParser.SL_COMMENT);
+        ANTLRParser parser = new ANTLRParser(tokbuf);
+		parser.grammar = this;
+		parser.gtype = ANTLRParser.LEXER_GRAMMAR; // parse in lexer-grammar mode
+        parser.setASTNodeClass("org.antlr.tool.GrammarAST");
+        try {
+            parser.rule();
+			if ( Tool.internalOption_PrintGrammarTree ) {
+				System.out.println("Tokens rule: "+parser.getAST().toStringTree());
+			}
+			GrammarAST p = grammarAST;
+			while ( p.getType()!=ANTLRParser.LEXER_GRAMMAR ) { // find the lexer-grammar subtree to attach to
+				p = (GrammarAST)p.getNextSibling();
+			}
+			p.addChild(parser.getAST());
+        }
+        catch (Exception e) {
+			ErrorManager.error(ErrorManager.MSG_ERROR_CREATING_ARTIFICIAL_RULE,
+							   e);
+        }
+		return (GrammarAST)parser.getAST(); // the freshly parsed Tokens-rule subtree
+	}
+
+	/** for any syntactic predicates, we need to define rules for them; they will get
+	 *  defined automatically like any other rule. :)
+	 */
+	protected List getArtificialRulesForSyntacticPredicates(ANTLRParser parser,
+															LinkedHashMap nameToSynpredASTMap)
+	{
+		List rules = new ArrayList();
+		if ( nameToSynpredASTMap==null ) {
+			return rules;
+		}
+		Set predNames = nameToSynpredASTMap.keySet();
+		boolean isLexer = grammarTree.getType()==ANTLRParser.LEXER_GRAMMAR;
+		for (Iterator it = predNames.iterator(); it.hasNext();) {
+			String synpredName = (String)it.next();
+			GrammarAST fragmentAST =
+				(GrammarAST) nameToSynpredASTMap.get(synpredName);
+			GrammarAST ruleAST =
+				parser.createSimpleRuleAST(synpredName,
+										   fragmentAST,
+										   isLexer);
+			rules.add(ruleAST);
+		}
+		return rules;
+	}
+
	/** Seed the token symbol tables with the faux token types (INVALID,
	 *  EOT, SEMPRED, ...) and the predefined runtime types (EOF, EOR,
	 *  DOWN, UP).  typeToTokenList maps (shifted) type -> name and
	 *  tokenIDToTypeMap maps name -> raw type.
	 */
	protected void initTokenSymbolTables() {
        // the faux token types take first NUM_FAUX_LABELS positions
		// then we must have room for the predefined runtime token types
		// like DOWN/UP used for tree parsing.
        typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE-1);
        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, "<INVALID>");
        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, "<EOT>");
        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, "<SEMPRED>");
        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
		// NOTE(review): EOR/DOWN/UP use an extra -1 shift that the faux
		// labels above do not; presumably because they are real token types
		// (>= MIN_TOKEN_TYPE) rather than faux labels -- confirm against
		// Label's constant definitions.
		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.DOWN-1, "DOWN");
		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.UP-1, "UP");
        tokenIDToTypeMap.put("<INVALID>", Utils.integer(Label.INVALID));
        tokenIDToTypeMap.put("<EOT>", Utils.integer(Label.EOT));
        tokenIDToTypeMap.put("<SEMPRED>", Utils.integer(Label.SEMPRED));
        tokenIDToTypeMap.put("<SET>", Utils.integer(Label.SET));
        tokenIDToTypeMap.put("<EPSILON>", Utils.integer(Label.EPSILON));
		tokenIDToTypeMap.put("EOF", Utils.integer(Label.EOF));
		tokenIDToTypeMap.put("<EOR>", Utils.integer(Label.EOR_TOKEN_TYPE));
		tokenIDToTypeMap.put("DOWN", Utils.integer(Label.DOWN));
		tokenIDToTypeMap.put("UP", Utils.integer(Label.UP));
    }
+
+    /** Walk the list of options, altering this Grammar object according
+     *  to any I recognize.
+    protected void processOptions() {
+        Iterator optionNames = options.keySet().iterator();
+        while (optionNames.hasNext()) {
+            String optionName = (String) optionNames.next();
+            Object value = options.get(optionName);
+            if ( optionName.equals("tokenVocab") ) {
+
+            }
+        }
+    }
+     */
+
+    public void createNFAs() {
+		//System.out.println("### create NFAs");
+		if ( nfa!=null ) {
+			// don't let it create more than once; has side-effects
+			return;
+		}
+		if ( getRules().size()==0 ) {
+			return;
+		}
+		nfa = new NFA(this); // create NFA that TreeToNFAConverter'll fill in
+		NFAFactory factory = new NFAFactory(nfa);
+		TreeToNFAConverter nfaBuilder = new TreeToNFAConverter(this, nfa, factory);
+		try {
+			nfaBuilder.grammar(grammarTree);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   name,
+							   re);
+		}
+		//System.out.println("NFA has "+factory.getNumberOfStates()+" states");
+	}
+
	/** For each decision in this grammar, compute a single DFA using the
     *  NFA states associated with the decision.  The DFA construction
     *  determines whether or not the alternatives in the decision are
     *  separable using a regular lookahead language.
     *
     *  Store the lookahead DFAs in the AST created from the user's grammar
     *  so the code generator or whoever can easily access it.
     *
     *  This is a separate method because you might want to create a
     *  Grammar without doing the expensive analysis.
     */
    public void createLookaheadDFAs() {
		// lazily build the NFA first; DFA construction needs it
		if ( nfa==null ) {
			createNFAs();
		}

		long start = System.currentTimeMillis();

		//System.out.println("### create DFAs");
		int numDecisions = getNumberOfDecisions();
		if ( NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION ) {
			for (int decision=1; decision<=numDecisions; decision++) {
				NFAState decisionStartState = getDecisionNFAStartState(decision);
				// skip trivial single-transition decisions and honor an
				// external abort request (e.g. from ANTLRWorks)
				if ( !externalAnalysisAbort && decisionStartState.getNumberOfTransitions()>1 ) {
					createLookaheadDFA(decision);
				}
			}
		}
		else {
			ErrorManager.info("two-threaded DFA conversion");
			// create a barrier expecting n DFA and this main creation thread
			Barrier barrier = new Barrier(3);
			// assume 2 CPU for now; split the decision range in half
			int midpoint = numDecisions/2;
			NFAConversionThread t1 =
				new NFAConversionThread(this, barrier, 1, midpoint);
			new Thread(t1).start();
			// NOTE(review): this condition is always true as written since
			// midpoint was just assigned numDecisions/2, so midpoint is
			// always incremented; presumably the intent is to keep the two
			// ranges [1,midpoint] and [midpoint+1,numDecisions] disjoint --
			// confirm intended semantics.
			if ( midpoint == (numDecisions/2) ) {
				midpoint++;
			}
			NFAConversionThread t2 =
				new NFAConversionThread(this, barrier, midpoint, numDecisions);
			new Thread(t2).start();
			// wait for these two threads to finish
			try {
				barrier.waitForRelease();
			}
			catch(InterruptedException e) {
				ErrorManager.internalError("what the hell? DFA interruptus", e);
			}
		}

		long stop = System.currentTimeMillis();
		DFACreationWallClockTimeInMS = stop - start;

		// indicate that we've finished building DFA (even if #decisions==0)
		allDecisionDFACreated = true;
	}
+
	/** Build the lookahead DFA for one decision.  If full LL(*) analysis
	 *  aborts (or the decision is non-LL(*) with auto-backtracking), retry
	 *  the decision with k=1.  Stores the result via setLookaheadDFA and
	 *  records a line:col -> DFA mapping for ANTLRWorks.
	 */
	public void createLookaheadDFA(int decision) {
		Decision d = getDecision(decision);
		String enclosingRule = d.startState.getEnclosingRule();
		Rule r = getRule(enclosingRule);

		//System.out.println("createLookaheadDFA(): "+enclosingRule+" dec "+decision+"; synprednames prev used "+synPredNamesUsedInDFA);
		// don't analyze decisions inside unused synpred rules
		if ( r.isSynPred && !synPredNamesUsedInDFA.contains(enclosingRule) ) {
			return;
		}
		NFAState decisionStartState = getDecisionNFAStartState(decision);
		long startDFA=0,stopDFA=0;
		if ( watchNFAConversion ) {
			System.out.println("--------------------\nbuilding lookahead DFA (d="
							   +decisionStartState.getDecisionNumber()+") for "+
							   decisionStartState.getDescription());
			startDFA = System.currentTimeMillis();
		}
		DFA lookaheadDFA = new DFA(decision, decisionStartState);
		if ( (lookaheadDFA.analysisAborted() && // did analysis bug out?
			 lookaheadDFA.getUserMaxLookahead()!=1) || // either k=* or k>1
			 (lookaheadDFA.probe.isNonLLStarDecision() && // >1 alt recurses, k=*
		      lookaheadDFA.getAutoBacktrackMode()) )
		{
			// set k=1 option if not already k=1 and try again
			// clean up tracking stuff
			decisionsWhoseDFAsUsesSynPreds.remove(lookaheadDFA);
			// TODO: clean up synPredNamesUsedInDFA also (harder)
			lookaheadDFA = null; // make sure other memory is "free" before redoing
			d.blockAST.setOption(this, "k", Utils.integer(1));
			//System.out.println("trying decision "+decision+" again with k=1");
			lookaheadDFA = new DFA(decision, decisionStartState);
			if ( lookaheadDFA.analysisAborted() ) { // did analysis bug out?
				// k=1 must always succeed; failure is an internal error
				ErrorManager.internalError("could not even do k=1 for decision "+decision);
			}
		}

		setLookaheadDFA(decision, lookaheadDFA);

		// create map from line:col to decision DFA (for ANTLRWorks)
		GrammarAST decisionAST = nfa.grammar.getDecisionBlockAST(lookaheadDFA.decisionNumber);
		int line = decisionAST.getLine();
		int col = decisionAST.getColumn();
		lineColumnToLookaheadDFAMap.put(new StringBuffer().append(line + ":")
										.append(col).toString(), lookaheadDFA);

		if ( watchNFAConversion ) {
			stopDFA = System.currentTimeMillis();
			System.out.println("cost: "+lookaheadDFA.getNumberOfStates()+
							   " states, "+(int)(stopDFA-startDFA)+" ms");
		}
		//System.out.println("after create DFA; synPredNamesUsedInDFA="+synPredNamesUsedInDFA);
	}
+
	/** Terminate DFA creation (grammar analysis).
	 *  Sets a flag that createLookaheadDFAs() polls before converting
	 *  each decision, so analysis stops at the next decision boundary.
	 */
	public void externallyAbortNFAToDFAConversion() {
		externalAnalysisAbort = true;
	}
+
	/** Did an external caller (e.g. an IDE) abort DFA construction? */
	public boolean NFAToDFAConversionExternallyAborted() {
		return externalAnalysisAbort;
	}
+
+	/** Return a new unique integer in the token type space */
+	public int getNewTokenType() {
+		maxTokenType++;
+		return maxTokenType;
+	}
+
+	/** Define a token at a particular token type value.  Blast an
+	 *  old value with a new one.  This is called directly during import vocab
+     *  operation to set up tokens with specific values.
+     */
+    public void defineToken(String text, int tokenType) {
+		if ( tokenIDToTypeMap.get(text)!=null ) {
+			// already defined?  Must be predefined one like EOF;
+			// do nothing
+			return;
+		}
+		// the index in the typeToTokenList table is actually shifted to
+		// hold faux labels as you cannot have negative indices.
+        if ( text.charAt(0)=='\'' ) {
+            stringLiteralToTypeMap.put(text, Utils.integer(tokenType));
+        }
+        else { // must be a label like ID
+            tokenIDToTypeMap.put(text, Utils.integer(tokenType));
+        }
+		int index = Label.NUM_FAUX_LABELS+tokenType-1;
+		//System.out.println("defining "+name+" token "+text+" at type="+tokenType+", index="+index);
+		this.maxTokenType = Math.max(this.maxTokenType, tokenType);
+        if ( index>=typeToTokenList.size() ) {
+			typeToTokenList.setSize(index+1);
+		}
+		String prevToken = (String)typeToTokenList.get(index);
+		if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
+			// only record if nothing there before or if thing before was a literal
+			typeToTokenList.set(index, text);
+		}
+    }
+
+	/** Define a new rule.  A new rule index is created by incrementing
+     *  ruleIndex.
+     */
+	public void defineRule(antlr.Token ruleToken,
+						   String modifier,
+						   Map options,
+						   GrammarAST tree,
+						   GrammarAST argActionAST,
+						   int numAlts)
+	{
+		String ruleName = ruleToken.getText();
+		/*
+		System.out.println("defineRule("+ruleName+",modifier="+modifier+
+						   "): index="+ruleIndex);
+		*/
+		if ( getRule(ruleName)!=null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_RULE_REDEFINITION,
+									  this, ruleToken, ruleName);
+        }
+
+		Rule r = new Rule(this, ruleName, ruleIndex, numAlts);
+		r.modifier = modifier;
+        nameToRuleMap.put(ruleName, r);
+		setRuleAST(ruleName, tree);
+		r.setOptions(options, ruleToken);
+		r.argActionAST = argActionAST;
+        ruleIndexToRuleList.setSize(ruleIndex+1);
+        ruleIndexToRuleList.set(ruleIndex, ruleName);
+        ruleIndex++;
+		if ( ruleName.startsWith(SYNPRED_RULE_PREFIX) ) {
+			r.isSynPred = true;
+		}
+	}
+
+	/** Define a new predicate and get back its name for use in building
+	 *  a semantic predicate reference to the syn pred.
+	 */
+	public String defineSyntacticPredicate(GrammarAST blockAST,
+										   String currentRuleName)
+	{
+		if ( nameToSynpredASTMap==null ) {
+			nameToSynpredASTMap = new LinkedHashMap();
+		}
+		String predName = null;
+		predName = SYNPRED_RULE_PREFIX+(nameToSynpredASTMap.size() + 1);
+		nameToSynpredASTMap.put(predName, blockAST);
+		return predName;
+	}
+
	/** Map of synpred name -> block AST; null if none defined yet. */
	public LinkedHashMap getSyntacticPredicates() {
		return nameToSynpredASTMap;
	}
+
+	public GrammarAST getSyntacticPredicate(String name) {
+		if ( nameToSynpredASTMap==null ) {
+			return null;
+		}
+		return (GrammarAST)nameToSynpredASTMap.get(name);
+	}
+
	/** Record that this DFA's decision depends on syntactic predicates;
	 *  also walks semCtx so each referenced predicate is registered.
	 */
	public void synPredUsedInDFA(DFA dfa, SemanticContext semCtx) {
		decisionsWhoseDFAsUsesSynPreds.add(dfa);
		semCtx.trackUseOfSyntacticPredicates(this); // walk ctx looking for preds
		//System.out.println("after tracking use for dec "+dfa.decisionNumber+": "+synPredNamesUsedInDFA);
	}
+
+	/** Given @scope::name {action} define it for this grammar.  Later,
+	 *  the code generator will ask for the actions table.
+	 */
+	public void defineNamedAction(GrammarAST ampersandAST,
+								  String scope,
+								  GrammarAST nameAST,
+								  GrammarAST actionAST)
+	{
+		if ( scope==null ) {
+			scope = getDefaultActionScope(type);
+		}
+		//System.out.println("@"+scope+"::"+nameAST.getText()+"{"+actionAST.getText()+"}");
+		String actionName = nameAST.getText();
+		Map scopeActions = (Map)actions.get(scope);
+		if ( scopeActions==null ) {
+			scopeActions = new HashMap();
+			actions.put(scope, scopeActions);
+		}
+		GrammarAST a = (GrammarAST)scopeActions.get(actionName);
+		if ( a!=null ) {
+			ErrorManager.grammarError(
+				ErrorManager.MSG_ACTION_REDEFINITION,this,
+				nameAST.getToken(),nameAST.getText());
+		}
+		else {
+			scopeActions.put(actionName,actionAST);
+		}
+	}
+
	/** Map of action scope -> (action name -> action AST). */
	public Map getActions() {
		return actions;
	}
+
+	/** Given a grammar type, what should be the default action scope?
+	 *  If I say @members in a COMBINED grammar, for example, the
+	 *  default scope should be "parser".
+	 */
+	public String getDefaultActionScope(int grammarType) {
+		switch (grammarType) {
+			case Grammar.LEXER :
+				return "lexer";
+			case Grammar.PARSER :
+			case Grammar.COMBINED :
+				return "parser";
+			case Grammar.TREE_PARSER :
+				return "treeparser";
+		}
+		return null;
+	}
+
	/** A lexer rule appeared in a combined grammar; reconstruct its source
	 *  text from the token buffer (re-inserting delimiters the lexer
	 *  stripped), prefix it with a "// $ANTLR src" marker comment, and
	 *  add it to the generated lexer grammar template.
	 */
	public void defineLexerRuleFoundInParser(antlr.Token ruleToken,
											 GrammarAST ruleAST)
	{
		//System.out.println("rule tree is:\n"+ruleAST.toStringTree());
		/*
		String ruleText = tokenBuffer.toOriginalString(ruleAST.ruleStartTokenIndex,
											   ruleAST.ruleStopTokenIndex);
		*/
		// first, create the text of the rule
		StringBuffer buf = new StringBuffer();
		// source marker lets the code generator map errors back to the
		// original combined grammar file/line
		buf.append("// $ANTLR src \"");
		buf.append(getFileName());
		buf.append("\" ");
		buf.append(ruleAST.getLine());
		buf.append("\n");
		for (int i=ruleAST.ruleStartTokenIndex;
			 i<=ruleAST.ruleStopTokenIndex && i<tokenBuffer.size();
			 i++)
		{
			TokenWithIndex t = (TokenWithIndex)tokenBuffer.getToken(i);
			// undo the text deletions done by the lexer (ugh)
			if ( t.getType()==ANTLRParser.BLOCK ) {
				buf.append("(");
			}
			else if ( t.getType()==ANTLRParser.ACTION ) {
				buf.append("{");
				buf.append(t.getText());
				buf.append("}");
			}
			else if ( t.getType()==ANTLRParser.SEMPRED ||
				t.getType()==ANTLRParser.SYN_SEMPRED ||
				t.getType()==ANTLRParser.GATED_SEMPRED ||
				t.getType()==ANTLRParser.BACKTRACK_SEMPRED )
			{
				buf.append("{");
				buf.append(t.getText());
				buf.append("}?");
			}
			else if ( t.getType()==ANTLRParser.ARG_ACTION ) {
				buf.append("[");
				buf.append(t.getText());
				buf.append("]");
			}
			else {
				buf.append(t.getText());
			}
		}
		String ruleText = buf.toString();
		//System.out.println("[["+ruleText+"]]");
		// now put the rule into the lexer grammar template
		lexerGrammarST.setAttribute("rules", ruleText);
		// track this lexer rule's name
		lexerRules.add(ruleToken.getText());
	}
+
	/** If someone does PLUS='+' in the parser, must make sure we get
	 *  "PLUS : '+' ;" in lexer not "T73 : '+';"
	 *  Adds the aliased literal rule to the generated lexer grammar
	 *  template and records the rule name.
	 */
	public void defineLexerRuleForAliasedStringLiteral(String tokenID,
													   String literal,
													   int tokenType)
	{
		//System.out.println("defineLexerRuleForAliasedStringLiteral: "+literal+" "+tokenType);
		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
									tokenID,
									Utils.integer(tokenType),
									literal);
		// track this lexer rule's name
		lexerRules.add(tokenID);
	}
+
	/** Add a lexer rule for an unaliased literal; its rule name is
	 *  synthesized from the token type (e.g. T73).
	 */
	public void defineLexerRuleForStringLiteral(String literal, int tokenType) {
		//System.out.println("defineLexerRuleForStringLiteral: "+literal+" "+tokenType);
		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
									computeTokenNameFromLiteral(tokenType,literal),
									Utils.integer(tokenType),
									literal);
	}
+
+	public Rule getRule(String ruleName) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		return r;
+	}
+
+    public int getRuleIndex(String ruleName) {
+		Rule r = getRule(ruleName);
+		if ( r!=null ) {
+			return r.index;
+		}
+        return INVALID_RULE_INDEX;
+    }
+
    /** Rule name at a given rule index (inverse of getRuleIndex). */
    public String getRuleName(int ruleIndex) {
        return (String)ruleIndexToRuleList.get(ruleIndex);
    }
+
	/** Define a global dynamic scope and register it by name. */
	public AttributeScope defineGlobalScope(String name, Token scopeAction) {
		AttributeScope scope = new AttributeScope(this, name, scopeAction);
		scopes.put(name,scope);
		return scope;
	}
+
	/** Create (but do not register) a return-value scope for a rule. */
	public AttributeScope createReturnScope(String ruleName, Token retAction) {
		AttributeScope scope = new AttributeScope(this, ruleName, retAction);
		scope.isReturnScope = true;
		return scope;
	}
+
	/** Create (but do not register) a dynamic rule scope for a rule. */
	public AttributeScope createRuleScope(String ruleName, Token scopeAction) {
		AttributeScope scope = new AttributeScope(this, ruleName, scopeAction);
		scope.isDynamicRuleScope = true;
		return scope;
	}
+
	/** Create (but do not register) a parameter scope for a rule. */
	public AttributeScope createParameterScope(String ruleName, Token argAction) {
		AttributeScope scope = new AttributeScope(this, ruleName, argAction);
		scope.isParameterScope = true;
		return scope;
	}
+
	/** Get a global scope by name; null if undefined. */
	public AttributeScope getGlobalScope(String name) {
		return (AttributeScope)scopes.get(name);
	}
+
	/** Map of scope name -> AttributeScope for all global scopes. */
	public Map getGlobalScopes() {
		return scopes;
	}
+
+	/** Define a label defined in a rule r; check the validity then ask the
+	 *  Rule object to actually define it.
+	 */
+	protected void defineLabel(Rule r, antlr.Token label, GrammarAST element, int type) {
+        boolean err = nameSpaceChecker.checkForLabelTypeMismatch(r, label, type);
+		if ( err ) {
+			return;
+		}
+		r.defineLabel(label, element, type);
+	}
+
+	public void defineTokenRefLabel(String ruleName,
+									antlr.Token label,
+									GrammarAST tokenRef)
+	{
+        Rule r = getRule(ruleName);
+		if ( r!=null ) {
+			if ( type==LEXER &&
+				 (tokenRef.getType()==ANTLRParser.CHAR_LITERAL||
+				 tokenRef.getType()==ANTLRParser.BLOCK||
+				 tokenRef.getType()==ANTLRParser.NOT||
+				 tokenRef.getType()==ANTLRParser.CHAR_RANGE||
+				 tokenRef.getType()==ANTLRParser.WILDCARD))
+			{
+				defineLabel(r, label, tokenRef, CHAR_LABEL);				
+			}
+			else {
+				defineLabel(r, label, tokenRef, TOKEN_LABEL);
+			}
+		}
+	}
+
+	public void defineRuleRefLabel(String ruleName,
+								   antlr.Token label,
+								   GrammarAST ruleRef)
+	{
+		Rule r = getRule(ruleName);
+		if ( r!=null ) {
+			defineLabel(r, label, ruleRef, RULE_LABEL);
+		}
+	}
+
	/** Define a += list label on a token reference; ignored if the
	 *  enclosing rule is undefined.
	 */
	public void defineTokenListLabel(String ruleName,
									 antlr.Token label,
									 GrammarAST element)
	{
		Rule r = getRule(ruleName);
		if ( r!=null ) {
			defineLabel(r, label, element, TOKEN_LIST_LABEL);
		}
	}
+
	/** Define a += list label on a rule reference; only valid when the
	 *  enclosing rule has multiple return values, otherwise a grammar
	 *  error is reported (label is still defined, matching prior behavior).
	 */
	public void defineRuleListLabel(String ruleName,
									antlr.Token label,
									GrammarAST element)
	{
		Rule r = getRule(ruleName);
		if ( r!=null ) {
			if ( !r.getHasMultipleReturnValues() ) {
				ErrorManager.grammarError(
					ErrorManager.MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT,this,
					label,label.getText());
			}
			defineLabel(r, label, element, RULE_LIST_LABEL);
		}
	}
+
+	/** Given a set of all rewrite elements on right of ->, filter for
+	 *  label types such as Grammar.TOKEN_LABEL, Grammar.TOKEN_LIST_LABEL, ...
+	 *  Return a displayable token type name computed from the GrammarAST.
+	 */
+	public Set<String> getLabels(Set<GrammarAST> rewriteElements, int labelType) {
+		Set<String> labels = new HashSet<String>();
+		for (Iterator it = rewriteElements.iterator(); it.hasNext();) {
+			GrammarAST el = (GrammarAST) it.next();
+			if ( el.getType()==ANTLRParser.LABEL ) {
+				Rule r = getRule(el.enclosingRule);
+				String labelName = el.getText();
+				LabelElementPair pair = r.getLabel(labelName);
+				// if valid label and type is what we're looking for
+				// and not ref to old value val $rule, add to list
+				if ( pair!=null && pair.type==labelType &&
+					 !labelName.equals(el.enclosingRule) )
+				{
+					labels.add(labelName);
+				}
+			}
+		}
+		return labels;
+	}
+
+	/** Before generating code, we examine all actions that can have
+	 *  $x.y and $y stuff in them because some code generation depends on
+	 *  Rule.referencedPredefinedRuleAttributes.  I need to remove unused
+	 *  rule labels for example.
+	 */
+	protected void examineAllExecutableActions() {
+		Collection rules = getRules();
+		for (Iterator it = rules.iterator(); it.hasNext();) {
+			Rule r = (Rule) it.next();
+			// walk all actions within the rule elements, args, and exceptions
+			List<GrammarAST> actions = r.getInlineActions();
+			for (int i = 0; i < actions.size(); i++) {
+				GrammarAST actionAST = (GrammarAST) actions.get(i);
+				ActionAnalysisLexer sniffer =
+					new ActionAnalysisLexer(this, r.name, actionAST);
+				sniffer.analyze();
+			}
+			// walk any named actions like @init, @after
+			Collection<GrammarAST> namedActions = r.getActions().values();
+			for (Iterator it2 = namedActions.iterator(); it2.hasNext();) {
+				GrammarAST actionAST = (GrammarAST) it2.next();
+				ActionAnalysisLexer sniffer =
+					new ActionAnalysisLexer(this, r.name, actionAST);
+				sniffer.analyze();
+			}
+		}
+	}
+
+	/** Remove all labels on rule refs whose target rules have no return value.
+	 *  Do this for all rules in grammar.
+	 */
+	public void checkAllRulesForUselessLabels() {
+		if ( type==LEXER ) {
+			return;
+		}
+		Set rules = nameToRuleMap.keySet();
+		for (Iterator it = rules.iterator(); it.hasNext();) {
+			String ruleName = (String) it.next();
+			Rule r = getRule(ruleName);
+			removeUselessLabels(r.getRuleLabels());
+			removeUselessLabels(r.getRuleListLabels());
+		}
+	}
+
    /** A label on a rule is useless if the rule has no return value, no
     *  tree or template output, and it is not referenced in an action.
     *  Removes such labels from the given label-pair map in place.
     */
    protected void removeUselessLabels(Map ruleToElementLabelPairMap) {
		if ( ruleToElementLabelPairMap==null ) {
			return;
		}
		Collection labels = ruleToElementLabelPairMap.values();
		// collect first, remove after: removing while iterating values()
		// would throw ConcurrentModificationException
		List kill = new ArrayList();
		for (Iterator labelit = labels.iterator(); labelit.hasNext();) {
			LabelElementPair pair = (LabelElementPair) labelit.next();
			Rule refdRule = getRule(pair.elementRef.getText());
			if ( refdRule!=null && !refdRule.getHasReturnValue() && !pair.actionReferencesLabel ) {
				//System.out.println(pair.label.getText()+" is useless");
				kill.add(pair.label.getText());
			}
		}
		for (int i = 0; i < kill.size(); i++) {
			String labelToKill = (String) kill.get(i);
			// System.out.println("kill "+labelToKill);
			ruleToElementLabelPairMap.remove(labelToKill);
		}
	}
+
	/** Track a rule reference within an outermost alt of a rule.  Used
	 *  at the moment to decide if $ruleref refers to a unique rule ref in
	 *  the alt.  Rewrite rules force tracking of all rule AST results.
	 *
	 *  This data is also used to verify that all rules have been defined.
	 *  Silently ignored when the enclosing rule is undefined.
	 */
	public void altReferencesRule(String ruleName, GrammarAST refAST, int outerAltNum) {
		Rule r = getRule(ruleName);
		if ( r==null ) {
			return;
		}
		r.trackRuleReferenceInAlt(refAST, outerAltNum);
		antlr.Token refToken = refAST.getToken();
		// record each distinct reference token once
		if ( !ruleRefs.contains(refToken) ) {
			ruleRefs.add(refToken);
		}
	}
+
+	/** Track a token reference within an outermost alt of a rule.  Used
+	 *  to decide if $tokenref refers to a unique token ref in
+	 *  the alt. Does not track literals!
+	 *
+	 *  Rewrite rules force tracking of all tokens.
+	 */
+	public void altReferencesTokenID(String ruleName, GrammarAST refAST, int outerAltNum) {
+		Rule r = getRule(ruleName);
+		if ( r==null ) {
+			return;
+		}
+		r.trackTokenReferenceInAlt(refAST, outerAltNum);
+		if ( !tokenIDRefs.contains(refAST.getToken()) ) {
+			tokenIDRefs.add(refAST.getToken());
+		}
+	}
+
+	/** To yield smaller, more readable code, track which rules have their
+	 *  predefined attributes accessed.  If the rule has no user-defined
+	 *  return values, then don't generate the return value scope classes
+	 *  etc...  Make the rule have void return value.  Don't track for lexer
+	 *  rules.
+	 */
+	public void referenceRuleLabelPredefinedAttribute(String ruleName) {
+		Rule r = getRule(ruleName);
+		if ( r!=null && type!=LEXER ) {
+			// indicate that an action ref'd an attr unless it's in a lexer
+			// so that $ID.text refs don't force lexer rules to define
+			// return values...Token objects are created by the caller instead.
+			r.referencedPredefinedRuleAttributes = true;
+		}
+	}
+
	/** Run the sanity checker's left-recursion analysis over all rules;
	 *  returns its list of cycles.
	 */
	public List checkAllRulesForLeftRecursion() {
		return sanity.checkAllRulesForLeftRecursion();
	}
+
	/** Return a list of left-recursive rules; no analysis can be done
	 *  successfully on these.  Useful to skip these rules then and also
	 *  for ANTLRWorks to highlight them.  Result is computed once and
	 *  cached in leftRecursiveRules.
	 */
	public Set getLeftRecursiveRules() {
		// left-recursion detection needs the NFA
		if ( nfa==null ) {
			createNFAs();
		}
		if ( leftRecursiveRules!=null ) {
			return leftRecursiveRules;
		}
		// side effect: populates leftRecursiveRules
		sanity.checkAllRulesForLeftRecursion();
		return leftRecursiveRules;
	}
+
	/** Delegate rule-reference validation (args vs. parameters etc.) to
	 *  the sanity checker.
	 */
	public void checkRuleReference(GrammarAST refAST,
								   GrammarAST argsAST,
								   String currentRuleName)
	{
		sanity.checkRuleReference(refAST, argsAST, currentRuleName);
	}
+
+	/** Rules like "a : ;" and "a : {...} ;" should not generate
+	 *  try/catch blocks for RecognitionException.  To detect this
+	 *  it's probably ok to just look for any reference to an atom
+	 *  that can match some input.  W/o that, the rule is unlikey to have
+	 *  any else.
+	 */
+	public boolean isEmptyRule(GrammarAST block) {
+		GrammarAST aTokenRefNode =
+			block.findFirstType(ANTLRParser.TOKEN_REF);
+		GrammarAST aStringLiteralRefNode =
+			block.findFirstType(ANTLRParser.STRING_LITERAL);
+		GrammarAST aCharLiteralRefNode =
+			block.findFirstType(ANTLRParser.CHAR_LITERAL);
+		GrammarAST aWildcardRefNode =
+			block.findFirstType(ANTLRParser.WILDCARD);
+		GrammarAST aRuleRefNode =
+			block.findFirstType(ANTLRParser.RULE_REF);
+		if ( aTokenRefNode==null&&
+			aStringLiteralRefNode==null&&
+			aCharLiteralRefNode==null&&
+			aWildcardRefNode==null&&
+			aRuleRefNode==null )
+		{
+			return true;
+		}
+		return false;
+	}
+
+    public int getTokenType(String tokenName) {
+        Integer I = null;
+        if ( tokenName.charAt(0)=='\'') {
+            I = (Integer)stringLiteralToTypeMap.get(tokenName);
+        }
+        else { // must be a label like ID
+            I = (Integer)tokenIDToTypeMap.get(tokenName);
+        }
+        int i = (I!=null)?I.intValue():Label.INVALID;
+		//System.out.println("grammar type "+type+" "+tokenName+"->"+i);
+        return i;
+    }
+
	/** Get the list of tokens that are IDs like BLOCK and LPAREN */
	public Set getTokenIDs() {
		return tokenIDToTypeMap.keySet();
	}
+
	/** Return an ordered integer list of token types that have no
	 *  corresponding token ID like INT or KEYWORD_BEGIN; for stuff
	 *  like 'begin'.
	 */
	public Collection getTokenTypesWithoutID() {
		List types = new ArrayList();
		// walk the real token type range; faux types are below MIN_TOKEN_TYPE
		for (int t =Label.MIN_TOKEN_TYPE; t<=getMaxTokenType(); t++) {
			String name = getTokenDisplayName(t);
			// a leading quote means the display name is a literal, i.e.
			// no token ID exists for this type
			// NOTE(review): assumes getTokenDisplayName never returns
			// null/empty for an in-range type -- confirm
			if ( name.charAt(0)=='\'' ) {
				types.add(Utils.integer(t));
			}
		}
		return types;
	}
+
	/** Get a list of all token IDs and literals that have an associated
	 *  token type.
	 */
	public Set getTokenDisplayNames() {
		Set names = new HashSet();
		for (int t =Label.MIN_TOKEN_TYPE; t <=getMaxTokenType(); t++) {
			names.add(getTokenDisplayName(t));
		}
		return names;
	}
+
	/** Given a literal like (the 3 char sequence with single quotes) 'a',
	 *  return the int value of 'a'. Convert escape sequences here also.
	 *  ANTLR's antlr.g parser does not convert escape sequences.
	 *
	 *  11/26/2005: I changed literals to always be '...' even for strings.
	 *  This routine still works though.
	 *
	 *  Dispatches purely on literal length: 3 = 'x', 4 = '\x',
	 *  8 = '\u1234'.  Any other length is an internal error and yields -1.
     */
    public static int getCharValueFromGrammarCharLiteral(String literal) {
        if ( literal.length()==3 ) {
			// 'x'
            return literal.charAt(1); // no escape char
        }
        else if ( literal.length() == 4 )
        {
			// '\x'  (antlr lexer will catch invalid char)
			int escChar = literal.charAt(2);
			// table maps escape char -> value; 0 means "not a real escape"
			int charVal = ANTLRLiteralEscapedCharValue[escChar];
			if ( charVal==0 ) {
				// Unnecessary escapes like '\{' should just yield {
				return escChar;
			}
			return charVal;
        }
        else if( literal.length() == 8 )
        {
        	// '\u1234'; skip the leading '\u (3 chars) and trailing quote
        	String unicodeChars = literal.substring(3,literal.length()-1);
    		return Integer.parseInt(unicodeChars, 16);
         }
		ErrorManager.assertTrue(false, "invalid char literal: "+literal);
		return -1;
    }
+
	/** ANTLR does not convert escape sequences during the parse phase because
	 *  it could not know how to print String/char literals back out when
	 *  printing grammars etc...  Someone in China might use the real unicode
	 *  char in a literal as it will display on their screen; when printing
	 *  back out, I could not know whether to display or use a unicode escape.
	 *
	 *  This routine converts a string literal with possible escape sequences
	 *  into a pure string of 16-bit char values.  Escapes and unicode \u0000
	 *  specs are converted to pure chars.  return in a buffer; people may
	 *  want to walk/manipulate further.
	 *
	 *  The NFA construction routine must know the actual char values.
	 */
	public static StringBuffer getUnescapedStringFromGrammarStringLiteral(String literal) {
		//System.out.println("escape: ["+literal+"]");
		StringBuffer buf = new StringBuffer();
		int last = literal.length()-1; // skip quotes on outside
		for (int i=1; i<last; i++) {
			char c = literal.charAt(i);
			if ( c=='\\' ) {
				// escape sequence: consume the backslash, look at next char
				i++;
				c = literal.charAt(i);
				if ( Character.toUpperCase(c)=='U' ) {
					// \u0000 style: exactly 4 hex digits follow
					// NOTE(review): assumes the lexer validated the digits;
					// malformed input would throw from parseInt/substring
					i++;
					String unicodeChars = literal.substring(i,i+4);
					// parse the unicode 16 bit hex value
					int val = Integer.parseInt(unicodeChars, 16);
					i+=4-1; // loop will inc by 1; only jump 3 then
					buf.append((char)val);
				}
				else {
					buf.append((char)ANTLRLiteralEscapedCharValue[c]); // normal \x escape
				}
			}
			else {
				buf.append(c); // simple char x
			}
		}
		//System.out.println("string: ["+buf.toString()+"]");
		return buf;
	}
+
+	/** Pull your token definitions from an existing grammar in memory.
+	 *  You must use Grammar() ctor then this method then setGrammarContent()
+	 *  to make this work.  This is useful primarily for testing and
+	 *  interpreting grammars.  Return the max token type found.
+	 */
+	public int importTokenVocabulary(Grammar importFromGr) {
+		Iterator idIter = importFromGr.getTokenIDs().iterator();
+		while ( idIter.hasNext() ) {
+			String name = (String)idIter.next();
+			int ttype = importFromGr.getTokenType(name);
+			if ( ttype>maxTokenType ) {
+				maxTokenType = ttype;
+			}
+			// only real token types are (re)defined; faux types are skipped
+			if ( ttype>=Label.MIN_TOKEN_TYPE ) {
+				defineToken(name, ttype);
+			}
+		}
+		return maxTokenType; // highest type seen across all imported tokens
+	}
+
+	/** Load a vocab file <vocabName>.tokens and return max token type found.
+	 *
+	 *  The file holds one assignment per line: ID=int or 'literal'=int;
+	 *  slash-slash and slash-star comments are permitted.  A malformed line
+	 *  is reported via ErrorManager and skipped.  Every valid assignment is
+	 *  passed to defineToken() and folded into maxTokenType.
+	 */
+	public int importTokenVocabulary(String vocabName) {
+		File fullFile = getImportedVocabFileName(vocabName);
+		BufferedReader br = null;
+		try {
+			br = new BufferedReader(new FileReader(fullFile));
+			StreamTokenizer tokenizer = new StreamTokenizer(br);
+			tokenizer.parseNumbers();
+			tokenizer.wordChars('_', '_');
+			tokenizer.eolIsSignificant(true);
+			tokenizer.slashSlashComments(true);
+			tokenizer.slashStarComments(true);
+			tokenizer.ordinaryChar('=');
+			tokenizer.quoteChar('\'');
+			tokenizer.whitespaceChars(' ',' ');
+			tokenizer.whitespaceChars('\t','\t');
+			int lineNum = 1;
+			int token = tokenizer.nextToken();
+			while (token != StreamTokenizer.TT_EOF) {
+				String tokenID;
+				if ( token == StreamTokenizer.TT_WORD ) {
+					tokenID = tokenizer.sval;
+				}
+				else if ( token == '\'' ) {
+					tokenID = "'"+tokenizer.sval+"'"; // re-wrap literal in quotes
+				}
+				else {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					token = skipToNextLine(tokenizer);
+					lineNum++; // we consumed the rest of this line
+					continue;
+				}
+				token = tokenizer.nextToken();
+				if ( token != '=' ) {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					token = skipToNextLine(tokenizer);
+					lineNum++;
+					continue;
+				}
+				token = tokenizer.nextToken(); // skip '='
+				if ( token != StreamTokenizer.TT_NUMBER ) {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					token = skipToNextLine(tokenizer);
+					lineNum++;
+					continue;
+				}
+				int tokenType = (int)tokenizer.nval;
+				token = tokenizer.nextToken();
+				//System.out.println("import "+tokenID+"="+tokenType);
+				maxTokenType = Math.max(maxTokenType,tokenType);
+				defineToken(tokenID, tokenType);
+				lineNum++;
+				if ( token != StreamTokenizer.TT_EOL ) {
+					// trailing junk after the assignment
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					token = skipToNextLine(tokenizer);
+					continue;
+				}
+				token = tokenizer.nextToken(); // skip newline
+			}
+		}
+		catch (FileNotFoundException fnfe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_TOKENS_FILE,
+							   fullFile);
+		}
+		catch (IOException ioe) {
+			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
+							   fullFile,
+							   ioe);
+		}
+		catch (Exception e) {
+			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
+							   fullFile,
+							   e);
+		}
+		finally {
+			// bug fix: the reader used to leak when any exception was thrown
+			// (close() was only reached on the success path)
+			if ( br!=null ) {
+				try { br.close(); }
+				catch (IOException ignored) { /* best effort */ }
+			}
+		}
+		return maxTokenType;
+	}
+
+	/** Discard tokens through end-of-line and return the first token of the
+	 *  next line.  Returns TT_EOF when the file ends first; the original
+	 *  recovery loop (while nextToken()!=TT_EOL) spun forever when a syntax
+	 *  error occurred on the last line, since nextToken() keeps returning
+	 *  TT_EOF -- never TT_EOL -- after end of file.
+	 */
+	private static int skipToNextLine(StreamTokenizer tokenizer) throws IOException {
+		int t = tokenizer.nextToken();
+		while ( t != StreamTokenizer.TT_EOL && t != StreamTokenizer.TT_EOF ) {
+			t = tokenizer.nextToken();
+		}
+		return ( t==StreamTokenizer.TT_EOF ) ? t : tokenizer.nextToken();
+	}
+
+	/** Given a token type, get a meaningful name for it such as the ID
+	 *  or string literal.  If this is a lexer and the ttype is in the
+	 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
+	 */
+	public String getTokenDisplayName(int ttype) {
+		String tokenName = null;
+		int index=0;
+		// inside any target's char range and is lexer grammar?
+		if ( this.type==LEXER &&
+			 ttype >= Label.MIN_CHAR_VALUE && ttype <= Label.MAX_CHAR_VALUE )
+		{
+			return getANTLRCharLiteralForChar(ttype);
+		}
+		// faux label?
+		else if ( ttype<0 ) {
+			tokenName = (String)typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype);
+		}
+		else {
+			// compute index in typeToTokenList for ttype
+			index = ttype-1; // normalize to 0..n-1
+			index += Label.NUM_FAUX_LABELS;     // jump over faux tokens
+
+			if ( index<typeToTokenList.size() ) {
+				tokenName = (String)typeToTokenList.get(index);
+			}
+			else {
+				tokenName = String.valueOf(ttype);
+			}
+		}
+		//System.out.println("getTokenDisplaYanme ttype="+ttype+", index="+index+", name="+tokenName);
+		return tokenName;
+	}
+
+	/** Get the list of ANTLR String literals */
+	public Set getStringLiterals() {
+		// live view over stringLiteralToTypeMap's keys, not a copy
+		return stringLiteralToTypeMap.keySet();
+	}
+
+	/** Return the grammar-wide max lookahead from option "k", caching the
+	 *  answer in global_k.  A missing option or the explicit string "*"
+	 *  means 0 (i.e., unlimited / default lookahead).
+	 */
+	public int getGrammarMaxLookahead() {
+		if ( global_k>=0 ) {
+			return global_k; // cached from a previous call
+		}
+		Object k = getOption("k");
+		if ( k==null ) {
+			global_k = 0;
+		}
+		else if (k instanceof Integer) {
+			Integer kI = (Integer)k;
+			global_k = kI.intValue();
+		}
+		else {
+			// must be String "*"; this is the default anyway.
+			// NOTE(review): any other string leaves global_k unset
+			// (matches original behavior; value is then recomputed
+			// on every call).
+			if ( k.equals("*") ) {
+				global_k = 0;
+			}
+		}
+		return global_k;
+	}
+
+	/** Save the option key/value pair and process it; return the key
+	 *  or null if invalid option.
+	 */
+    public String setOption(String key, Object value, antlr.Token optionsStartToken) {
+		if ( !legalOptions.contains(key) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
+									  this,
+									  optionsStartToken,
+									  key);
+			return null;
+		}
+		if ( !optionIsValid(key, value) ) {
+			return null;
+		}
+		if ( options==null ) {
+			options = new HashMap();
+		}
+		options.put(key, value);
+		return key;
+    }
+
+    /** Store all options via setOption().  NOTE: entries whose names are
+     *  illegal or whose values are invalid are REMOVED from the caller's
+     *  map (it.remove below mutates the argument).
+     */
+    public void setOptions(Map options, antlr.Token optionsStartToken) {
+		if ( options==null ) {
+			this.options = null;
+			return;
+		}
+        Set keys = options.keySet();
+        for (Iterator it = keys.iterator(); it.hasNext();) {
+            String optionName = (String) it.next();
+            Object optionValue = options.get(optionName);
+            String stored=setOption(optionName, optionValue, optionsStartToken);
+			if ( stored==null ) {
+				it.remove(); // drop rejected option from the caller's map
+			}
+        }
+    }
+
+    /** Look up a grammar option, falling back to defaultOptions when the
+     *  grammar itself does not define it.
+     */
+    public Object getOption(String key) {
+		Object v = (options!=null) ? options.get(key) : null;
+		return (v!=null) ? v : defaultOptions.get(key);
+    }
+
+	/** Hook for subclasses to veto option values; always true here. */
+	public boolean optionIsValid(String key, Object value) {
+		return true;
+	}
+
+	/** True when the "output" option is "AST". */
+	public boolean buildAST() {
+		return outputOptionIs("AST");
+	}
+
+	public boolean isBuiltFromString() {
+		return builtFromString;
+	}
+
+	/** True when the "output" option is "template". */
+	public boolean buildTemplate() {
+		return outputOptionIs("template");
+	}
+
+	/** Shared test of the "output" option; false when the option is unset.
+	 *  Factored out of buildAST()/buildTemplate(), which duplicated this
+	 *  lookup-and-compare logic verbatim.
+	 */
+	private boolean outputOptionIs(String expected) {
+		String outputType = (String)getOption("output");
+		return outputType!=null && outputType.equals(expected);
+	}
+
+    public Collection getRules() {
+        return nameToRuleMap.values();
+    }
+
+	/** Attach the parsed AST (and cache its EOR node) on an existing rule.
+	 *  Unknown rule names are silently ignored, as in all accessors below.
+	 */
+	public void setRuleAST(String ruleName, GrammarAST t) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		if ( r!=null ) {
+			r.tree = t;
+			r.EORNode = t.getLastChild();
+		}
+	}
+
+    public void setRuleStartState(String ruleName, NFAState startState) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		if ( r!=null ) {
+	        r.startState = startState;
+		}
+    }
+
+    public void setRuleStopState(String ruleName, NFAState stopState) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		if ( r!=null ) {
+	        r.stopState = stopState;
+		}
+    }
+
+	public NFAState getRuleStartState(String ruleName) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		if ( r!=null ) {
+			return r.startState;
+		}
+		return null;
+	}
+
+	public String getRuleModifier(String ruleName) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		if ( r!=null ) {
+			return r.modifier;
+		}
+		return null;
+	}
+
+    public NFAState getRuleStopState(String ruleName) {
+		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		if ( r!=null ) {
+			return r.stopState;
+		}
+		return null;
+    }
+
+	/** Hand out the next decision number (1-based) and stamp it on state. */
+    public int assignDecisionNumber(NFAState state) {
+        decisionNumber++;
+        state.setDecisionNumber(decisionNumber);
+        return decisionNumber;
+    }
+
+	/** Decision numbers are 1-based; returns null when no Decision record
+	 *  has been created for this number yet.
+	 */
+	protected Decision getDecision(int decision) {
+		int index = decision-1;
+		if ( index >= indexToDecision.size() ) {
+			return null;
+		}
+		Decision d = (Decision)indexToDecision.get(index);
+		return d;
+	}
+
+	/** Create the Decision record for a decision number, or return the
+	 *  existing one.  Grows indexToDecision to the current decision count.
+	 */
+	protected Decision createDecision(int decision) {
+		int index = decision-1;
+		if ( index < indexToDecision.size() ) {
+			return getDecision(decision); // don't recreate
+		}
+		Decision d = new Decision();
+		d.decision = decision;
+        indexToDecision.setSize(getNumberOfDecisions());
+        indexToDecision.set(index, d);
+		return d;
+	}
+
+	/** Start states of all decisions, in decision-number order. */
+    public List getDecisionNFAStartStateList() {
+		List states = new ArrayList(100);
+		for (int d = 0; d < indexToDecision.size(); d++) {
+			Decision dec = (Decision) indexToDecision.elementAt(d);
+			states.add(dec.startState);
+		}
+        return states;
+    }
+
+    public NFAState getDecisionNFAStartState(int decision) {
+        Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.startState;
+    }
+
+	public DFA getLookaheadDFA(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.dfa;
+	}
+
+	public GrammarAST getDecisionBlockAST(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.blockAST;
+	}
+
+	/** returns a list of column numbers for all decisions
+	 *  on a particular line so ANTLRWorks choose the decision
+	 *  depending on the location of the cursor (otherwise,
+	 *  ANTLRWorks has to give the *exact* location which
+	 *  is not easy from the user point of view).
+	 *
+	 *  This is not particularly fast as it walks entire line:col->DFA map
+	 *  looking for a prefix of "line:".
+	 */
+	public List getLookaheadDFAColumnsForLineInFile(int line) {
+		String prefix = line+":";
+		List columns = new ArrayList();
+		Iterator keyIter = lineColumnToLookaheadDFAMap.keySet().iterator();
+		while ( keyIter.hasNext() ) {
+			String key = (String)keyIter.next();
+			// keys look like "line:col"; keep the column part of matches
+			if ( key.startsWith(prefix) ) {
+				columns.add(Integer.valueOf(key.substring(prefix.length())));
+			}
+		}
+		return columns;
+	}
+
+	/** Useful for ANTLRWorks to map position in file to the DFA for display.
+	 *  Map keys are "line:col" strings.
+	 */
+	public DFA getLookaheadDFAFromPositionInFile(int line, int col) {
+		// plain concatenation; the old code built a StringBuffer but still
+		// concatenated (line + ":") inside append(), defeating the point
+		return (DFA)lineColumnToLookaheadDFAMap.get(line + ":" + col);
+	}
+
+	public Map getLineColumnToLookaheadDFAMap() {
+		return lineColumnToLookaheadDFAMap;
+	}
+
+	/*
+	public void setDecisionOptions(int decision, Map options) {
+		Decision d = createDecision(decision);
+		d.options = options;
+	}
+
+	public void setDecisionOption(int decision, String name, Object value) {
+		Decision d = getDecision(decision);
+		if ( d!=null ) {
+			if ( d.options==null ) {
+				d.options = new HashMap();
+			}
+			d.options.put(name,value);
+		}
+	}
+
+	public Map getDecisionOptions(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.options;
+    }
+    */
+
+	public int getNumberOfDecisions() {
+		return decisionNumber;
+	}
+
+	/** Count decisions whose lookahead DFA is cyclic.  Guards against a
+	 *  missing Decision record (getDecision can return null) so a partially
+	 *  built grammar cannot NPE here.
+	 */
+	public int getNumberOfCyclicDecisions() {
+		int n = 0;
+		for (int i=1; i<=getNumberOfDecisions(); i++) {
+			Decision d = getDecision(i);
+			if ( d!=null && d.dfa!=null && d.dfa.isCyclic() ) {
+				n++;
+			}
+		}
+		return n;
+	}
+
+	/** Set the lookahead DFA for a particular decision.  This means
+	 *  that the appropriate AST node must updated to have the new lookahead
+	 *  DFA.  This method could be used to properly set the DFAs without
+	 *  using the createLookaheadDFAs() method.  You could do this
+	 *
+	 *    Grammar g = new Grammar("...");
+	 *    g.setLookahead(1, dfa1);
+	 *    g.setLookahead(2, dfa2);
+	 *    ...
+	 */
+	public void setLookaheadDFA(int decision, DFA lookaheadDFA) {
+		Decision d = createDecision(decision);
+		d.dfa = lookaheadDFA;
+		// NOTE(review): assumes setDecisionNFA() ran first so startState and
+		// its associated AST node are non-null -- confirm call ordering.
+		GrammarAST ast = d.startState.getAssociatedASTNode();
+		ast.setLookaheadDFA(lookaheadDFA);
+	}
+
+	public void setDecisionNFA(int decision, NFAState state) {
+		Decision d = createDecision(decision);
+		d.startState = state;
+	}
+
+	public void setDecisionBlockAST(int decision, GrammarAST blockAST) {
+		//System.out.println("setDecisionBlockAST("+decision+", "+blockAST.token);
+		Decision d = createDecision(decision);
+		d.blockAST = blockAST;
+	}
+
+	public boolean allDecisionDFAHaveBeenCreated() {
+		return allDecisionDFACreated;
+	}
+
+	/** How many token types have been allocated so far? */
+    public int getMaxTokenType() {
+        return maxTokenType;
+    }
+
+	/** What is the max char value possible for this grammar's target?  Use
+	 *  unicode max if no target defined.
+	 */
+	public int getMaxCharValue() {
+		if ( generator!=null ) {
+			return generator.target.getMaxCharValue(generator);
+		}
+		else {
+			return Label.MAX_CHAR_VALUE;
+		}
+	}
+
+	/** Return a set of all possible token or char types for this grammar */
+	public IntSet getTokenTypes() {
+		if ( type==LEXER ) {
+			return getAllCharValues();
+		}
+		return IntervalSet.of(Label.MIN_TOKEN_TYPE, getMaxTokenType());
+	}
+
+	/** If there is a char vocabulary, use it; else return min to max char
+	 *  as defined by the target.  If no target, use max unicode char value.
+	 */
+	public IntSet getAllCharValues() {
+		if ( charVocabulary!=null ) {
+			return charVocabulary;
+		}
+		IntSet allChar = IntervalSet.of(Label.MIN_CHAR_VALUE, getMaxCharValue());
+		return allChar;
+	}
+
+	/** Return a string representing the escaped char for code c.  E.g., If c
+	 *  has value 0x100, you will get "\u0100".  ASCII gets the usual
+	 *  char (non-hex) representation.  Control characters are spit out
+	 *  as unicode.  While this is specially set up for returning Java strings,
+	 *  it can be used by any language target that has the same syntax. :)
+	 *
+	 *  11/26/2005: I changed this to use double quotes, consistent with antlr.g
+	 *  12/09/2005: I changed so everything is single quotes
+	 */
+	public static String getANTLRCharLiteralForChar(int c) {
+		if ( c<Label.MIN_CHAR_VALUE ) {
+			ErrorManager.internalError("invalid char value "+c);
+			return "'<INVALID>'";
+		}
+		if ( c<ANTLRLiteralCharValueEscape.length && ANTLRLiteralCharValueEscape[c]!=null ) {
+			return '\''+ANTLRLiteralCharValueEscape[c]+'\'';
+		}
+		if ( Character.UnicodeBlock.of((char)c)==Character.UnicodeBlock.BASIC_LATIN &&
+			!Character.isISOControl((char)c) ) {
+			if ( c=='\\' ) {
+				return "'\\\\'";
+			}
+			if ( c=='\'') {
+				return "'\\''";
+			}
+			return '\''+Character.toString((char)c)+'\'';
+		}
+		// turn on the bit above max "\uFFFF" value so that we pad with zeros
+		// then only take last 4 digits
+		String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
+		String unicodeStr = "'\\u"+hex+"'";
+		return unicodeStr;
+	}
+
+    /** For lexer grammars, return everything in unicode not in set.
+     *  For parser and tree grammars, return everything in token space
+     *  from MIN_TOKEN_TYPE to last valid token type or char value.
+     */
+    public IntSet complement(IntSet set) {
+        // complement relative to this grammar's full vocabulary
+        return set.complement(getTokenTypes());
+    }
+
+    /** Complement of a single token type or char value. */
+    public IntSet complement(int atom) {
+        return complement(IntervalSet.of(atom));
+    }
+
+	/** Given set tree like ( SET A B ) in lexer, check that A and B
+	 *  are both valid sets themselves, else we must tree like a BLOCK
+	 */
+	public boolean isValidSet(TreeToNFAConverter nfabuilder, GrammarAST t) {
+		boolean valid = true;
+		try {
+			//System.out.println("parse BLOCK as set tree: "+t.toStringTree());
+			// probe-parse; we only care whether it throws
+			nfabuilder.testBlockAsSet(t);
+		}
+		catch (RecognitionException re) {
+			// The rule did not parse as a set, return null; ignore exception
+			valid = false;
+		}
+		//System.out.println("valid? "+valid);
+		return valid;
+	}
+
+	/** Get the set equivalent (if any) of the indicated rule from this
+	 *  grammar.  Mostly used in the lexer to do ~T for some fragment rule
+	 *  T.  If the rule AST has a SET use that.  If the rule is a single char
+	 *  convert it to a set and return.  If rule is not a simple set (w/o actions)
+	 *  then return null.
+	 *  Rules have AST form:
+	 *
+	 *		^( RULE ID modifier ARG RET SCOPE block EOR )
+	 */
+	public IntSet getSetFromRule(TreeToNFAConverter nfabuilder, String ruleName)
+		throws RecognitionException
+	{
+		Rule r = getRule(ruleName);
+		if ( r==null ) {
+			return null; // unknown rule
+		}
+		IntSet elements = null;
+		//System.out.println("parsed tree: "+r.tree.toStringTree());
+	    elements = nfabuilder.setRule(r.tree);
+		//System.out.println("elements="+elements);
+		return elements;
+	}
+
+	/** Decisions are linked together with transition(1).  Count how
+     *  many there are.  This is here rather than in NFAState because
+     *  a grammar decides how NFAs are put together to form a decision.
+     */
+    public int getNumberOfAltsForDecisionNFA(NFAState decisionState) {
+        if ( decisionState==null ) {
+            return 0;
+        }
+        int n = 1;
+        NFAState p = decisionState;
+        // each transition(1) hop is one more alternative
+        while ( p.transition(1)!=null ) {
+            n++;
+            p = (NFAState)p.transition(1).target;
+        }
+        return n;
+    }
+
+    /** Get the ith alternative (1..n) from a decision; return null when
+     *  an invalid alt is requested.  I must count in to find the right
+     *  alternative number.  For (A|B), you get NFA structure (roughly):
+	 *
+	 *  o->o-A->o
+	 *  |
+	 *  o->o-B->o
+	 *
+	 *  This routine returns the leftmost state for each alt.  So alt=1, returns
+	 *  the upperleft most state in this structure.
+     */
+    public NFAState getNFAStateForAltOfDecision(NFAState decisionState, int alt) {
+        if ( decisionState==null || alt<=0 ) {
+            return null;
+        }
+        NFAState p = decisionState;
+        int altNum = 1;
+        // follow the transition(1) spine until we reach the requested alt
+        // or run off the end of the chain (p becomes null)
+        while ( p!=null && altNum!=alt ) {
+            Transition down = p.transition(1);
+            p = (down!=null) ? (NFAState)down.target : null;
+            altNum++;
+        }
+        return p;
+    }
+
+	/** From an NFA state, s, find the set of all labels reachable from s.
+	 *  This computes FIRST, FOLLOW and any other lookahead computation
+	 *  depending on where s is.
+	 *
+	 *  Record, with EOR_TOKEN_TYPE, if you hit the end of a rule so we can
+	 *  know at runtime (when these sets are used) to start walking up the
+	 *  follow chain to compute the real, correct follow set.
+	 *
+	 *  This routine will only be used on parser and tree parser grammars.
+	 *
+	 *  NOTE(review): not reentrant -- lookBusy is shared instance state
+	 *  cleared on each call; confirm single-threaded use.
+	 *
+	 *  TODO: it does properly handle a : b A ; where b is nullable
+	 *  Actually it stops at end of rules, returning EOR.  Hmm...
+	 *  should check for that and keep going.
+	 */
+	public LookaheadSet LOOK(NFAState s) {
+		lookBusy.clear();
+		return _LOOK(s);
+	}
+
+	/** Recursive worker for LOOK(); see that method for the contract.
+	 *  Uses lookBusy to break cycles in the NFA.
+	 *
+	 *  NOTE(review): returns null when a state has no transitions, yet the
+	 *  recursive results below are dereferenced without null checks
+	 *  (tset.member, orInPlace) -- presumably well-formed NFAs never reach
+	 *  that case; confirm before relying on it.
+	 */
+	protected LookaheadSet _LOOK(NFAState s) {
+		if ( s.isAcceptState() ) {
+			// end of rule: record EOR so runtime chases the follow chain
+			return new LookaheadSet(Label.EOR_TOKEN_TYPE);
+		}
+
+		if ( lookBusy.contains(s) ) {
+			// return a copy of an empty set; we may modify set inline
+			return new LookaheadSet();
+		}
+		lookBusy.add(s);
+		Transition transition0 = s.transition(0);
+		if ( transition0==null ) {
+			return null;
+		}
+
+		if ( transition0.label.isAtom() ) {
+			int atom = transition0.label.getAtom();
+			if ( atom==Label.EOF ) {
+				return LookaheadSet.EOF();
+			}
+			return new LookaheadSet(atom);
+		}
+		if ( transition0.label.isSet() ) {
+			IntSet sl = transition0.label.getSet();
+			LookaheadSet laSet = new LookaheadSet(sl);
+			// EOF is tracked via a flag, not as a set member
+			if ( laSet.member(Label.EOF) ) {
+				laSet.remove(Label.EOF);
+				laSet.hasEOF = true;
+			}
+			return laSet;
+		}
+		// epsilon or rule-ref edge: recurse through it
+        LookaheadSet tset = _LOOK((NFAState)transition0.target);
+		if ( tset.member(Label.EOR_TOKEN_TYPE) ) {
+			if ( transition0 instanceof RuleClosureTransition ) {
+				// we called a rule that found the end of the rule.
+				// That means the rule is nullable and we need to
+				// keep looking at what follows the rule ref.  E.g.,
+				// a : b A ; where b is nullable means that LOOK(a)
+				// should include A.
+				RuleClosureTransition ruleInvocationTrans =
+					(RuleClosureTransition)transition0;
+				// remove the EOR and get what follows
+				tset.remove(Label.EOR_TOKEN_TYPE);
+				LookaheadSet fset =
+					_LOOK((NFAState)ruleInvocationTrans.getFollowState());
+				tset.orInPlace(fset);
+			}
+		}
+
+		Transition transition1 = s.transition(1);
+		if ( transition1!=null ) {
+			// union in the other alternative's lookahead
+			LookaheadSet tset1 = _LOOK((NFAState)transition1.target);
+			tset.orInPlace(tset1);
+		}
+		return tset;
+	}
+
+    public void setCodeGenerator(CodeGenerator generator) {
+        this.generator = generator;
+    }
+
+    public CodeGenerator getCodeGenerator() {
+        return generator;
+    }
+
+    public GrammarAST getGrammarTree() {
+        return grammarTree;
+    }
+
+	public Tool getTool() {
+		return tool;
+	}
+
+	public void setTool(Tool tool) {
+		this.tool = tool;
+	}
+
+	/** given a token type and the text of the literal, come up with a
+	 *  decent token type label.  For now it's just T<type>.  Actually,
+	 *  if there is an aliased name from tokens like PLUS='+', use it.
+	 */
+	public String computeTokenNameFromLiteral(int tokenType, String literal) {
+		return "T"+tokenType;
+	}
+
+	/** Render the whole grammar tree; see grammarTreeToString(). */
+    public String toString() {
+        return grammarTreeToString(grammarTree);
+    }
+
+	public String grammarTreeToString(GrammarAST t) {
+		return grammarTreeToString(t, true);
+	}
+
+	/** Pretty-print subtree t prefixed with "line:col: "; returns null if
+	 *  printing failed (error is reported via ErrorManager).
+	 */
+	public String grammarTreeToString(GrammarAST t, boolean showActions) {
+        String s = null;
+        try {
+            s = t.getLine()+":"+t.getColumn()+": ";
+            s += new ANTLRTreePrinter().toString((AST)t, this, showActions);
+        }
+        catch (Exception e) {
+            ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   t,
+							   e);
+        }
+        return s;
+    }
+
+	public void setWatchNFAConversion(boolean watchNFAConversion) {
+		this.watchNFAConversion = watchNFAConversion;
+	}
+
+	public boolean getWatchNFAConversion() {
+		return watchNFAConversion;
+	}
+
+	/** Print the grammar (without actions) to the given stream. */
+	public void printGrammar(PrintStream output) {
+		ANTLRTreePrinter printer = new ANTLRTreePrinter();
+		printer.setASTNodeClass("org.antlr.tool.GrammarAST");
+		try {
+			String g = printer.toString(grammarTree, this, false);
+			output.println(g);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,re);
+		}
+	}
+
+}
diff --git a/src/org/antlr/tool/GrammarAST.java b/src/org/antlr/tool/GrammarAST.java
new file mode 100644
index 0000000..688908e
--- /dev/null
+++ b/src/org/antlr/tool/GrammarAST.java
@@ -0,0 +1,496 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.BaseAST;
+import antlr.Token;
+import antlr.TokenWithIndex;
+import antlr.collections.AST;
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.NFAState;
+import org.antlr.misc.IntSet;
+import org.antlr.stringtemplate.StringTemplate;
+
+import java.util.*;
+
+/** Grammars are first converted to ASTs using this class and then are
+ *  converted to NFAs via a tree walker.
+ *
+ *  The reader may notice that I have made a very non-OO decision in this
+ *  class to track variables for many different kinds of nodes.  It wastes
+ *  space for nodes that don't need the values and OO principles cry out
+ *  for a new class type for each kind of node in my tree.  I am doing this
+ *  on purpose for a variety of reasons.  I don't like using the type
+ *  system for different node types; it yields too many damn class files
+ *  which I hate.  Perhaps if I put them all in one file.  Most importantly
+ *  though I hate all the type casting that would have to go on.  I would
+ *  have all sorts of extra work to do.  Ick.  Anyway, I'm doing all this
+ *  on purpose, not out of ignorance. ;)
+ */
+public class GrammarAST extends BaseAST {
+	static int count = 0;
+
+	public int ID = ++count;
+
+	/** This AST node was created from what token? */
+    public Token token = null;
+
+    protected String enclosingRule = null;
+
+	/** If this is a RULE node then track rule's start, stop tokens' index. */
+	public int ruleStartTokenIndex;
+	public int ruleStopTokenIndex;
+
+    /** If this is a decision node, what is the lookahead DFA? */
+    public DFA lookaheadDFA = null;
+
+    /** What NFA start state was built from this node? */
+    public NFAState NFAStartState = null;
+
+	/** This is used for TREE_BEGIN nodes to point into
+	 *  the NFA.  TREE_BEGINs point at left edge of DOWN for LOOK computation
+     *  purposes (Nullable tree child list needs special code gen when matching).
+	 */
+	public NFAState NFATreeDownState = null;
+
+	/** Rule ref nodes, token refs, set, and NOT set refs need to track their
+	 *  location in the generated NFA so that local FOLLOW sets can be
+	 *  computed during code gen for automatic error recovery.
+	 */
+	public NFAState followingNFAState = null;
+
+	/** If this is a SET node, what are the elements? */
+    protected IntSet setValue = null;
+
+    /** If this is a BLOCK node, track options here */
+    protected Map options;
+
+	/** If this is a BLOCK node for a rewrite rule, track referenced
+	 *  elements here.  Don't track elements in nested subrules.
+	 */
+	public Set<GrammarAST> rewriteRefsShallow;
+
+	/*	If REWRITE node, track EVERY element and label ref to right of ->
+	 *  for this rewrite rule.  There could be multiple of these per
+	 *  rule:
+	 *
+	 *     a : ( ... -> ... | ... -> ... ) -> ... ;
+	 *
+	 *  We may need a list of all refs to do definitions for whole rewrite
+	 *  later.
+	 *
+	 *  If BLOCK then tracks every element at that level and below.
+	 */
+	public Set<GrammarAST> rewriteRefsDeep;	
+
+	public static final Set legalBlockOptions =
+			new HashSet() {{add("k"); add("greedy"); add("backtrack"); add("memoize");}};
+
+	/** What are the default options for a subrule? */
+    public static final Map defaultBlockOptions =
+            new HashMap() {{put("greedy","true");}};
+
+	/** if this is an ACTION node, this is the outermost enclosing
+	 *  alt num in rule.  For actions, define.g sets these (used to
+	 *  be codegen.g).  We need these set so we can examine actions
+	 *  early, before code gen, for refs to rule predefined properties
+	 *  and rule labels.  For most part define.g sets outerAltNum, but
+	 *  codegen.g does the ones for %foo(a={$ID.text}) type refs as
+	 *  the {$ID...} is not seen as an action until code gen pulls apart.
+	 */
+	public int outerAltNum;
+
+	/** if this is a TOKEN_REF or RULE_REF node, this is the code StringTemplate
+	 *  generated for this node.  We need to update it later to add
+	 *  a label if someone does $tokenref or $ruleref in an action.
+	 */
+	public StringTemplate code;
+
+	public GrammarAST() {;}
+
+	public GrammarAST(int t, String txt) {
+		initialize(t,txt);
+	}
+
+	/** Build the backing token from a type and text. */
+	public void initialize(int i, String s) {
+        token = new TokenWithIndex(i,s);
+    }
+
+	/** Share the token of another GrammarAST (shallow copy of token ref). */
+    public void initialize(AST ast) {
+		this.token = ((GrammarAST)ast).token;
+    }
+
+    public void initialize(Token token) {
+        this.token = token;
+    }
+
+    public DFA getLookaheadDFA() {
+        return lookaheadDFA;
+    }
+
+    public void setLookaheadDFA(DFA lookaheadDFA) {
+        this.lookaheadDFA = lookaheadDFA;
+    }
+
+	public Token getToken() {
+		return token;
+	}
+
+    public NFAState getNFAStartState() {
+        return NFAStartState;
+    }
+
+    public void setNFAStartState(NFAState nfaStartState) {
+		this.NFAStartState = nfaStartState;
+	}
+
+	/** Save the option key/value pair and process it; return the key
+	 *  or null if invalid option.  String values wrapped in double quotes
+	 *  are unquoted before storage; setting "k" bumps the grammar's count
+	 *  of manual lookahead options.
+	 */
+	public String setOption(Grammar grammar, String key, Object value) {
+		if ( !legalBlockOptions.contains(key) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
+									  grammar,
+									  token,
+									  key);
+			return null;
+		}
+		if ( value instanceof String ) {
+			String vs = (String)value;
+			// guard the length: charAt(0) used to throw
+			// StringIndexOutOfBoundsException on an empty option value
+			if ( vs.length()>0 && vs.charAt(0)=='"' ) {
+				value = vs.substring(1,vs.length()-1); // strip quotes
+            }
+        }
+		if ( options==null ) {
+			options = new HashMap(); // lazily created
+		}
+		if ( key.equals("k") ) {
+			grammar.numberOfManualLookaheadOptions++;
+		}
+        options.put(key, value);
+		return key;
+    }
+
+    /** Look up a block option, falling back to defaultBlockOptions when
+     *  this node does not define it.
+     */
+    public Object getOption(String key) {
+		Object v = (options!=null) ? options.get(key) : null;
+		return (v!=null) ? v : defaultBlockOptions.get(key);
+	}
+
+    /** Store all options via setOption().  NOTE: entries with illegal
+     *  option names are REMOVED from the caller's map (it.remove below
+     *  mutates the argument).
+     */
+    public void setOptions(Grammar grammar, Map options) {
+		if ( options==null ) {
+			this.options = null;
+			return;
+		}
+		Set keys = options.keySet();
+		for (Iterator it = keys.iterator(); it.hasNext();) {
+			String optionName = (String) it.next();
+			String stored=setOption(grammar, optionName, options.get(optionName));
+			if ( stored==null ) {
+				it.remove(); // drop rejected option from the caller's map
+			}
+		}
+    }
+
+    public Map getOptions() {
+        return options;
+    }
+
+    public String getText() {
+        if ( token!=null ) {
+            return token.getText();
+        }
+        return "";
+    }
+
+	public void setType(int type) {
+		token.setType(type);
+	}
+
+	public void setText(String text) {
+		token.setText(text);
+	}
+
+    public int getType() {
+        if ( token!=null ) {
+            return token.getType();
+        }
+        return -1;
+    }
+
+    /** Line of the payload token; when unknown (0 or no token), fall
+     *  back to the first child's line.  Returns 0 if neither is known.
+     */
+    public int getLine() {
+		int line = (token!=null) ? token.getLine() : 0;
+		if ( line==0 ) {
+			AST child = getFirstChild();
+			if ( child!=null ) {
+				line = child.getLine();
+			}
+		}
+        return line;
+    }
+
+    /** Column of the payload token; when unknown (0 or no token), fall
+     *  back to the first child's column.  Returns 0 if neither is known.
+     */
+    public int getColumn() {
+		int col = (token!=null) ? token.getColumn() : 0;
+		if ( col==0 ) {
+			AST child = getFirstChild();
+			if ( child!=null ) {
+				col = child.getColumn();
+			}
+		}
+        return col;
+    }
+
+    /** Set the payload token's line number; assumes token!=null. */
+    public void setLine(int line) {
+        token.setLine(line);
+    }
+
+    /** Set the payload token's column number; assumes token!=null. */
+    public void setColumn(int col) {
+        token.setColumn(col);
+    }
+
+    /** Record the name of the rule this node sits inside. */
+    public void setEnclosingRule(String rule) {
+        this.enclosingRule = rule;
+    }
+
+    /** Name of the rule this node sits inside; may be null if never set. */
+    public String getEnclosingRule() {
+        return enclosingRule;
+    }
+
+    /** The token-set payload for set-type nodes; null for other nodes. */
+    public IntSet getSetValue() {
+        return setValue;
+    }
+
+    /** Attach a token-set payload to this node (used for set-type nodes). */
+    public void setSetValue(IntSet setValue) {
+        this.setValue = setValue;
+    }
+
+    public GrammarAST getLastChild() {
+        return ((GrammarAST)getFirstChild()).getLastSibling();
+    }
+
+    /** Walk this node's sibling chain to its end and return the final
+     *  node; returns this when there is no next sibling.
+     */
+    public GrammarAST getLastSibling() {
+        GrammarAST cur = this;
+        while ( cur.getNextSibling()!=null ) {
+            cur = (GrammarAST)cur.getNextSibling();
+        }
+        return cur;
+    }
+
+    /** Get the ith child from 0; returns null when i is out of range. */
+	public GrammarAST getChild(int i) {
+		int index = 0;
+		for (AST c = getFirstChild(); c!=null; c = c.getNextSibling()) {
+			if ( index==i ) {
+				return (GrammarAST)c;
+			}
+			index++;
+		}
+		return null;
+	}
+
+	/** Return the first DIRECT child whose token type is ttype, or null
+	 *  if none matches; does not descend into grandchildren.
+	 */
+	public GrammarAST getFirstChildWithType(int ttype) {
+		for (AST c = getFirstChild(); c!=null; c = c.getNextSibling()) {
+			if ( c.getType()==ttype ) {
+				return (GrammarAST)c;
+			}
+		}
+		return null;
+	}
+
+    /** Copy the direct children into a fresh array; empty for a leaf. */
+    public GrammarAST[] getChildrenAsArray() {
+        GrammarAST[] kids = new GrammarAST[getNumberOfChildren()];
+        int i = 0;
+        for (AST c = getFirstChild(); c!=null; c = c.getNextSibling()) {
+            kids[i++] = (GrammarAST)c;
+        }
+        return kids;
+    }
+
+	/** Return a reference to the first node (depth-first) that has
+	 *  token type ttype.  Assume 'this' is a root node; don't visit siblings
+	 *  of root.  Return null if no node found with ttype.
+	 */
+	public GrammarAST findFirstType(int ttype) {
+		if ( getType()==ttype ) {
+			return this; // the root itself matches
+		}
+		// recurse into each child subtree, left to right
+		for (GrammarAST kid = (GrammarAST)getFirstChild();
+			 kid!=null;
+			 kid = (GrammarAST)kid.getNextSibling())
+		{
+			GrammarAST hit = kid.findFirstType(ttype);
+			if ( hit!=null ) {
+				return hit;
+			}
+		}
+		return null;
+	}
+
+	/** Make nodes unique based upon Token so we can add them to a Set; if
+	 *  not a GrammarAST, check type.
+	 *
+	 *  NOTE(review): this OVERLOADS equals(AST) rather than overriding
+	 *  Object.equals(Object), and no matching hashCode() is visible in this
+	 *  chunk; hash-based collections call Object.equals, so this method only
+	 *  takes effect where the static argument type is AST -- verify callers.
+	 *  Also assumes this.token!=null when comparing two GrammarASTs --
+	 *  TODO confirm.
+	 */
+	public boolean equals(AST ast) {
+		// non-GrammarAST nodes are compared by token type alone
+		if ( !(ast instanceof GrammarAST) ) {
+			return this.getType() == ast.getType();
+		}
+		GrammarAST t = (GrammarAST)ast;
+		// two GrammarASTs are "equal" when their tokens share a source position
+		return token.getLine() == t.getLine() &&
+			   token.getColumn() == t.getColumn();
+	}
+
+	/** See if tree has exact token types and structure; no text */
+	public boolean hasSameTreeStructure(AST t) {
+		// check roots first.
+		if (this.getType() != t.getType()) return false;
+		// if roots match, do full list match test on children.
+		if (this.getFirstChild() != null) {
+			if (!(((GrammarAST)this.getFirstChild()).hasSameListStructure(t.getFirstChild()))) return false;
+		}
+		// sibling has no kids, make sure t doesn't either
+		else if (t.getFirstChild() != null) {
+			return false;
+		}
+		return true;
+	}
+
+	/** Compare this node's ENTIRE sibling list against t's sibling list:
+	 *  same token types, same structure below each sibling, same list
+	 *  length.  Text is ignored.  Walks both lists in lock step and
+	 *  fails fast on the first mismatch.
+	 */
+	public boolean hasSameListStructure(AST t) {
+		AST sibling;
+
+		// the empty tree is not a match of any non-null tree.
+		if (t == null) {
+			return false;
+		}
+
+		// Otherwise, start walking sibling lists.  First mismatch, return false.
+		for (sibling = this;
+			 sibling != null && t != null;
+			 sibling = sibling.getNextSibling(), t = t.getNextSibling())
+		{
+			// as a quick optimization, check roots first.
+			if (sibling.getType()!=t.getType()) {
+				return false;
+			}
+			// if roots match, do full list match test on children.
+			if (sibling.getFirstChild() != null) {
+				if (!((GrammarAST)sibling.getFirstChild()).hasSameListStructure(t.getFirstChild())) {
+					return false;
+				}
+			}
+			// sibling has no kids, make sure t doesn't either
+			else if (t.getFirstChild() != null) {
+				return false;
+			}
+		}
+		// both cursors exhausted together means the lists had equal length
+		if (sibling == null && t == null) {
+			return true;
+		}
+		// one sibling list has more than the other
+		return false;
+	}
+
+	/** Shallow-copy node t via initialize() (no children or siblings are
+	 *  copied); returns null for null input.
+	 */
+	public static GrammarAST dup(AST t) {
+		if ( t==null ) {
+			return null;
+		}
+		GrammarAST copy = new GrammarAST();
+		copy.initialize(t);
+		return copy;
+	}
+
+	/** Scratch entry point apparently left over from manual testing;
+	 *  constructs a node and discards it -- does nothing useful.
+	 */
+	public static void main(String[] args) {
+		GrammarAST t = new GrammarAST();
+	}
+
+	/** Duplicate tree including siblings of root.  ACTION siblings are
+	 *  skipped entirely; other transformations per subtree are delegated
+	 *  to dupTreeNoActions().  Returns null when t is null.
+	 */
+	public static GrammarAST dupListNoActions(GrammarAST t, GrammarAST parent) {
+		GrammarAST result = dupTreeNoActions(t, parent);            // if t == null, then result==null
+		// nt tracks the tail of the new list so each dup is appended after it
+		GrammarAST nt = result;
+		while (t != null) {						// for each sibling of the root
+			t = (GrammarAST)t.getNextSibling();
+			if ( t!=null && t.getType()==ANTLRParser.ACTION ) {
+				continue; // drop action nodes from the duplicate
+			}
+			GrammarAST d = dupTreeNoActions(t, parent);
+			// d may be null (e.g. REWRITE subtrees are suppressed); skip those
+			if ( d!=null ) {
+				if ( nt!=null ) {
+					nt.setNextSibling(d);	// dup each subtree, building new tree
+				}
+				nt = d;
+			}
+		}
+		return result;
+	}
+
+	/**Duplicate a tree, assuming this is a root node of a tree--
+	 * duplicate that node and what's below; ignore siblings of root node.
+	 *
+	 * Special cases: REWRITE subtrees are suppressed (returns null);
+	 * BANG/ROOT operator nodes are unwrapped to their child; label
+	 * assignments (ASSIGN/PLUS_ASSIGN) outside an OPTIONS subtree are
+	 * unwrapped to the labeled element.
+	 */
+	public static GrammarAST dupTreeNoActions(GrammarAST t, GrammarAST parent) {
+		if ( t==null ) {
+			return null;
+		}
+		int ttype = t.getType();
+		if ( ttype==ANTLRParser.REWRITE ) {
+			return null;
+		}
+		if ( ttype==ANTLRParser.BANG || ttype==ANTLRParser.ROOT ) {
+			return (GrammarAST)t.getFirstChild(); // return x from ^(ROOT x)
+		}
+		// NOTE: the OPTIONS check keeps option assignments (name=value) intact
+		if ( (ttype==ANTLRParser.ASSIGN||ttype==ANTLRParser.PLUS_ASSIGN) &&
+			 (parent==null||parent.getType()!=ANTLRParser.OPTIONS) )
+		{
+			return dupTreeNoActions(t.getChild(1), t); // return x from ^(ASSIGN label x)
+		}
+		GrammarAST result = dup(t);		// make copy of root
+		// copy all children of root.
+		GrammarAST kids = dupListNoActions((GrammarAST)t.getFirstChild(), t);
+		result.setFirstChild(kids);
+		return result;
+	}
+
+}
diff --git a/src/org/antlr/tool/GrammarAnalysisAbortedMessage.java b/src/org/antlr/tool/GrammarAnalysisAbortedMessage.java
new file mode 100644
index 0000000..d4e07bb
--- /dev/null
+++ b/src/org/antlr/tool/GrammarAnalysisAbortedMessage.java
@@ -0,0 +1,67 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.SemanticContext;
+import antlr.Token;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Reports the condition that ANTLR's LL(*) analysis engine terminated
+ *  early.
+ */
+/** Reports the condition that ANTLR's LL(*) analysis engine terminated
+ *  early.
+ */
+public class GrammarAnalysisAbortedMessage extends Message {
+	public DecisionProbe probe;
+
+	public GrammarAnalysisAbortedMessage(DecisionProbe probe) {
+		super(ErrorManager.MSG_ANALYSIS_ABORTED);
+		this.probe = probe;
+	}
+
+	/** Render the message; as a side effect records the decision's
+	 *  source location (line/column/file) into this Message's fields.
+	 */
+	public String toString() {
+		GrammarAST decisionNode = probe.dfa.getDecisionASTNode();
+		line = decisionNode.getLine();
+		column = decisionNode.getColumn();
+		String fn = probe.dfa.nfa.grammar.getFileName();
+		if ( fn!=null ) {
+			file = fn;
+		}
+		StringTemplate st = getMessageTemplate();
+		String ruleName = probe.dfa.getNFADecisionStartState().getEnclosingRule();
+		st.setAttribute("enclosingRule", ruleName);
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/GrammarDanglingStateMessage.java b/src/org/antlr/tool/GrammarDanglingStateMessage.java
new file mode 100644
index 0000000..d829dc5
--- /dev/null
+++ b/src/org/antlr/tool/GrammarDanglingStateMessage.java
@@ -0,0 +1,70 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.SemanticContext;
+import antlr.Token;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Reports a potential parsing issue with a decision; the decision is
+ *  nondeterministic in some way.
+ */
+/** Reports a potential parsing issue with a decision; the decision is
+ *  nondeterministic in some way.
+ */
+public class GrammarDanglingStateMessage extends Message {
+	public DecisionProbe probe;
+    public DFAState problemState;
+
+	public GrammarDanglingStateMessage(DecisionProbe probe,
+									   DFAState problemState)
+	{
+		super(ErrorManager.MSG_DANGLING_STATE);
+		this.probe = probe;
+		this.problemState = problemState;
+	}
+
+	/** Render the message; as a side effect records the decision's
+	 *  source location (line/column/file) into this Message's fields.
+	 */
+	public String toString() {
+		GrammarAST decisionNode = probe.dfa.getDecisionASTNode();
+		line = decisionNode.getLine();
+		column = decisionNode.getColumn();
+		String fn = probe.dfa.nfa.grammar.getFileName();
+		if ( fn!=null ) {
+			file = fn;
+		}
+		StringTemplate st = getMessageTemplate();
+		st.setAttribute("danglingAlts", problemState.getAltSet());
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/GrammarInsufficientPredicatesMessage.java b/src/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
new file mode 100644
index 0000000..a918714
--- /dev/null
+++ b/src/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
@@ -0,0 +1,67 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.SemanticContext;
+import antlr.Token;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Reports that only a subset of a decision's ambiguous alternatives
+ *  carry semantic predicates, so predicates cannot fully resolve it.
+ */
+public class GrammarInsufficientPredicatesMessage extends Message {
+	public DecisionProbe probe;
+    public List alts;
+
+	public GrammarInsufficientPredicatesMessage(DecisionProbe probe,
+												List alts)
+	{
+		super(ErrorManager.MSG_INSUFFICIENT_PREDICATES);
+		this.probe = probe;
+		this.alts = alts;
+	}
+
+	/** Render the message; as a side effect records the decision's
+	 *  source location (line/column/file) into this Message's fields.
+	 */
+	public String toString() {
+		GrammarAST decisionNode = probe.dfa.getDecisionASTNode();
+		line = decisionNode.getLine();
+		column = decisionNode.getColumn();
+		String fn = probe.dfa.nfa.grammar.getFileName();
+		if ( fn!=null ) {
+			file = fn;
+		}
+		StringTemplate st = getMessageTemplate();
+		st.setAttribute("alts", alts);
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/GrammarNonDeterminismMessage.java b/src/org/antlr/tool/GrammarNonDeterminismMessage.java
new file mode 100644
index 0000000..bad9525
--- /dev/null
+++ b/src/org/antlr/tool/GrammarNonDeterminismMessage.java
@@ -0,0 +1,128 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.NFAState;
+import org.antlr.stringtemplate.StringTemplate;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Reports a potential parsing issue with a decision; the decision is
+ *  nondeterministic in some way.
+ */
+public class GrammarNonDeterminismMessage extends Message {
+	public DecisionProbe probe;
+    public DFAState problemState;
+
+	public GrammarNonDeterminismMessage(DecisionProbe probe,
+										DFAState problemState)
+	{
+		super(ErrorManager.MSG_GRAMMAR_NONDETERMINISM);
+		this.probe = probe;
+		this.problemState = problemState;
+		// flip msg ID if alts are actually token refs in Tokens rule
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			setMessageID(ErrorManager.MSG_TOKEN_NONDETERMINISM);
+		}
+	}
+
+	/** Render the message.  Side effect: mutates this Message's
+	 *  line/column/file fields -- first to the decision's location, then
+	 *  (for Tokens-rule decisions) repeatedly to each disabled token
+	 *  rule's location, so the last disabled token wins.
+	 */
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getColumn();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		StringTemplate st = getMessageTemplate();
+		// Now fill template with information about problemState
+		List labels = probe.getSampleNonDeterministicInputSequence(problemState);
+		String input = probe.getInputSequenceDisplay(labels);
+		st.setAttribute("input", input);
+
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			// report disabled alternatives by token-rule name, not alt number
+			Set disabledAlts = probe.getDisabledAlternatives(problemState);
+			for (Iterator it = disabledAlts.iterator(); it.hasNext();) {
+				Integer altI = (Integer) it.next();
+				String tokenName =
+					probe.getTokenNameForTokensRuleAlt(altI.intValue());
+				// reset the line/col to the token definition (pick last one)
+				NFAState ruleStart =
+					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
+				line = ruleStart.getAssociatedASTNode().getLine();
+				column = ruleStart.getAssociatedASTNode().getColumn();
+				st.setAttribute("disabled", tokenName);
+			}
+		}
+		else {
+			st.setAttribute("disabled", probe.getDisabledAlternatives(problemState));
+		}
+
+		List nondetAlts = probe.getNonDeterministicAltsForState(problemState);
+		NFAState nfaStart = probe.dfa.getNFADecisionStartState();
+		// all state paths have to begin with same NFA state
+		int firstAlt = 0;
+		if ( nondetAlts!=null ) {
+			for (Iterator iter = nondetAlts.iterator(); iter.hasNext();) {
+				Integer displayAltI = (Integer) iter.next();
+				if ( DecisionProbe.verbose ) {
+					// verbose mode: emit the NFA state path for each alt
+					int tracePathAlt =
+						nfaStart.translateDisplayAltToWalkAlt(probe.dfa,
+															  displayAltI.intValue());
+					// remember the first alt; paths are computed relative to it
+					if ( firstAlt == 0 ) {
+						firstAlt = tracePathAlt;
+					}
+					List path =
+						probe.getNFAPathStatesForAlt(firstAlt,
+													 tracePathAlt,
+													 labels);
+					st.setAttribute("paths.{alt,states}",
+									displayAltI, path);
+				}
+				else {
+					if ( probe.dfa.isTokensRuleDecision() ) {
+						// alts are token rules, convert to the names instead of numbers
+						String tokenName =
+							probe.getTokenNameForTokensRuleAlt(displayAltI.intValue());
+						st.setAttribute("conflictingTokens", tokenName);
+					}
+					else {
+						st.setAttribute("conflictingAlts", displayAltI);
+					}
+				}
+			}
+		}
+		return super.toString(st);
+	}
+
+}
diff --git a/src/org/antlr/tool/GrammarReport.java b/src/org/antlr/tool/GrammarReport.java
new file mode 100644
index 0000000..41002db
--- /dev/null
+++ b/src/org/antlr/tool/GrammarReport.java
@@ -0,0 +1,383 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.DFA;
+import org.antlr.runtime.misc.Stats;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+/** Builds statistics reports about a grammar: a tab-separated one-line
+ *  form for notification (toNotifyString) and a human-readable decoding
+ *  of that same line (static toString).  The field ORDER in
+ *  toNotifyString must stay in sync with the indices used in
+ *  toString(String) and with NUM_GRAMMAR_STATS.
+ */
+public class GrammarReport {
+	/** Because I may change the stats, I need to track that for later
+	 *  computations to be consistent.
+	 */
+	public static final String Version = "4";
+	public static final String GRAMMAR_STATS_FILENAME = "grammar.stats";
+	public static final int NUM_GRAMMAR_STATS = 41;
+
+	public static final String newline = System.getProperty("line.separator");
+
+	public Grammar grammar;
+
+	public GrammarReport(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	/** Create a single-line stats report about this grammar suitable to
+	 *  send to the notify page at antlr.org
+	 */
+	public String toNotifyString() {
+		StringBuffer buf = new StringBuffer();
+		buf.append(Version);
+		buf.append('\t');
+		buf.append(grammar.name);
+		buf.append('\t');
+		buf.append(Grammar.grammarTypeToString[grammar.type]);
+		buf.append('\t');
+		buf.append(grammar.getOption("language"));
+		// count rules/productions, excluding generated synpred rules
+		int totalNonSynPredProductions = 0;
+		int totalNonSynPredRules = 0;
+		Collection rules = grammar.getRules();
+		for (Iterator it = rules.iterator(); it.hasNext();) {
+			Rule r = (Rule) it.next();
+			if ( !r.name.toUpperCase()
+				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
+			{
+				totalNonSynPredProductions += r.numberOfAlts;
+				totalNonSynPredRules++;
+			}
+		}
+		buf.append('\t');
+		buf.append(totalNonSynPredRules);
+		buf.append('\t');
+		buf.append(totalNonSynPredProductions);
+		int numACyclicDecisions =
+			grammar.getNumberOfDecisions()-grammar.getNumberOfCyclicDecisions();
+		int[] depths = new int[numACyclicDecisions];
+		int[] acyclicDFAStates = new int[numACyclicDecisions];
+		int[] cyclicDFAStates = new int[grammar.getNumberOfCyclicDecisions()];
+		int acyclicIndex = 0;
+		int cyclicIndex = 0;
+		int numLL1 = 0;
+		int numDec = 0;
+		// gather lookahead-depth and DFA-size stats per decision
+		for (int i=1; i<=grammar.getNumberOfDecisions(); i++) {
+			Grammar.Decision d = grammar.getDecision(i);
+			if( d.dfa==null ) {
+				continue; // decision had no DFA built; skip it
+			}
+			numDec++;
+			if ( !d.dfa.isCyclic() ) {
+				int maxk = d.dfa.getMaxLookaheadDepth();
+				if ( maxk==1 ) {
+					numLL1++;
+				}
+				depths[acyclicIndex] = maxk;
+				acyclicDFAStates[acyclicIndex] = d.dfa.getNumberOfStates();
+				acyclicIndex++;
+			}
+			else {
+				cyclicDFAStates[cyclicIndex] = d.dfa.getNumberOfStates();
+				cyclicIndex++;
+			}
+		}
+		// emit the remaining tab-separated fields; order matters (see class doc)
+		buf.append('\t');
+		buf.append(numDec);
+		buf.append('\t');
+		buf.append(grammar.getNumberOfCyclicDecisions());
+		buf.append('\t');
+		buf.append(numLL1);
+		buf.append('\t');
+		buf.append(Stats.min(depths));
+		buf.append('\t');
+		buf.append(Stats.max(depths));
+		buf.append('\t');
+		buf.append(Stats.avg(depths));
+		buf.append('\t');
+		buf.append(Stats.stddev(depths));
+		buf.append('\t');
+		buf.append(Stats.min(acyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.max(acyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.avg(acyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.stddev(acyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.sum(acyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.min(cyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.max(cyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.avg(cyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.stddev(cyclicDFAStates));
+		buf.append('\t');
+		buf.append(Stats.sum(cyclicDFAStates));
+		buf.append('\t');
+		buf.append(grammar.getTokenTypes().size());
+		buf.append('\t');
+		buf.append(grammar.DFACreationWallClockTimeInMS);
+		buf.append('\t');
+		buf.append(grammar.numberOfSemanticPredicates);
+		buf.append('\t');
+		buf.append(grammar.numberOfManualLookaheadOptions);
+		buf.append('\t');
+		buf.append(grammar.setOfNondeterministicDecisionNumbers.size());
+		buf.append('\t');
+		buf.append(grammar.setOfNondeterministicDecisionNumbersResolvedWithPredicates.size());
+		buf.append('\t');
+		buf.append(grammar.setOfDFAWhoseConversionTerminatedEarly.size());
+		buf.append('\t');
+		buf.append(ErrorManager.getErrorState().errors);
+		buf.append('\t');
+		buf.append(ErrorManager.getErrorState().warnings);
+		buf.append('\t');
+		buf.append(ErrorManager.getErrorState().infos);
+		buf.append('\t');
+		Map synpreds = grammar.getSyntacticPredicates();
+		int num_synpreds = synpreds!=null ? synpreds.size() : 0;
+		buf.append(num_synpreds);
+		buf.append('\t');
+		buf.append(grammar.blocksWithSynPreds.size());
+		buf.append('\t');
+		buf.append(grammar.decisionsWhoseDFAsUsesSynPreds.size());
+		buf.append('\t');
+		buf.append(grammar.blocksWithSemPreds.size());
+		buf.append('\t');
+		buf.append(grammar.decisionsWhoseDFAsUsesSemPreds.size());
+		buf.append('\t');
+		// grammar options default to readable placeholders when unset
+		String output = (String)grammar.getOption("output");
+		if ( output==null ) {
+			output = "none";
+		}
+		buf.append(output);
+		buf.append('\t');
+		Object k = grammar.getOption("k");
+		if ( k==null ) {
+			k = "none";
+		}
+		buf.append(k);
+		buf.append('\t');
+		String backtrack = (String)grammar.getOption("backtrack");
+		if ( backtrack==null ) {
+			backtrack = "false";
+		}
+		buf.append(backtrack);
+		return buf.toString();
+	}
+
+	/** Human-readable list of the decisions that use backtracking. */
+	public String getBacktrackingReport() {
+		StringBuffer buf = new StringBuffer();
+		buf.append("Backtracking report:");
+		buf.append(newline);
+		buf.append("Number of decisions that backtrack: ");
+		buf.append(grammar.decisionsWhoseDFAsUsesSynPreds.size());
+		buf.append(newline);
+		buf.append(getDFALocations(grammar.decisionsWhoseDFAsUsesSynPreds));
+		return buf.toString();
+	}
+
+	/** Human-readable list of NFA->DFA conversions that aborted early. */
+	public String getEarlyTerminationReport() {
+		StringBuffer buf = new StringBuffer();
+		buf.append("NFA conversion early termination report:");
+		buf.append(newline);
+		buf.append("Number of NFA conversions that terminated early: ");
+		buf.append(grammar.setOfDFAWhoseConversionTerminatedEarly.size());
+		buf.append(newline);
+		buf.append(getDFALocations(grammar.setOfDFAWhoseConversionTerminatedEarly));
+		return buf.toString();
+	}
+
+	/** Format "Rule <name> decision <n> location line:col" for each DFA,
+	 *  reporting each decision number at most once.
+	 */
+	protected String getDFALocations(Set dfas) {
+		Set decisions = new HashSet();
+		StringBuffer buf = new StringBuffer();
+		Iterator it = dfas.iterator();
+		while ( it.hasNext() ) {
+			DFA dfa = (DFA) it.next();
+			// if we aborted a DFA and redid it with k=1, don't report
+			// the same decision number twice
+			if ( decisions.contains(Utils.integer(dfa.decisionNumber)) ) {
+				continue;
+			}
+			decisions.add(Utils.integer(dfa.decisionNumber));
+			buf.append("Rule ");
+			buf.append(dfa.decisionNFAStartState.getEnclosingRule());
+			buf.append(" decision ");
+			buf.append(dfa.decisionNumber);
+			buf.append(" location ");
+			GrammarAST decisionAST =
+				dfa.decisionNFAStartState.getAssociatedASTNode();
+			buf.append(decisionAST.getLine());
+			buf.append(":");
+			buf.append(decisionAST.getColumn());
+			buf.append(newline);
+		}
+		return buf.toString();
+	}
+
+	/** Given a stats line suitable for sending to the antlr.org site,
+	 *  return a human-readable version.  Return null if there is a
+	 *  problem with the data.
+	 */
+	public String toString() {
+		return toString(toNotifyString());
+	}
+
+	/** Split a notify line on tabs into exactly NUM_GRAMMAR_STATS fields;
+	 *  returns null if there are too few fields.
+	 *  NOTE(review): a line with MORE than NUM_GRAMMAR_STATS tab-separated
+	 *  tokens would overflow the fields array with an
+	 *  ArrayIndexOutOfBoundsException instead of returning null -- confirm
+	 *  inputs are always producer-generated.
+	 */
+	protected static String[] decodeReportData(String data) {
+		String[] fields = new String[NUM_GRAMMAR_STATS];
+		StringTokenizer st = new StringTokenizer(data, "\t");
+		int i = 0;
+		while ( st.hasMoreTokens() ) {
+			fields[i] = st.nextToken();
+			i++;
+		}
+		if ( i!=NUM_GRAMMAR_STATS ) {
+			return null;
+		}
+		return fields;
+	}
+
+	/** Decode a notify line into a multi-line human-readable report;
+	 *  field indices here mirror the append order in toNotifyString().
+	 *  Returns null when the line does not decode cleanly.
+	 */
+	public static String toString(String notifyDataLine) {
+		String[] fields = decodeReportData(notifyDataLine);
+		if ( fields==null ) {
+			return null;
+		}
+		StringBuffer buf = new StringBuffer();
+		buf.append("ANTLR Grammar Report; Stats Version ");
+		buf.append(fields[0]);
+		buf.append('\n');
+		buf.append("Grammar: ");
+		buf.append(fields[1]);
+		buf.append('\n');
+		buf.append("Type: ");
+		buf.append(fields[2]);
+		buf.append('\n');
+		buf.append("Target language: ");
+		buf.append(fields[3]);
+		buf.append('\n');
+		buf.append("Output: ");
+		buf.append(fields[38]);
+		buf.append('\n');
+		buf.append("Grammar option k: ");
+		buf.append(fields[39]);
+		buf.append('\n');
+		buf.append("Grammar option backtrack: ");
+		buf.append(fields[40]);
+		buf.append('\n');
+		buf.append("Rules: ");
+		buf.append(fields[4]);
+		buf.append('\n');
+		buf.append("Productions: ");
+		buf.append(fields[5]);
+		buf.append('\n');
+		buf.append("Decisions: ");
+		buf.append(fields[6]);
+		buf.append('\n');
+		buf.append("Cyclic DFA decisions: ");
+		buf.append(fields[7]);
+		buf.append('\n');
+		buf.append("LL(1) decisions: "); buf.append(fields[8]);
+		buf.append('\n');
+		buf.append("Min fixed k: "); buf.append(fields[9]);
+		buf.append('\n');
+		buf.append("Max fixed k: "); buf.append(fields[10]);
+		buf.append('\n');
+		buf.append("Average fixed k: "); buf.append(fields[11]);
+		buf.append('\n');
+		buf.append("Standard deviation of fixed k: "); buf.append(fields[12]);
+		buf.append('\n');
+		buf.append("Min acyclic DFA states: "); buf.append(fields[13]);
+		buf.append('\n');
+		buf.append("Max acyclic DFA states: "); buf.append(fields[14]);
+		buf.append('\n');
+		buf.append("Average acyclic DFA states: "); buf.append(fields[15]);
+		buf.append('\n');
+		buf.append("Standard deviation of acyclic DFA states: "); buf.append(fields[16]);
+		buf.append('\n');
+		buf.append("Total acyclic DFA states: "); buf.append(fields[17]);
+		buf.append('\n');
+		buf.append("Min cyclic DFA states: "); buf.append(fields[18]);
+		buf.append('\n');
+		buf.append("Max cyclic DFA states: "); buf.append(fields[19]);
+		buf.append('\n');
+		buf.append("Average cyclic DFA states: "); buf.append(fields[20]);
+		buf.append('\n');
+		buf.append("Standard deviation of cyclic DFA states: "); buf.append(fields[21]);
+		buf.append('\n');
+		buf.append("Total cyclic DFA states: "); buf.append(fields[22]);
+		buf.append('\n');
+		buf.append("Vocabulary size: ");
+		buf.append(fields[23]);
+		buf.append('\n');
+		buf.append("DFA creation time in ms: ");
+		buf.append(fields[24]);
+		buf.append('\n');
+		buf.append("Number of semantic predicates found: ");
+		buf.append(fields[25]);
+		buf.append('\n');
+		buf.append("Number of manual fixed lookahead k=value options: ");
+		buf.append(fields[26]);
+		buf.append('\n');
+		buf.append("Number of nondeterministic decisions: ");
+		buf.append(fields[27]);
+		buf.append('\n');
+		buf.append("Number of nondeterministic decisions resolved with predicates: ");
+		buf.append(fields[28]);
+		buf.append('\n');
+		buf.append("Number of DFA conversions terminated early: ");
+		buf.append(fields[29]);
+		buf.append('\n');
+		buf.append("Number of errors: ");
+		buf.append(fields[30]);
+		buf.append('\n');
+		buf.append("Number of warnings: ");
+		buf.append(fields[31]);
+		buf.append('\n');
+		buf.append("Number of infos: ");
+		buf.append(fields[32]);
+		buf.append('\n');
+		buf.append("Number of syntactic predicates found: ");
+		buf.append(fields[33]);
+		buf.append('\n');
+		buf.append("Decisions with syntactic predicates: ");
+		buf.append(fields[34]);
+		buf.append('\n');
+		buf.append("Decision DFAs using syntactic predicates: ");
+		buf.append(fields[35]);
+		buf.append('\n');
+		buf.append("Decisions with semantic predicates: ");
+		buf.append(fields[36]);
+		buf.append('\n');
+		buf.append("Decision DFAs using semantic predicates: ");
+		buf.append(fields[37]);
+		buf.append('\n');
+		return buf.toString();
+	}
+
+}
diff --git a/src/org/antlr/tool/GrammarSanity.java b/src/org/antlr/tool/GrammarSanity.java
new file mode 100644
index 0000000..743c3b3
--- /dev/null
+++ b/src/org/antlr/tool/GrammarSanity.java
@@ -0,0 +1,290 @@
+package org.antlr.tool;
+
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.Transition;
+import org.antlr.analysis.RuleClosureTransition;
+
+import java.util.List;
+import java.util.HashSet;
+import java.util.ArrayList;
+import java.util.Set;
+
+/** Factor out routines that check the sanity of rules, alts, grammars, etc. */
+public class GrammarSanity {
+	protected Grammar grammar;
+	public GrammarSanity(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	/** Check all rules for infinite left recursion before analysis. Return list
+	 *  of troublesome rule cycles.  This method has two side-effects: it notifies
+	 *  the error manager that we have problems and it sets the list of
+	 *  recursive rules that we should ignore during analysis.
+	 *
+	 *  Return type: List<Set<String(rule-name)>>.
+	 */
+	public List checkAllRulesForLeftRecursion() {
+		grammar.createNFAs(); // make sure we have NFAs
+		grammar.leftRecursiveRules = new HashSet();
+		List listOfRecursiveCycles = new ArrayList(); // List<Set<String(rule-name)>>
+		for (int i = 0; i < grammar.ruleIndexToRuleList.size(); i++) {
+			String ruleName = (String)grammar.ruleIndexToRuleList.elementAt(i);
+			if ( ruleName!=null ) {
+				NFAState s = grammar.getRuleStartState(ruleName);
+				grammar.visitedDuringRecursionCheck = new HashSet();
+				grammar.visitedDuringRecursionCheck.add(ruleName);
+				Set visitedStates = new HashSet();
+				traceStatesLookingForLeftRecursion(s, visitedStates, listOfRecursiveCycles);
+			}
+		}
+		if ( listOfRecursiveCycles.size()>0 ) {
+			ErrorManager.leftRecursionCycles(listOfRecursiveCycles);
+		}
+		return listOfRecursiveCycles;
+	}
+
+	/** From state s, look for any transition to a rule that is currently
+	 *  being traced.  When tracing r, visitedDuringRecursionCheck has r
+	 *  initially.  If you reach an accept state, return but notify the
+	 *  invoking rule that it is nullable, which implies that invoking
+	 *  rule must look at the follow transition for that invoking state.
+	 *  The visitedStates tracks visited states within a single rule so
+	 *  we can avoid epsilon-loop-induced infinite recursion here.  Keep
+	 *  filling the cycles in listOfRecursiveCycles and also, as a
+	 *  side-effect, set leftRecursiveRules.
+	 */
+	protected boolean traceStatesLookingForLeftRecursion(NFAState s,
+														 Set visitedStates,
+														 List listOfRecursiveCycles)
+	{
+		if ( s.isAcceptState() ) {
+			// this rule must be nullable!
+			// At least one epsilon edge reached accept state
+			return true;
+		}
+		if ( visitedStates.contains(s) ) {
+			// within same rule, we've hit same state; quit looping
+			return false;
+		}
+		visitedStates.add(s);
+		boolean stateReachesAcceptState = false;
+		Transition t0 = s.transition(0);
+		if ( t0 instanceof RuleClosureTransition ) {
+			String targetRuleName = ((NFAState)t0.target).getEnclosingRule();
+			if ( grammar.visitedDuringRecursionCheck.contains(targetRuleName) ) {
+				// record left-recursive rule, but don't go back in
+				grammar.leftRecursiveRules.add(targetRuleName);
+				/*
+				System.out.println("already visited "+targetRuleName+", calling from "+
+								   s.getEnclosingRule());
+				*/
+				addRulesToCycle(targetRuleName,
+								s.getEnclosingRule(),
+								listOfRecursiveCycles);
+			}
+			else {
+				// must visit if not already visited; send new visitedStates set
+				grammar.visitedDuringRecursionCheck.add(targetRuleName);
+				boolean callReachedAcceptState =
+					traceStatesLookingForLeftRecursion((NFAState)t0.target,
+													   new HashSet(),
+													   listOfRecursiveCycles);
+				// we're back from visiting that rule
+				grammar.visitedDuringRecursionCheck.remove(targetRuleName);
+				// must keep going in this rule then
+				if ( callReachedAcceptState ) {
+					NFAState followingState =
+						((RuleClosureTransition)t0).getFollowState();
+					stateReachesAcceptState |=
+						traceStatesLookingForLeftRecursion(followingState,
+														   visitedStates,
+														   listOfRecursiveCycles);
+				}
+			}
+		}
+		else if ( t0.label.isEpsilon() ) {
+			stateReachesAcceptState |=
+				traceStatesLookingForLeftRecursion((NFAState)t0.target, visitedStates, listOfRecursiveCycles);
+		}
+		// else it has a labeled edge
+
+		// now do the other transition if it exists
+		Transition t1 = s.transition(1);
+		if ( t1!=null ) {
+			stateReachesAcceptState |=
+				traceStatesLookingForLeftRecursion((NFAState)t1.target,
+												   visitedStates,
+												   listOfRecursiveCycles);
+		}
+		return stateReachesAcceptState;
+	}
+
+	/** enclosingRuleName calls targetRuleName, find the cycle containing
+	 *  the target and add the caller.  Find the cycle containing the caller
+	 *  and add the target.  If no cycles contain either, then create a new
+	 *  cycle.  listOfRecursiveCycles is List<Set<String>> that holds a list
+	 *  of cycles (sets of rule names).
+	 */
+	protected void addRulesToCycle(String targetRuleName,
+								   String enclosingRuleName,
+								   List listOfRecursiveCycles)
+	{
+		boolean foundCycle = false;
+		for (int i = 0; i < listOfRecursiveCycles.size(); i++) {
+			Set rulesInCycle = (Set)listOfRecursiveCycles.get(i);
+			// ensure both rules are in same cycle
+			if ( rulesInCycle.contains(targetRuleName) ) {
+				rulesInCycle.add(enclosingRuleName);
+				foundCycle = true;
+			}
+			if ( rulesInCycle.contains(enclosingRuleName) ) {
+				rulesInCycle.add(targetRuleName);
+				foundCycle = true;
+			}
+		}
+		if ( !foundCycle ) {
+			Set cycle = new HashSet();
+			cycle.add(targetRuleName);
+			cycle.add(enclosingRuleName);
+			listOfRecursiveCycles.add(cycle);
+		}
+	}
+
+	public void checkRuleReference(GrammarAST refAST,
+								   GrammarAST argsAST,
+								   String currentRuleName)
+	{
+		Rule r = grammar.getRule(refAST.getText());
+		if ( refAST.getType()==ANTLRParser.RULE_REF ) {
+			if ( argsAST!=null ) {
+				// rule[args]; ref has args
+                if ( r!=null && r.argActionAST==null ) {
+					// but rule def has no args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_RULE_HAS_NO_ARGS,
+						grammar,
+						argsAST.getToken(),
+						r.name);
+				}
+			}
+			else {
+				// rule ref has no args
+				if ( r!=null && r.argActionAST!=null ) {
+					// but rule def has args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_MISSING_RULE_ARGS,
+						grammar,
+						refAST.getToken(),
+						r.name);
+				}
+			}
+		}
+		else if ( refAST.getType()==ANTLRParser.TOKEN_REF ) {
+			if ( grammar.type!=Grammar.LEXER ) {
+				if ( argsAST!=null ) {
+					// args on a token ref not in a lexer rule
+					ErrorManager.grammarError(
+						ErrorManager.MSG_ARGS_ON_TOKEN_REF,
+						grammar,
+						refAST.getToken(),
+						refAST.getText());
+				}
+				return; // ignore token refs in nonlexers
+			}
+			if ( argsAST!=null ) {
+				// tokenRef[args]; ref has args
+				if ( r!=null && r.argActionAST==null ) {
+					// but token rule def has no args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_RULE_HAS_NO_ARGS,
+						grammar,
+						argsAST.getToken(),
+						r.name);
+				}
+			}
+			else {
+				// token ref has no args
+				if ( r!=null && r.argActionAST!=null ) {
+					// but token rule def has args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_MISSING_RULE_ARGS,
+						grammar,
+						refAST.getToken(),
+						r.name);
+				}
+			}
+		}
+	}
+
+	/** Rules in tree grammar that use -> rewrites and are spitting out
+	 *  templates via output=template and then use rewrite=true must only
+	 *  use -> on alts that are simple nodes or trees or single rule refs
+	 *  that match either nodes or trees.  The altAST is the ALT node
+	 *  for an ALT.  Verify that its first child is simple.  Must be either
+	 *  ( ALT ^( A B ) <end-of-alt> ) or ( ALT A <end-of-alt> ) or
+	 *  other element.
+	 *
+	 *  Ignore predicates in front and labels.
+	 */
+	public void ensureAltIsSimpleNodeOrTree(GrammarAST altAST,
+											GrammarAST elementAST,
+											int outerAltNum)
+	{
+		if ( isValidSimpleElementNode(elementAST) ) {
+			GrammarAST next = (GrammarAST)elementAST.getNextSibling();
+			if ( !isNextNonActionElementEOA(next)) {
+				ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_FOR_MULTI_ELEMENT_ALT,
+											grammar,
+											next.token,
+											new Integer(outerAltNum));
+			}
+			return;
+		}
+		switch ( elementAST.getType() ) {
+			case ANTLRParser.ASSIGN :		// labels ok on non-rule refs
+			case ANTLRParser.PLUS_ASSIGN :
+				if ( isValidSimpleElementNode(elementAST.getChild(1)) ) {
+					return;
+				}
+				break;
+			case ANTLRParser.ACTION :		// skip past actions
+			case ANTLRParser.SEMPRED :
+			case ANTLRParser.SYN_SEMPRED :
+			case ANTLRParser.BACKTRACK_SEMPRED :
+			case ANTLRParser.GATED_SEMPRED :
+				ensureAltIsSimpleNodeOrTree(altAST,
+											(GrammarAST)elementAST.getNextSibling(),
+											outerAltNum);
+				return;
+		}
+		ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_FOR_MULTI_ELEMENT_ALT,
+									grammar,
+									elementAST.token,
+									new Integer(outerAltNum));
+	}
+
+	protected boolean isValidSimpleElementNode(GrammarAST t) {
+		switch ( t.getType() ) {
+			case ANTLRParser.TREE_BEGIN :
+			case ANTLRParser.TOKEN_REF :
+			case ANTLRParser.CHAR_LITERAL :
+			case ANTLRParser.STRING_LITERAL :
+			case ANTLRParser.WILDCARD :
+				return true;
+			default :
+				return false;
+		}
+	}
+
+	protected boolean isNextNonActionElementEOA(GrammarAST t) {
+		while ( t.getType()==ANTLRParser.ACTION ||
+				t.getType()==ANTLRParser.SEMPRED )
+		{
+			t = (GrammarAST)t.getNextSibling();
+		}
+		if ( t.getType()==ANTLRParser.EOA ) {
+			return true;
+		}
+		return false;
+	}
+}
diff --git a/src/org/antlr/tool/GrammarSemanticsMessage.java b/src/org/antlr/tool/GrammarSemanticsMessage.java
new file mode 100644
index 0000000..09afcea
--- /dev/null
+++ b/src/org/antlr/tool/GrammarSemanticsMessage.java
@@ -0,0 +1,88 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import antlr.Token;
+
+/** A problem with the symbols and/or meaning of a grammar such as rule
+ *  redefinition.
+ */
+public class GrammarSemanticsMessage extends Message {
+	public Grammar g;
+	/** Most of the time, we'll have a token such as an undefined rule ref
+	 *  and so this will be set.
+	 */
+	public Token offendingToken;
+
+	public GrammarSemanticsMessage(int msgID,
+						  Grammar g,
+						  Token offendingToken)
+	{
+		this(msgID,g,offendingToken,null,null);
+	}
+
+	public GrammarSemanticsMessage(int msgID,
+						  Grammar g,
+						  Token offendingToken,
+						  Object arg)
+	{
+		this(msgID,g,offendingToken,arg,null);
+	}
+
+	public GrammarSemanticsMessage(int msgID,
+						  Grammar g,
+						  Token offendingToken,
+						  Object arg,
+						  Object arg2)
+	{
+		super(msgID,arg,arg2);
+		this.g = g;
+		this.offendingToken = offendingToken;
+	}
+
+	public String toString() {
+		line = 0;
+		column = 0;
+		if ( offendingToken!=null ) {
+			line = offendingToken.getLine();
+			column = offendingToken.getColumn();
+		}
+		if ( g!=null ) {
+			file = g.getFileName();
+		}
+		StringTemplate st = getMessageTemplate();
+		if ( arg!=null ) {
+			st.setAttribute("arg", arg);
+		}
+		if ( arg2!=null ) {
+			st.setAttribute("arg2", arg2);
+		}
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/GrammarSyntaxMessage.java b/src/org/antlr/tool/GrammarSyntaxMessage.java
new file mode 100644
index 0000000..0f94fb6
--- /dev/null
+++ b/src/org/antlr/tool/GrammarSyntaxMessage.java
@@ -0,0 +1,80 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import antlr.Token;
+
+/** A problem with the syntax of your antlr grammar such as
+ *  "The '{' came as a complete surprise to me at this point in your program"
+ */
+public class GrammarSyntaxMessage extends Message {
+	public Grammar g;
+	/** Most of the time, we'll have a token and so this will be set. */
+	public Token offendingToken;
+	public antlr.RecognitionException exception;
+
+	public GrammarSyntaxMessage(int msgID,
+								Grammar grammar,
+								Token offendingToken,
+								antlr.RecognitionException exception)
+	{
+		this(msgID,grammar,offendingToken,null,exception);
+	}
+
+	public GrammarSyntaxMessage(int msgID,
+								Grammar grammar,
+								Token offendingToken,
+								Object arg,
+								antlr.RecognitionException exception)
+	{
+		super(msgID, arg, null);
+		this.offendingToken = offendingToken;
+		this.exception = exception;
+		this.g = grammar;
+	}
+
+	public String toString() {
+		line = 0;
+		column = 0;
+		if ( offendingToken!=null ) {
+			line = offendingToken.getLine();
+			column = offendingToken.getColumn();
+		}
+		// TODO: actually set the right Grammar instance to get the filename
+		// TODO: have to update all v2 grammar files for this. or use errormanager and tool to get the current grammar
+		if (g != null) {
+			file = g.getFileName();
+		}
+		StringTemplate st = getMessageTemplate();
+		if ( arg!=null ) {
+			st.setAttribute("arg", arg);
+		}
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/GrammarUnreachableAltsMessage.java b/src/org/antlr/tool/GrammarUnreachableAltsMessage.java
new file mode 100644
index 0000000..57196d0
--- /dev/null
+++ b/src/org/antlr/tool/GrammarUnreachableAltsMessage.java
@@ -0,0 +1,93 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.SemanticContext;
+import antlr.Token;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Reports a potential parsing issue with a decision; the decision is
+ *  nondeterministic in some way.
+ */
+public class GrammarUnreachableAltsMessage extends Message {
+	public DecisionProbe probe;
+    public List alts;
+
+	public GrammarUnreachableAltsMessage(DecisionProbe probe,
+										 List alts)
+	{
+		super(ErrorManager.MSG_UNREACHABLE_ALTS);
+		this.probe = probe;
+		this.alts = alts;
+		// flip msg ID if alts are actually token refs in Tokens rule
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			setMessageID(ErrorManager.MSG_UNREACHABLE_TOKENS);
+		}
+	}
+
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getColumn();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		StringTemplate st = getMessageTemplate();
+
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			// alts are token rules, convert to the names instead of numbers
+			for (int i = 0; i < alts.size(); i++) {
+				Integer altI = (Integer) alts.get(i);
+				String tokenName =
+					probe.getTokenNameForTokensRuleAlt(altI.intValue());
+				// reset the line/col to the token definition
+				NFAState ruleStart =
+					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
+				line = ruleStart.getAssociatedASTNode().getLine();
+				column = ruleStart.getAssociatedASTNode().getColumn();
+				st.setAttribute("tokens", tokenName);
+			}
+		}
+		else {
+			// regular alt numbers, show the alts
+			st.setAttribute("alts", alts);
+		}
+
+		return super.toString(st);
+	}
+
+}
diff --git a/src/org/antlr/tool/Interp.java b/src/org/antlr/tool/Interp.java
new file mode 100644
index 0000000..fc87aa5
--- /dev/null
+++ b/src/org/antlr/tool/Interp.java
@@ -0,0 +1,87 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.runtime.ANTLRFileStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.tree.ParseTree;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.util.StringTokenizer;
+
+/** Interpret any ANTLR grammar:
+ *
+ *  java Interp file.g tokens-to-ignore start-rule input-file
+ *
+ *  java Interp C.g 'WS COMMENT' program t.c
+ *
+ *  where the WS and COMMENT are the names of tokens you want to have
+ *  the parser ignore.
+ */
+public class Interp {
+	// pass me a java file to parse
+	public static void main(String[] args) throws Exception {
+		if ( args.length!=4 ) {
+			System.err.println("java Interp file.g tokens-to-ignore start-rule input-file");
+			return;
+		}
+		String grammarFileName = args[0];
+		String ignoreTokens = args[1];
+		String startRule = args[2];
+		String inputFileName = args[3];
+
+		Grammar parser =
+			new Grammar(null,
+						grammarFileName,
+						new BufferedReader(new FileReader(grammarFileName)));
+
+		String lexerGrammarText = parser.getLexerGrammar();
+		Grammar lexer = new Grammar();
+		lexer.importTokenVocabulary(parser);
+		lexer.setGrammarContent(lexerGrammarText);
+		CharStream input =
+			new ANTLRFileStream(inputFileName);
+		Interpreter lexEngine = new Interpreter(lexer, input);
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+		StringTokenizer tk = new StringTokenizer(ignoreTokens, " ");
+		while ( tk.hasMoreTokens() ) {
+			String tokenName = tk.nextToken();
+			tokens.setTokenTypeChannel(lexer.getTokenType(tokenName), 99);
+		}
+
+		if ( parser.getRule(startRule)==null ) {
+			System.err.println("Rule "+startRule+" does not exist in "+grammarFileName);
+			return;
+		}
+		Interpreter parseEngine = new Interpreter(parser, tokens);
+		ParseTree t = parseEngine.parse(startRule);
+		System.out.println(t.toStringTree());
+	}
+}
diff --git a/src/org/antlr/tool/Interpreter.java b/src/org/antlr/tool/Interpreter.java
new file mode 100644
index 0000000..0c748b1
--- /dev/null
+++ b/src/org/antlr/tool/Interpreter.java
@@ -0,0 +1,425 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.*;
+import org.antlr.runtime.*;
+import org.antlr.runtime.debug.DebugEventListener;
+import org.antlr.runtime.debug.BlankDebugEventListener;
+import org.antlr.runtime.tree.ParseTree;
+import org.antlr.runtime.debug.ParseTreeBuilder;
+import org.antlr.misc.IntervalSet;
+
+import java.util.List;
+import java.util.Stack;
+
+/** The recognition interpreter/engine for grammars.  Separated
+ *  out of Grammar as it's related, but technically not a Grammar function.
+ *  You create an interpreter for a grammar and an input stream.  This object
+ *  can act as a TokenSource so that you can hook up two grammars (via
+ *  a CommonTokenStream) to lex/parse.  Being a token source only makes sense
+ *  for a lexer grammar of course.
+ */
+public class Interpreter implements TokenSource {
+	protected Grammar grammar;
+	protected IntStream input;
+
+	/** A lexer listener that just creates token objects as they
+	 *  are matched.  scan() uses this listener to get a single object.
+	 *  To get a stream of tokens, you must call scan() multiple times,
+	 *  recording the token object result after each call.
+	 */
+	class LexerActionGetTokenType extends BlankDebugEventListener {
+		public CommonToken token;
+		Grammar g;
+		public LexerActionGetTokenType(Grammar g) {
+			this.g = g;
+		}
+		public void exitRule(String ruleName) {
+			if ( !ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ){
+				int type = g.getTokenType(ruleName);
+				int channel = Token.DEFAULT_CHANNEL;
+				token = new CommonToken((CharStream)input,type,channel,0,0);
+			}
+		}
+	}
+
+	public Interpreter(Grammar grammar, IntStream input) {
+		this.grammar = grammar;
+		this.input = input;
+	}
+
+	public Token nextToken() {
+		if ( grammar.type!=Grammar.LEXER ) {
+			return null;
+		}
+		if ( input.LA(1)==CharStream.EOF ) {
+			return Token.EOF_TOKEN;
+		}
+		int start = input.index();
+		int charPos = ((CharStream)input).getCharPositionInLine();
+		CommonToken token = null;
+		loop:
+		while (input.LA(1)!=CharStream.EOF) {
+			try {
+				token = scan(Grammar.ARTIFICIAL_TOKENS_RULENAME, null);
+				break;
+			}
+			catch (RecognitionException re) {
+				// report a problem and try for another
+				reportScanError(re);
+				continue loop;
+			}
+		}
+		// the scan can only set type
+		// we must set the line, and other junk here to make it a complete token
+		int stop = input.index()-1;
+		if ( token==null ) {
+			return Token.EOF_TOKEN;
+		}
+		token.setLine(((CharStream)input).getLine());
+		token.setStartIndex(start);
+		token.setStopIndex(stop);
+		token.setCharPositionInLine(charPos);
+		return token;
+	}
+
+	/** For a given input char stream, try to match against the NFA
+	 *  starting at startRule.  This is a deterministic parse even though
+	 *  it is using an NFA because it uses DFAs at each decision point to
+	 *  predict which alternative will succeed.  This is exactly what the
+	 *  generated parser will do.
+	 *
+	 *  This only does lexer grammars.
+	 *
+	 *  Return the token type associated with the final rule end state.
+	 */
+	public void scan(String startRule,
+					 DebugEventListener actions,
+					 List visitedStates)
+		throws RecognitionException
+	{
+		if ( grammar.type!=Grammar.LEXER ) {
+			return;
+		}
+		CharStream in = (CharStream)this.input;
+		//System.out.println("scan("+startRule+",'"+in.substring(in.index(),in.size()-1)+"')");
+		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
+		if ( grammar.getRuleStartState(startRule)==null ) {
+			grammar.createNFAs();
+		}
+
+		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
+			// Create the DFA predictors for each decision
+			grammar.createLookaheadDFAs();
+		}
+
+		// do the parse
+		Stack ruleInvocationStack = new Stack();
+		NFAState start = grammar.getRuleStartState(startRule);
+		NFAState stop = grammar.getRuleStopState(startRule);
+		parseEngine(startRule, start, stop, in, ruleInvocationStack,
+					actions, visitedStates);
+	}
+
+	public CommonToken scan(String startRule)
+		throws RecognitionException
+	{
+		return scan(startRule, null);
+	}
+
+	public CommonToken scan(String startRule,
+							List visitedStates)
+		throws RecognitionException
+	{
+		LexerActionGetTokenType actions = new LexerActionGetTokenType(grammar);
+		scan(startRule, actions, visitedStates);
+		return actions.token;
+	}
+
+	public void parse(String startRule,
+					  DebugEventListener actions,
+					  List visitedStates)
+		throws RecognitionException
+	{
+		//System.out.println("parse("+startRule+")");
+		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
+		if ( grammar.getRuleStartState(startRule)==null ) {
+			grammar.createNFAs();
+		}
+		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
+			// Create the DFA predictors for each decision
+			grammar.createLookaheadDFAs();
+		}
+		// do the parse
+		Stack ruleInvocationStack = new Stack();
+		NFAState start = grammar.getRuleStartState(startRule);
+		NFAState stop = grammar.getRuleStopState(startRule);
+		parseEngine(startRule, start, stop, input, ruleInvocationStack,
+					actions, visitedStates);
+	}
+
+	public ParseTree parse(String startRule)
+		throws RecognitionException
+	{
+		return parse(startRule, null);
+	}
+
+	public ParseTree parse(String startRule, List visitedStates)
+		throws RecognitionException
+	{
+		ParseTreeBuilder actions = new ParseTreeBuilder(grammar.name);
+		try {
+			parse(startRule, actions, visitedStates);
+		}
+		catch (RecognitionException re) {
+			// Errors are tracked via the ANTLRDebugInterface
+			// Exceptions are used just to blast out of the parse engine
+			// The error will be in the parse tree.
+		}
+		return actions.getTree();
+	}
+
+	/** Fill a list of all NFA states visited during the parse */
+	protected void parseEngine(String startRule,
+							   NFAState start,
+							   NFAState stop,
+							   IntStream input,
+							   Stack ruleInvocationStack,
+							   DebugEventListener actions,
+							   List visitedStates)
+		throws RecognitionException
+	{
+		if ( actions!=null ) {
+			actions.enterRule(start.getEnclosingRule());
+		}
+		NFAState s = start;
+		int t = input.LA(1);
+		while ( s!=stop ) {
+			if ( visitedStates!=null ) {
+				visitedStates.add(s);
+			}
+			/*
+			System.out.println("parse state "+s.stateNumber+" input="+
+				grammar.getTokenDisplayName(t));
+				*/
+			// CASE 1: decision state
+			if ( s.getDecisionNumber()>0 && grammar.getNumberOfAltsForDecisionNFA(s)>1 ) {
+				// decision point, must predict and jump to alt
+				DFA dfa = grammar.getLookaheadDFA(s.getDecisionNumber());
+				/*
+				if ( grammar.type!=Grammar.LEXER ) {
+					System.out.println("decision: "+
+								   dfa.getNFADecisionStartState().getDescription()+
+								   " input="+grammar.getTokenDisplayName(t));
+				}
+				*/
+				int m = input.mark();
+				int predictedAlt = predict(dfa);
+				if ( predictedAlt == NFA.INVALID_ALT_NUMBER ) {
+					String description = dfa.getNFADecisionStartState().getDescription();
+					NoViableAltException nvae =
+						new NoViableAltException(description,
+												 dfa.getDecisionNumber(),
+												 s.stateNumber,
+												 input);
+					if ( actions!=null ) {
+						actions.recognitionException(nvae);
+					}
+					input.consume(); // recover
+					throw nvae;
+				}
+				input.rewind(m);
+				int parseAlt =
+					s.translateDisplayAltToWalkAlt(dfa,predictedAlt);
+				/*
+				if ( grammar.type!=Grammar.LEXER ) {
+					System.out.println("predicted alt "+predictedAlt+", parseAlt "+
+									   parseAlt);
+				}
+				*/
+				NFAState alt = grammar.getNFAStateForAltOfDecision(s, parseAlt);
+				s = (NFAState)alt.transition(0).target;
+				continue;
+			}
+
+			// CASE 2: finished matching a rule
+			if ( s.isAcceptState() ) { // end of rule node
+				if ( actions!=null ) {
+					actions.exitRule(s.getEnclosingRule());
+				}
+				if ( ruleInvocationStack.empty() ) {
+					// done parsing.  Hit the start state.
+					//System.out.println("stack empty in stop state for "+s.getEnclosingRule());
+					break;
+				}
+				// pop invoking state off the stack to know where to return to
+				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
+				RuleClosureTransition invokingTransition =
+						(RuleClosureTransition)invokingState.transition(0);
+				// move to node after state that invoked this rule
+				s = invokingTransition.getFollowState();
+				continue;
+			}
+
+			Transition trans = s.transition(0);
+			Label label = trans.label;
+			// CASE 3: epsilon transition
+			if ( label.isEpsilon() ) {
+				// CASE 3a: rule invocation state
+				if ( trans instanceof RuleClosureTransition ) {
+					ruleInvocationStack.push(s);
+					s = (NFAState)trans.target;
+					if ( actions!=null ) {
+						actions.enterRule(s.getEnclosingRule());
+					}
+				}
+				// CASE 3b: plain old epsilon transition, just move
+				else {
+					s = (NFAState)trans.target;
+				}
+			}
+
+			// CASE 4: match label on transition
+			else if ( label.matches(t) ) {
+				if ( actions!=null ) {
+					if ( grammar.type == Grammar.PARSER ||
+						 grammar.type == Grammar.COMBINED )
+					{
+						actions.consumeToken(((TokenStream)input).LT(1));
+					}
+				}
+				s = (NFAState)s.transition(0).target;
+				input.consume();
+				t = input.LA(1);
+			}
+
+			// CASE 5: error condition; label is inconsistent with input
+			else {
+				if ( label.isAtom() ) {
+					MismatchedTokenException mte =
+						new MismatchedTokenException(label.getAtom(), input);
+					if ( actions!=null ) {
+						actions.recognitionException(mte);
+					}
+					input.consume(); // recover
+					throw mte;
+				}
+				else if ( label.isSet() ) {
+					MismatchedSetException mse =
+						new MismatchedSetException(((IntervalSet)label.getSet()).toRuntimeBitSet(),
+												   input);
+					if ( actions!=null ) {
+						actions.recognitionException(mse);
+					}
+					input.consume(); // recover
+					throw mse;
+				}
+				else if ( label.isSemanticPredicate() ) {
+					FailedPredicateException fpe =
+						new FailedPredicateException(input,
+													 s.getEnclosingRule(),
+													 label.getSemanticContext().toString());
+					if ( actions!=null ) {
+						actions.recognitionException(fpe);
+					}
+					input.consume(); // recover
+					throw fpe;
+				}
+				else {
+					throw new RecognitionException(input); // unknown error
+				}
+			}
+		}
+		//System.out.println("hit stop state for "+stop.getEnclosingRule());
+		if ( actions!=null ) {
+			actions.exitRule(stop.getEnclosingRule());
+		}
+	}
+
+	/** Given an input stream, return the unique alternative predicted by
+	 *  matching the input.  Upon error, return NFA.INVALID_ALT_NUMBER
+	 *  The first symbol of lookahead is presumed to be primed; that is,
+	 *  input.lookahead(1) must point at the input symbol you want to start
+	 *  predicting with.
+	 */
+	public int predict(DFA dfa) {
+		DFAState s = dfa.startState;
+		int c = input.LA(1);
+		Transition eotTransition = null;
+	dfaLoop:
+		while ( !s.isAcceptState() ) {
+			/*
+			System.out.println("DFA.predict("+s.getStateNumber()+", "+
+					dfa.getNFA().getGrammar().getTokenName(c)+")");
+			*/
+			// for each edge of s, look for intersection with current char
+			for (int i=0; i<s.getNumberOfTransitions(); i++) {
+				Transition t = s.transition(i);
+				// special case: EOT matches any char
+				if ( t.label.matches(c) ) {
+					// take transition i
+					s = (DFAState)t.target;
+					input.consume();
+					c = input.LA(1);
+					continue dfaLoop;
+				}
+				if ( t.label.getAtom()==Label.EOT ) {
+					eotTransition = t;
+				}
+			}
+			if ( eotTransition!=null ) {
+				s = (DFAState)eotTransition.target;
+				continue dfaLoop;
+			}
+			/*
+			ErrorManager.error(ErrorManager.MSG_NO_VIABLE_DFA_ALT,
+							   s,
+							   dfa.nfa.grammar.getTokenName(c));
+			*/
+			return NFA.INVALID_ALT_NUMBER;
+		}
+		// woohoo!  We know which alt to predict
+		// nothing emanates from a stop state; must terminate anyway
+		/*
+		System.out.println("DFA stop state "+s.getStateNumber()+" predicts "+
+				s.getUniquelyPredictedAlt());
+		*/
+		return s.getUniquelyPredictedAlt();
+	}
+
+	public void reportScanError(RecognitionException re) {
+		CharStream cs = (CharStream)input;
+		// print as good a message as we can, given that we do not have
+		// a Lexer object and, hence, cannot call the routine to get a
+		// decent error message.
+		System.err.println("problem matching token at "+
+			cs.getLine()+":"+cs.getCharPositionInLine()+" "+re.getClass().getName());
+	}
+}
diff --git a/src/org/antlr/tool/LeftRecursionCyclesMessage.java b/src/org/antlr/tool/LeftRecursionCyclesMessage.java
new file mode 100644
index 0000000..5b7d768
--- /dev/null
+++ b/src/org/antlr/tool/LeftRecursionCyclesMessage.java
@@ -0,0 +1,53 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.analysis.*;
+import antlr.Token;
+
+import java.util.*;
+
+/** Similar to LeftRecursionMessage except this is used for announcing
+ *  cycles found by walking rules without decisions; the other msg is
+ *  invoked when a decision DFA construction finds a problem in closure.
+ */
+public class LeftRecursionCyclesMessage extends Message {
+	public Collection cycles;
+
+	public LeftRecursionCyclesMessage(Collection cycles) {
+		super(ErrorManager.MSG_LEFT_RECURSION_CYCLES);
+		this.cycles = cycles;
+	}
+
+	public String toString() {
+		StringTemplate st = getMessageTemplate();
+		st.setAttribute("listOfCycles", cycles);
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/Message.java b/src/org/antlr/tool/Message.java
new file mode 100644
index 0000000..e9c3499
--- /dev/null
+++ b/src/org/antlr/tool/Message.java
@@ -0,0 +1,128 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+
+/** The ANTLR code calls methods on ErrorManager to report errors etc...
+ *  Rather than simply pass these arguments to the ANTLRErrorListener directly,
+ *  create an object that encapsulates everything.  In this way, the error
+ *  listener interface does not have to change when I add a new kind of
+ *  error message.  I don't want to break a GUI for example every time
+ *  I update the error system in ANTLR itself.
+ *
+ *  To get a printable error/warning message, call toString().
+ */
+public abstract class Message {
+	// msgST is the actual text of the message
+	public StringTemplate msgST;
+	// these are for supporting different output formats
+	public StringTemplate locationST;
+	public StringTemplate reportST;
+	public StringTemplate messageFormatST;
+
+	public int msgID;
+	public Object arg;
+	public Object arg2;
+	public Throwable e;
+	// used for location template
+	public String file;
+	public int line = -1;
+	public int column = -1;
+
+	public Message() {
+	}
+
+	public Message(int msgID) {
+		this(msgID, null, null);
+	}
+
+	public Message(int msgID, Object arg, Object arg2) {
+		setMessageID(msgID);
+		this.arg = arg;
+		this.arg2 = arg2;
+	}
+
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	public void setColumn(int column) {
+		this.column = column;
+	}
+
+	public void setMessageID(int msgID) {
+		this.msgID = msgID;
+		msgST = ErrorManager.getMessage(msgID);
+	}
+
+	/** Return a new template instance every time someone tries to print
+	 *  a Message.
+	 */
+	public StringTemplate getMessageTemplate() {
+		return msgST.getInstanceOf();
+	}
+
+	/** Return a new template instance for the location part of a Message.
+	 *  TODO: Is this really necessary? -Kay
+	 */
+	public StringTemplate getLocationTemplate() {
+		return locationST.getInstanceOf();
+	}
+
+	public String toString(StringTemplate messageST) {
+		// setup the location
+		locationST = ErrorManager.getLocationFormat();
+		reportST = ErrorManager.getReportFormat();
+		messageFormatST = ErrorManager.getMessageFormat();
+		boolean locationValid = false;
+		if (line != -1) {
+			locationST.setAttribute("line", line);
+			locationValid = true;
+		}
+		if (column != -1) {
+			locationST.setAttribute("column", column);
+			locationValid = true;
+		}
+		if (file != null) {
+			locationST.setAttribute("file", file);
+			locationValid = true;
+		}
+
+		messageFormatST.setAttribute("id", msgID);
+		messageFormatST.setAttribute("text", messageST);
+
+		if (locationValid) {
+			reportST.setAttribute("location", locationST);
+		}
+		reportST.setAttribute("message", messageFormatST);
+		reportST.setAttribute("type", ErrorManager.getMessageType(msgID));
+
+		return reportST.toString();
+	}
+}
diff --git a/src/org/antlr/tool/NFAFactory.java b/src/org/antlr/tool/NFAFactory.java
new file mode 100644
index 0000000..219e612
--- /dev/null
+++ b/src/org/antlr/tool/NFAFactory.java
@@ -0,0 +1,692 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+
+import java.util.Iterator;
+import java.util.List;
+
+/** Routines to construct StateClusters from EBNF grammar constructs.
+ *  No optimization is done to remove unnecessary epsilon edges.
+ *
+ *  TODO: add an optimization that reduces number of states and transitions
+ *  will help with speed of conversion and make it easier to view NFA.  For
+ *  example, o-A->o-->o-B->o should be o-A->o-B->o
+ */
+public class NFAFactory {
+	/** This factory is attached to a specific NFA that it is building.
+     *  The NFA will be filled up with states and transitions.
+     */
+	NFA nfa = null;
+
+	String currentRuleName = null;
+
+    /** Used to assign state numbers */
+    protected int stateCounter = 0;
+
+	public NFAFactory(NFA nfa) {
+        nfa.setFactory(this);
+		this.nfa = nfa;
+	}
+
+    public NFAState newState() {
+        NFAState n = new NFAState(nfa);
+        int state = stateCounter;
+        n.stateNumber = state;
+        stateCounter++;
+        nfa.addState(n);
+		n.setEnclosingRuleName(currentRuleName);
+        return n;
+    }
+
+    public int getNumberOfStates() {
+        return stateCounter;
+    }
+
+	/** Optimize an alternative (list of grammar elements).
+	 *
+	 *  Walk the chain of elements (which can be complicated loop blocks...)
+	 *  and throw away any epsilon transitions used to link up simple elements.
+	 *
+	 *  This only removes 195 states from the java.g's NFA, but every little
+	 *  bit helps.  Perhaps I can improve in the future.
+	 */
+	public void optimizeAlternative(StateCluster alt) {
+		NFAState s = alt.left;
+		while ( s!=alt.right ) {
+			// if it's a block element, jump over it and continue
+			if ( s.endOfBlockStateNumber!=State.INVALID_STATE_NUMBER ) {
+				s = nfa.getState(s.endOfBlockStateNumber);
+				continue;
+			}
+			Transition t = s.transition(0);
+			if ( t instanceof RuleClosureTransition ) {
+				s = ((RuleClosureTransition)t).getFollowState();
+				continue;
+			}
+			if ( t.label.isEpsilon() && s.getNumberOfTransitions()==1 ) {
+				// bypass epsilon transition and point to what the epsilon's
+				// target points to unless that epsilon transition points to
+				// a block or loop etc..  Also don't collapse epsilons that
+				// point at the last node of the alt
+				NFAState epsilonTarget = (NFAState)t.target;
+				if ( epsilonTarget.endOfBlockStateNumber==State.INVALID_STATE_NUMBER &&
+					 epsilonTarget.transition(0)!=null )
+				{
+					s.setTransition0(epsilonTarget.transition(0));
+					/*
+					System.out.println("### opt "+s.stateNumber+"->"+
+									   epsilonTarget.transition(0).target.stateNumber);
+					*/
+				}
+			}
+			s = (NFAState)t.target;
+		}
+	}
+
+	/** From label A build Graph o-A->o */
+	public StateCluster build_Atom(int label) {
+		NFAState left = newState();
+		NFAState right = newState();
+		transitionBetweenStates(left, right, label);
+		StateCluster g = new StateCluster(left, right);
+		return g;
+	}
+
+    /** From set build single edge graph o->o-set->o.  To conform to
+     *  what an alt block looks like, must have extra state on left.
+     */
+	public StateCluster build_Set(IntSet set) {
+        //NFAState start = newState();
+        NFAState left = newState();
+        //transitionBetweenStates(start, left, Label.EPSILON);
+        NFAState right = newState();
+        Transition e = new Transition(new Label(set),right);
+        left.addTransition(e);
+		StateCluster g = new StateCluster(left, right);
+        return g;
+	}
+
+    /** Can only complement block of simple alts; can complement build_Set()
+     *  result, that is.  Get set and complement, replace old with complement.
+    public StateCluster build_AlternativeBlockComplement(StateCluster blk) {
+        State s0 = blk.left;
+        IntSet set = getCollapsedBlockAsSet(s0);
+        if ( set!=null ) {
+            // if set is available, then structure known and blk is a set
+            set = nfa.grammar.complement(set);
+            Label label = s0.transition(0).target.transition(0).label;
+            label.setSet(set);
+        }
+        return blk;
+    }
+	 */
+
+    public StateCluster build_Range(int a, int b) {
+        NFAState left = newState();
+        NFAState right = newState();
+        Transition e = new Transition(new Label(IntervalSet.of(a,b)),right);
+        left.addTransition(e);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+	/** From char 'c' build StateCluster o-intValue(c)->o
+	 */
+	public StateCluster build_CharLiteralAtom(String charLiteral) {
+        int c = Grammar.getCharValueFromGrammarCharLiteral(charLiteral);
+		return build_Atom(c);
+	}
+
+	/** From char 'c' build StateCluster o-intValue(c)->o
+	 *  can include unicode spec likes '\u0024' later.  Accepts
+	 *  actual unicode 16-bit now, of course, by default.
+     *  TODO not supplemental char clean!
+	 */
+	public StateCluster build_CharRange(String a, String b) {
+		int from = Grammar.getCharValueFromGrammarCharLiteral(a);
+		int to = Grammar.getCharValueFromGrammarCharLiteral(b);
+		return build_Range(from, to);
+	}
+
+    /** For a non-lexer, just build a simple token reference atom.
+     *  For a lexer, a string is a sequence of char to match.  That is,
+     *  "fog" is treated as 'f' 'o' 'g' not as a single transition in
+     *  the DFA.  Machine== o-'f'->o-'o'->o-'g'->o and has n+1 states
+     *  for n characters.
+     */
+    public StateCluster build_StringLiteralAtom(String stringLiteral) {
+        if ( nfa.grammar.type==Grammar.LEXER ) {
+			StringBuffer chars =
+				Grammar.getUnescapedStringFromGrammarStringLiteral(stringLiteral);
+            NFAState first = newState();
+            NFAState last = null;
+            NFAState prev = first;
+            for (int i=0; i<chars.length(); i++) {
+                int c = chars.charAt(i);
+                NFAState next = newState();
+                transitionBetweenStates(prev, next, c);
+                prev = last = next;
+            }
+            return  new StateCluster(first, last);
+        }
+
+        // a simple token reference in non-Lexers
+        int tokenType = nfa.grammar.getTokenType(stringLiteral);
+        return build_Atom(tokenType);
+    }
+
+    /** For reference to rule r, build
+     *
+     *  o-e->(r)  o
+     *
+     *  where (r) is the start of rule r and the trailing o is not linked
+     *  to from rule ref state directly (it's done through the transition(0)
+     *  RuleClosureTransition).
+     *
+     *  If the rule r is just a list of tokens, its block will be just
+     *  a set on an edge o->o->o-set->o->o->o, could inline it rather than doing
+     *  the rule reference, but i'm not doing this yet as I'm not sure
+     *  it would help much in the NFA->DFA construction.
+     *
+     *  TODO add to codegen: collapse alt blks that are sets into single matchSet
+     */
+    public StateCluster build_RuleRef(int ruleIndex, NFAState ruleStart) {
+        /*
+        System.out.println("building ref to rule "+ruleIndex+": "+
+                nfa.getGrammar().getRuleName(ruleIndex));
+        */
+        NFAState left = newState();
+        // left.setDescription("ref to "+ruleStart.getDescription());
+        NFAState right = newState();
+        // right.setDescription("NFAState following ref to "+ruleStart.getDescription());
+        Transition e = new RuleClosureTransition(ruleIndex,ruleStart,right);
+        left.addTransition(e);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+    /** From an empty alternative build StateCluster o-e->o */
+    public StateCluster build_Epsilon() {
+        NFAState left = newState();
+        NFAState right = newState();
+        transitionBetweenStates(left, right, Label.EPSILON);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+    /** Build what amounts to an epsilon transition with a semantic
+     *  predicate action.  The pred is a pointer into the AST of
+     *  the SEMPRED token.
+     */
+    public StateCluster build_SemanticPredicate(GrammarAST pred) {
+		// don't count syn preds
+		if ( !pred.getText().toUpperCase()
+			    .startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
+		{
+			nfa.grammar.numberOfSemanticPredicates++;
+		}
+		NFAState left = newState();
+        NFAState right = newState();
+        Transition e = new Transition(new Label(pred), right);
+        left.addTransition(e);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+	/** add an EOF transition to any rule end NFAState that points to nothing
+     *  (i.e., for all those rules not invoked by another rule).  These
+     *  are start symbols then.
+	 *
+	 *  Return the number of grammar entry points; i.e., how many rules are
+	 *  not invoked by another rule (they can only be invoked from outside).
+	 *  These are the start rules.
+     */
+    public int build_EOFStates(List rules) {
+		int numberUnInvokedRules = 0;
+        for (Iterator iterator = rules.iterator(); iterator.hasNext();) {
+			Rule r = (Rule) iterator.next();
+			String ruleName = r.name;
+			NFAState endNFAState = nfa.grammar.getRuleStopState(ruleName);
+            // Is this rule a start symbol?  (no follow links)
+            if ( endNFAState.transition(0)==null ) {
+                // if so, then don't let algorithm fall off the end of
+                // the rule, make it hit EOF/EOT.
+				/*
+				if ( nfa.grammar.type==Grammar.LEXER ) {
+					return; // 11/28/2005: try having only Tokens with EOT transition
+				}
+                if ( nfa.grammar.type!=Grammar.LEXER ||
+					 ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
+				{
+					build_EOFState(endNFAState);
+				}
+				*/
+				build_EOFState(endNFAState);
+				// track how many rules have not been invoked by another rule
+				numberUnInvokedRules++;
+            }
+        }
+		return numberUnInvokedRules;
+    }
+
+    /** set up an NFA NFAState that will yield eof tokens or,
+     *  in the case of a lexer grammar, an EOT token when the conversion
+     *  hits the end of a rule.
+     */
+    private void build_EOFState(NFAState endNFAState) {
+		NFAState end = newState();
+        int label = Label.EOF;
+        if ( nfa.grammar.type==Grammar.LEXER ) {
+            label = Label.EOT;
+			end.setEOTTargetState(true);
+        }
+        /*
+		System.out.println("build "+nfa.grammar.getTokenDisplayName(label)+
+						   " loop on end of state "+endNFAState.getDescription()+
+						   " to state "+end.stateNumber);
+        */
+		Transition toEnd = new Transition(label, end);
+        endNFAState.addTransition(toEnd);
+    }
+
+    /** From A B build A-e->B (that is, build an epsilon arc from right
+     *  of A to left of B).
+     *
+     *  As a convenience, return B if A is null or return A if B is null.
+     */
+    public StateCluster build_AB(StateCluster A, StateCluster B) {
+        if ( A==null ) {
+            return B;
+        }
+        if ( B==null ) {
+            return A;
+        }
+		transitionBetweenStates(A.right, B.left, Label.EPSILON);
+		StateCluster g = new StateCluster(A.left, B.right);
+        return g;
+    }
+
+	/** From a set ('a'|'b') build
+     *
+     *  o->o-'a'..'b'->o->o (last NFAState is blockEndNFAState pointed to by all alts)
+	 */
+	public StateCluster build_AlternativeBlockFromSet(StateCluster set) {
+		if ( set==null ) {
+			return null;
+		}
+
+		// single alt, no decision, just return only alt state cluster
+		NFAState startOfAlt = newState(); // must have this no matter what
+		transitionBetweenStates(startOfAlt, set.left, Label.EPSILON);
+
+		return new StateCluster(startOfAlt,set.right);
+	}
+
+	/** From A|B|..|Z alternative block build
+     *
+     *  o->o-A->o->o (last NFAState is blockEndNFAState pointed to by all alts)
+     *  |          ^
+     *  o->o-B->o--|
+     *  |          |
+     *  ...        |
+     *  |          |
+     *  o->o-Z->o--|
+     *
+     *  So every alternative gets begin NFAState connected by epsilon
+     *  and every alt right side points at a block end NFAState.  There is a
+     *  new NFAState in the NFAState in the StateCluster for each alt plus one for the
+     *  end NFAState.
+     *
+     *  Special case: only one alternative: don't make a block with alt
+     *  begin/end.
+     *
+     *  Special case: if just a list of tokens/chars/sets, then collapse
+     *  to a single edge'd o-set->o graph.
+     *
+     *  Set alt number (1..n) in the left-Transition NFAState.
+     */
+    public StateCluster build_AlternativeBlock(List alternativeStateClusters)
+    {
+        StateCluster result = null;
+        if ( alternativeStateClusters==null || alternativeStateClusters.size()==0 ) {
+            return null;
+        }
+
+		// single alt case
+		if ( alternativeStateClusters.size()==1 ) {
+			// single alt, no decision, just return only alt state cluster
+			StateCluster g = (StateCluster)alternativeStateClusters.get(0);
+			NFAState startOfAlt = newState(); // must have this no matter what
+			transitionBetweenStates(startOfAlt, g.left, Label.EPSILON);
+
+			//System.out.println("### opt saved start/stop end in (...)");
+			return new StateCluster(startOfAlt,g.right);
+		}
+
+		// even if we can collapse for lookahead purposes, we will still
+        // need to predict the alts of this subrule in case there are actions
+        // etc...  This is the decision that is pointed to from the AST node
+        // (always)
+        NFAState prevAlternative = null; // tracks prev so we can link to next alt
+        NFAState firstAlt = null;
+        NFAState blockEndNFAState = newState();
+        blockEndNFAState.setDescription("end block");
+        int altNum = 1;
+        for (Iterator iter = alternativeStateClusters.iterator(); iter.hasNext();) {
+            StateCluster g = (StateCluster) iter.next();
+            // add begin NFAState for this alt connected by epsilon
+            NFAState left = newState();
+            left.setDescription("alt "+altNum+" of ()");
+			transitionBetweenStates(left, g.left, Label.EPSILON);
+			transitionBetweenStates(g.right, blockEndNFAState, Label.EPSILON);
+			// Are we the first alternative?
+			if ( firstAlt==null ) {
+				firstAlt = left; // track extreme left node of StateCluster
+			}
+			else {
+				// if not first alternative, must link to this alt from previous
+				transitionBetweenStates(prevAlternative, left, Label.EPSILON);
+			}
+			prevAlternative = left;
+			altNum++;
+		}
+
+		// return StateCluster representing the entire block
+		// Points to first alt NFAState on left, block end on right
+		result = new StateCluster(firstAlt, blockEndNFAState);
+
+		firstAlt.decisionStateType = NFAState.BLOCK_START;
+
+		// set EOB markers for Jean
+		firstAlt.endOfBlockStateNumber = blockEndNFAState.stateNumber;
+
+		return result;
+    }
+
+    /** From (A)? build either:
+     *
+	 *  o--A->o
+	 *  |     ^
+	 *  o---->|
+     *
+     *  or, if A is a block, just add an empty alt to the end of the block
+     */
+    public StateCluster build_Aoptional(StateCluster A) {
+        StateCluster g = null;
+        int n = nfa.grammar.getNumberOfAltsForDecisionNFA(A.left);
+        if ( n==1 ) {
+            // no decision, just wrap in an optional path
+			//NFAState decisionState = newState();
+			NFAState decisionState = A.left; // reuse left edge
+			decisionState.setDescription("only alt of ()? block");
+			NFAState emptyAlt = newState();
+            emptyAlt.setDescription("epsilon path of ()? block");
+            NFAState blockEndNFAState = null;
+			blockEndNFAState = newState();
+			transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON);
+			blockEndNFAState.setDescription("end ()? block");
+            //transitionBetweenStates(decisionState, A.left, Label.EPSILON);
+            transitionBetweenStates(decisionState, emptyAlt, Label.EPSILON);
+            transitionBetweenStates(emptyAlt, blockEndNFAState, Label.EPSILON);
+
+			// set EOB markers for Jean
+			decisionState.endOfBlockStateNumber = blockEndNFAState.stateNumber;
+			blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+            g = new StateCluster(decisionState, blockEndNFAState);
+        }
+        else {
+            // a decision block, add an empty alt
+            NFAState lastRealAlt =
+                    nfa.grammar.getNFAStateForAltOfDecision(A.left, n);
+            NFAState emptyAlt = newState();
+            emptyAlt.setDescription("epsilon path of ()? block");
+            transitionBetweenStates(lastRealAlt, emptyAlt, Label.EPSILON);
+            transitionBetweenStates(emptyAlt, A.right, Label.EPSILON);
+
+			// set EOB markers for Jean (I think this is redundant here)
+			A.left.endOfBlockStateNumber = A.right.stateNumber;
+			A.right.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+            g = A; // return same block, but now with optional last path
+        }
+		g.left.decisionStateType = NFAState.OPTIONAL_BLOCK_START;
+
+        return g;
+    }
+
+    /** From (A)+ build
+	 *
+     *     |---|    (Transition 2 from A.right points at alt 1)
+	 *     v   |    (follow of loop is Transition 1)
+     *  o->o-A-o->o
+     *
+     *  Meaning that the last NFAState in A points back to A's left Transition NFAState
+     *  and we add a new begin/end NFAState.  A can be single alternative or
+     *  multiple.
+	 *
+	 *  During analysis we'll call the follow link (transition 1) alt n+1 for
+	 *  an n-alt A block.
+     */
+    public StateCluster build_Aplus(StateCluster A) {
+        NFAState left = newState();              // new entry state in front of the loop
+        NFAState blockEndNFAState = newState();  // new exit state after the loop
+		blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+		// don't reuse A.right as loopback if it's right edge of another block
+		if ( A.right.decisionStateType == NFAState.RIGHT_EDGE_OF_BLOCK ) {
+			// nested A* so make another tail node to be the loop back
+			// instead of the usual A.right which is the EOB for inner loop
+			NFAState extraRightEdge = newState();
+			transitionBetweenStates(A.right, extraRightEdge, Label.EPSILON);
+			A.right = extraRightEdge;
+		}
+
+        transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON); // follow is Transition 1
+		// turn A's block end into a loopback (acts like alt 2)
+		transitionBetweenStates(A.right, A.left, Label.EPSILON); // loop back Transition 2
+		transitionBetweenStates(left, A.left, Label.EPSILON); // enter loop body
+		
+		A.right.decisionStateType = NFAState.LOOPBACK;   // exit-vs-iterate decision lives here
+		A.left.decisionStateType = NFAState.BLOCK_START;
+
+		// set EOB markers for Jean
+		A.left.endOfBlockStateNumber = A.right.stateNumber;
+
+        StateCluster g = new StateCluster(left, blockEndNFAState);
+        return g;
+    }
+
+    /** From (A)* build
+     *
+	 *     |---|
+	 *     v   |
+	 *  o->o-A-o--o (Transition 2 from block end points at alt 1; follow is Transition 1)
+     *  |         ^
+     *  o---------| (optional branch is 2nd alt of optional block containing A+)
+     *
+     *  Meaning that the last (end) NFAState in A points back to A's
+     *  left side NFAState and we add 3 new NFAStates (the
+     *  optional branch is built just like an optional subrule).
+     *  See the Aplus() method for more on the loop back Transition.
+	 *  The new node on right edge is set to RIGHT_EDGE_OF_CLOSURE so we
+	 *  can detect nested (A*)* loops and insert an extra node.  Previously,
+	 *  two blocks shared same EOB node.
+     *
+     *  There are 2 or 3 decision points in a A*.  If A is not a block (i.e.,
+     *  it only has one alt), then there are two decisions: the optional bypass
+     *  and then loopback.  If A is a block of alts, then there are three
+     *  decisions: bypass, loopback, and A's decision point.
+     *
+     *  Note that the optional bypass must be outside the loop as (A|B)* is
+     *  not the same thing as (A|B|)+.
+     *
+     *  This is an accurate NFA representation of the meaning of (A)*, but
+     *  for generating code, I don't need a DFA for the optional branch by
+     *  virtue of how I generate code.  The exit-loopback-branch decision
+     *  is sufficient to let me make an appropriate enter, exit, loop
+     *  determination.  See codegen.g
+     */
+    public StateCluster build_Astar(StateCluster A) {
+		NFAState bypassDecisionState = newState();  // enter-vs-skip decision
+		bypassDecisionState.setDescription("enter loop path of ()* block");
+        NFAState optionalAlt = newState();          // the "match nothing" path
+        optionalAlt.setDescription("epsilon path of ()* block");
+        NFAState blockEndNFAState = newState();     // common exit for both paths
+		blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+		// don't reuse A.right as loopback if it's right edge of another block
+		if ( A.right.decisionStateType == NFAState.RIGHT_EDGE_OF_BLOCK ) {
+			// nested A* so make another tail node to be the loop back
+			// instead of the usual A.right which is the EOB for inner loop
+			NFAState extraRightEdge = newState();
+			transitionBetweenStates(A.right, extraRightEdge, Label.EPSILON);
+			A.right = extraRightEdge;
+		}
+
+		// convert A's end block to loopback
+		A.right.setDescription("()* loopback");
+		// Transition 1 to actual block of stuff
+        transitionBetweenStates(bypassDecisionState, A.left, Label.EPSILON);
+        // Transition 2 optional to bypass
+        transitionBetweenStates(bypassDecisionState, optionalAlt, Label.EPSILON);
+		transitionBetweenStates(optionalAlt, blockEndNFAState, Label.EPSILON);
+        // Transition 1 of end block exits
+        transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON);
+        // Transition 2 of end block loops
+        transitionBetweenStates(A.right, A.left, Label.EPSILON);
+
+		bypassDecisionState.decisionStateType = NFAState.BYPASS;
+		A.left.decisionStateType = NFAState.BLOCK_START;
+		A.right.decisionStateType = NFAState.LOOPBACK;
+
+		// set EOB markers for Jean
+		A.left.endOfBlockStateNumber = A.right.stateNumber;
+		bypassDecisionState.endOfBlockStateNumber = blockEndNFAState.stateNumber;
+
+        StateCluster g = new StateCluster(bypassDecisionState, blockEndNFAState);
+        return g;
+    }
+
+    /** Build an NFA predictor for special rule called Tokens manually that
+     *  predicts which token will succeed.  The refs to the rules are not
+     *  RuleRefTransitions as I want DFA conversion to stop at the EOT
+     *  transition on the end of each token, rather than return to Tokens rule.
+     *  If I used normal build_alternativeBlock for this, the RuleRefTransitions
+     *  would save return address when jumping away from Tokens rule.
+     *
+     *  All I do here is build n new states for n rules with an epsilon
+     *  edge to the rule start states and then to the next state in the
+     *  list:
+     *
+     *   o->(A)  (a state links to start of A and to next in list)
+     *   |
+     *   o->(B)
+     *   |
+     *   ...
+     *   |
+     *   o->(Z)
+	 *
+	 *  This is the NFA created for the artificial rule created in
+	 *  Grammar.addArtificialMatchTokensRule().
+	 *
+	 *  11/28/2005: removed so we can use normal rule construction for Tokens.
+    public NFAState build_ArtificialMatchTokensRuleNFA() {
+        int altNum = 1;
+        NFAState firstAlt = null; // the start state for the "rule"
+        NFAState prevAlternative = null;
+        Iterator iter = nfa.grammar.getRules().iterator();
+		// TODO: add a single decision node/state for good description
+        while (iter.hasNext()) {
+			Rule r = (Rule) iter.next();
+            String ruleName = r.name;
+			String modifier = nfa.grammar.getRuleModifier(ruleName);
+            if ( ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
+				 (modifier!=null &&
+				  modifier.equals(Grammar.FRAGMENT_RULE_MODIFIER)) )
+			{
+                continue; // don't loop to yourself or do nontoken rules
+            }
+            NFAState ruleStartState = nfa.grammar.getRuleStartState(ruleName);
+            NFAState left = newState();
+            left.setDescription("alt "+altNum+" of artificial rule "+Grammar.ARTIFICIAL_TOKENS_RULENAME);
+            transitionBetweenStates(left, ruleStartState, Label.EPSILON);
+            // Are we the first alternative?
+            if ( firstAlt==null ) {
+                firstAlt = left; // track extreme top left node as rule start
+            }
+            else {
+                // if not first alternative, must link to this alt from previous
+                transitionBetweenStates(prevAlternative, left, Label.EPSILON);
+            }
+            prevAlternative = left;
+            altNum++;
+        }
+		firstAlt.decisionStateType = NFAState.BLOCK_START;
+
+        return firstAlt;
+    }
+	 */
+
+    /** Build an atom whose single transition matches any value in the
+     *  grammar's entire token-type (or character) vocabulary; i.e., the
+     *  NFA for the wildcard.
+     */
+    public StateCluster build_Wildcard() {
+        NFAState start = newState();
+        NFAState stop = newState();
+        // label covers char or token vocabulary depending on grammar type
+        Label anyLabel = new Label(nfa.grammar.getTokenTypes());
+        start.addTransition(new Transition(anyLabel, stop));
+        return new StateCluster(start, stop);
+    }
+
+    /** Given a collapsed block of alts (a set of atoms), pull out
+     *  the set and return it; null when the two-hop epsilon/set shape
+     *  is not present.
+     */
+    protected IntSet getCollapsedBlockAsSet(State blk) {
+        // expect blk --eps--> s --set--> ...; bail out at any missing hop
+        if ( blk==null || blk.transition(0)==null ) {
+            return null;
+        }
+        State afterEntry = blk.transition(0).target;
+        if ( afterEntry==null || afterEntry.transition(0)==null ) {
+            return null;
+        }
+        Label candidate = afterEntry.transition(0).label;
+        return candidate.isSet() ? candidate.getSet() : null;
+    }
+
+	/** Wire state a to state b with a single transition on label. */
+	private void transitionBetweenStates(NFAState a, NFAState b, int label) {
+		a.addTransition(new Transition(label, b));
+	}
+}
diff --git a/src/org/antlr/tool/NameSpaceChecker.java b/src/org/antlr/tool/NameSpaceChecker.java
new file mode 100644
index 0000000..3a528a5
--- /dev/null
+++ b/src/org/antlr/tool/NameSpaceChecker.java
@@ -0,0 +1,236 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.Token;
+import org.antlr.analysis.Label;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Validates a grammar's namespaces: labels must not collide with rules,
+ *  tokens, scopes, args, retvals, or rule-scope attributes; rule names must
+ *  obey lexer/parser case conventions; and every referenced rule/token must
+ *  be defined.  All problems are reported through ErrorManager.
+ */
+public class NameSpaceChecker {
+	protected Grammar grammar;  // the grammar whose symbols we validate
+
+	public NameSpaceChecker(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	/** Main entry point: run every per-rule check (labels, rule-scope
+	 *  attributes, definition problems, arg/retval conflicts), check each
+	 *  global scope against token names, then look for references to
+	 *  undefined rules/tokens.
+	 */
+	public void checkConflicts() {
+		for (int i = 0; i < grammar.ruleIndexToRuleList.size(); i++) {
+			String ruleName = (String) grammar.ruleIndexToRuleList.elementAt(i);
+			if ( ruleName==null ) {
+				continue; // rule index list may have empty slots
+			}
+			Rule r = grammar.getRule(ruleName);
+			// walk all labels for Rule r
+			if ( r.labelNameSpace!=null ) {
+				Iterator it = r.labelNameSpace.values().iterator();
+				while ( it.hasNext() ) {
+					Grammar.LabelElementPair pair = (Grammar.LabelElementPair) it.next();
+					checkForLabelConflict(r, pair.label);
+				}
+			}
+			// walk rule scope attributes for Rule r
+			if ( r.ruleScope!=null ) {
+				List attributes = r.ruleScope.getAttributes();
+				for (int j = 0; j < attributes.size(); j++) {
+					Attribute attribute = (Attribute) attributes.get(j);
+					checkForRuleScopeAttributeConflict(r, attribute);
+				}
+			}
+			checkForRuleDefinitionProblems(r);
+			checkForRuleArgumentAndReturnValueConflicts(r);
+		}
+		// check all global scopes against tokens
+		Iterator it = grammar.getGlobalScopes().values().iterator();
+		while (it.hasNext()) {
+			AttributeScope scope = (AttributeScope) it.next();
+			checkForGlobalScopeTokenConflict(scope);
+		}
+		// check for missing rule, tokens
+		lookForReferencesToUndefinedSymbols();
+	}
+
+	/** A rule's argument names must not also appear among its return
+	 *  values; report each conflicting name.
+	 */
+	protected void checkForRuleArgumentAndReturnValueConflicts(Rule r) {
+		if ( r.returnScope!=null ) {
+			Set conflictingKeys = r.returnScope.intersection(r.parameterScope);
+			if (conflictingKeys!=null) {
+				for (Iterator it = conflictingKeys.iterator(); it.hasNext();) {
+					String key = (String) it.next();
+					ErrorManager.grammarError(
+						ErrorManager.MSG_ARG_RETVAL_CONFLICT,
+						grammar,
+						r.tree.getToken(),
+						key,
+						r.name);
+				}
+			}
+		}
+	}
+
+	/** Enforce rule naming conventions: no uppercase (token) rules in a
+	 *  parser grammar, no lowercase (parser) rules in a lexer grammar
+	 *  (except synpreds), and a rule must not share a name with a global
+	 *  scope.  At most one problem is reported per rule.
+	 */
+	protected void checkForRuleDefinitionProblems(Rule r) {
+		String ruleName = r.name;
+		antlr.Token ruleToken = r.tree.getToken();
+		int msgID = 0;
+		if ( grammar.type==Grammar.PARSER && Character.isUpperCase(ruleName.charAt(0)) ) {
+			msgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
+        }
+        else if ( grammar.type==Grammar.LEXER &&
+			      Character.isLowerCase(ruleName.charAt(0)) &&
+			      !r.isSynPred )
+		{
+			msgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
+        }
+		else if ( grammar.getGlobalScope(ruleName)!=null ) {
+			msgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		}
+		if ( msgID!=0 ) {
+			ErrorManager.grammarError(msgID, grammar, ruleToken, ruleName);
+		}
+	}
+
+	/** If ref to undefined rule, give error at first occurrence.
+	 *
+	 *  If you ref ID in a combined grammar and don't define ID as a lexer rule
+	 *  it is an error.
+	 */
+	protected void lookForReferencesToUndefinedSymbols() {
+		// for each rule ref, ask if there is a rule definition
+		for (Iterator iter = grammar.ruleRefs.iterator(); iter.hasNext();) {
+			Token tok = (Token) iter.next();
+			String ruleName = tok.getText();
+			// EOF is implicitly defined, so exclude it from the check
+			if ( grammar.getRule(ruleName)==null &&
+			     grammar.getTokenType(ruleName)!=Label.EOF )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
+										  grammar,
+										  tok,
+										  ruleName);
+			}
+        }
+		if ( grammar.type==Grammar.COMBINED ) {
+			// token refs with no lexer rule are only a warning: the user may
+			// supply the token via a token vocabulary
+			for (Iterator iter = grammar.tokenIDRefs.iterator(); iter.hasNext();) {
+				Token tok = (Token) iter.next();
+				String tokenID = tok.getText();
+				if ( !grammar.lexerRules.contains(tokenID) &&
+					 grammar.getTokenType(tokenID)!=Label.EOF )
+				{
+					ErrorManager.grammarWarning(ErrorManager.MSG_NO_TOKEN_DEFINITION,
+												grammar,
+												tok,
+												tokenID);
+				}
+			}
+		}
+	}
+
+	/** A global dynamic scope must not share its name with a token. */
+	protected void checkForGlobalScopeTokenConflict(AttributeScope scope) {
+		if ( grammar.getTokenType(scope.getName())!=Label.INVALID ) {
+			ErrorManager.grammarError(ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE,
+									  grammar, null, scope.getName());
+		}
+	}
+
+	/** Check for collision of a rule-scope dynamic attribute with:
+	 *  arg, return value, rule name itself.  Labels are checked elsewhere.
+	 */
+	public void checkForRuleScopeAttributeConflict(Rule r, Attribute attribute) {
+		int msgID = 0;
+		Object arg2 = null;
+		String attrName = attribute.name;
+		if ( r.name.equals(attrName) ) {
+			msgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
+			arg2 = r.name;
+		}
+		else if ( (r.returnScope!=null&&r.returnScope.getAttribute(attrName)!=null) ||
+				  (r.parameterScope!=null&&r.parameterScope.getAttribute(attrName)!=null) )
+		{
+			msgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
+			arg2 = r.name;
+		}
+		if ( msgID!=0 ) {
+			ErrorManager.grammarError(msgID,grammar,r.tree.getToken(),attrName,arg2);
+		}
+	}
+
+	/** Make sure a label doesn't conflict with another symbol.
+	 *  Labels must not conflict with: rules, tokens, scope names,
+	 *  return values, parameters, and rule-scope dynamic attributes
+	 *  defined in surrounding rule.
+	 */
+	protected void checkForLabelConflict(Rule r, antlr.Token label) {
+		int msgID = 0;
+		Object arg2 = null;
+		if ( grammar.getGlobalScope(label.getText())!=null ) {
+			msgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		}
+		else if ( grammar.getRule(label.getText())!=null ) {
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
+		}
+		else if ( grammar.getTokenType(label.getText())!=Label.INVALID ) {
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
+		}
+		else if ( r.ruleScope!=null && r.ruleScope.getAttribute(label.getText())!=null ) {
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
+			arg2 = r.name;
+		}
+		else if ( (r.returnScope!=null&&r.returnScope.getAttribute(label.getText())!=null) ||
+				  (r.parameterScope!=null&&r.parameterScope.getAttribute(label.getText())!=null) )
+		{
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
+			arg2 = r.name;
+		}
+		if ( msgID!=0 ) {
+			ErrorManager.grammarError(msgID,grammar,label,label.getText(),arg2);
+		}
+	}
+
+	/** If type of previous label differs from new label's type, that's an error.
+	 */
+	public boolean checkForLabelTypeMismatch(Rule r, antlr.Token label, int type) {
+		Grammar.LabelElementPair prevLabelPair =
+			(Grammar.LabelElementPair)r.labelNameSpace.get(label.getText());
+		if ( prevLabelPair!=null ) {
+			// label already defined; if same type, no problem
+			if ( prevLabelPair.type != type ) {
+				String typeMismatchExpr =
+					Grammar.LabelTypeToString[type]+"!="+
+					Grammar.LabelTypeToString[prevLabelPair.type];
+				ErrorManager.grammarError(
+					ErrorManager.MSG_LABEL_TYPE_CONFLICT,
+					grammar,
+					label,
+					label.getText(),
+					typeMismatchExpr);
+				return true;
+			}
+		}
+		return false;
+	}
+}
diff --git a/src/org/antlr/tool/NonRegularDecisionMessage.java b/src/org/antlr/tool/NonRegularDecisionMessage.java
new file mode 100644
index 0000000..884fd65
--- /dev/null
+++ b/src/org/antlr/tool/NonRegularDecisionMessage.java
@@ -0,0 +1,66 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.stringtemplate.StringTemplate;
+
+import java.util.*;
+
+/** More than a single alternative recurses, so this decision is not regular
+ *  (i.e., cannot be handled with a fixed-lookahead DFA).
+ */
+public class NonRegularDecisionMessage extends Message {
+	public DecisionProbe probe;      // analysis info for the failed decision
+	public Set altsWithRecursion;    // alt numbers (Integer) that recurse
+
+	public NonRegularDecisionMessage(DecisionProbe probe, Set altsWithRecursion) {
+		super(ErrorManager.MSG_NONREGULAR_DECISION);
+		this.probe = probe;
+		this.altsWithRecursion = altsWithRecursion;
+	}
+
+	/** Render via the message template, filling in file/line/column,
+	 *  the enclosing rule name, and the sorted list of recursive alts.
+	 */
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getColumn();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		StringTemplate st = getMessageTemplate();
+		String ruleName = probe.dfa.getNFADecisionStartState().getEnclosingRule();
+		st.setAttribute("ruleName", ruleName);
+		List sortedAlts = new ArrayList();
+		sortedAlts.addAll(altsWithRecursion);
+		Collections.sort(sortedAlts); // make sure it's 1, 2, ...
+		st.setAttribute("alts", sortedAlts);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/src/org/antlr/tool/RandomPhrase.java b/src/org/antlr/tool/RandomPhrase.java
new file mode 100644
index 0000000..80b69e6
--- /dev/null
+++ b/src/org/antlr/tool/RandomPhrase.java
@@ -0,0 +1,180 @@
+package org.antlr.tool;
+
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.RuleClosureTransition;
+import org.antlr.analysis.Transition;
+import org.antlr.analysis.Label;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.Utils;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Stack;
+import java.util.Random;
+
+/** Generate a random phrase given a grammar.
+ *  Usage:
+ *     java org.antlr.tool.RandomPhrase grammarFile.g startRule [seed]
+ *
+ *  For example:
+ *     java org.antlr.tool.RandomPhrase simple.g program 342
+ *
+ *  The seed acts like a unique identifier so you can get the same random
+ *  phrase back during unit testing, for example.
+ *
+ *  If you do not specify a seed then the current time in milliseconds is
+ *  used, making each run effectively unique.
+ */
+public class RandomPhrase {
+	// shared source of randomness; seeded in main() so runs are reproducible
+	protected static Random random;
+
+	/** an experimental method to generate random phrases for a given
+	 *  grammar given a start rule.  Appends token types (Integer) to
+	 *  tokenTypes by randomly walking the grammar's NFA from startRule's
+	 *  start state until its stop state is reached with an empty
+	 *  rule-invocation stack.
+	 */
+	protected static void randomPhrase(Grammar g, List tokenTypes, String startRule) {
+		NFAState state = g.getRuleStartState(startRule);
+		NFAState stopState = g.getRuleStopState(startRule);
+
+		Stack ruleInvocationStack = new Stack();
+		while ( true ) {
+			// done when back at start rule's stop state with no pending calls
+			if ( state==stopState && ruleInvocationStack.size()==0 ) {
+				break;
+			}
+			//System.out.println("state "+state);
+			if ( state.getNumberOfTransitions()==0 ) {
+				//System.out.println("dangling state: "+state);
+				return;
+			}
+			// end of rule node: pop back to the state after the invoking ref
+			if ( state.isAcceptState() ) {
+				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
+				// System.out.println("pop invoking state "+invokingState);
+				RuleClosureTransition invokingTransition =
+					(RuleClosureTransition)invokingState.transition(0);
+				// move to node after state that invoked this rule
+				state = invokingTransition.getFollowState();
+				continue;
+			}
+			if ( state.getNumberOfTransitions()==1 ) {
+				// no branching, just take this path
+				Transition t0 = state.transition(0);
+				if ( t0 instanceof RuleClosureTransition ) {
+					// rule invocation; remember where to resume afterwards
+					ruleInvocationStack.push(state);
+					// System.out.println("push state "+state);
+				}
+				else if ( !t0.label.isEpsilon() ) {
+					tokenTypes.add( getTokenType(t0.label) );
+					//System.out.println(t0.label.toString(g));
+				}
+				state = (NFAState)t0.target;
+				continue;
+			}
+
+			int decisionNumber = state.getDecisionNumber();
+			if ( decisionNumber==0 ) {
+				System.out.println("weird: no decision number but a choice node");
+				continue;
+			}
+			// decision point, pick ith alternative randomly; alts are 1-based
+			int n = g.getNumberOfAltsForDecisionNFA(state);
+			int randomAlt = random.nextInt(n) + 1;
+			//System.out.println("randomAlt="+randomAlt);
+			NFAState altStartState =
+				g.getNFAStateForAltOfDecision(state, randomAlt);
+			Transition t = altStartState.transition(0);
+			// start of a decision can never be a labeled transition,
+			// so just follow the epsilon edge into the chosen alt
+			state = (NFAState)t.target;
+		}
+	}
+
+	/** Map a transition label to a concrete token type: a random member
+	 *  for set labels, the atom itself otherwise.
+	 */
+	protected static Integer getTokenType(Label label) {
+		if ( label.isSet() ) {
+			// pick random element of set
+			IntSet typeSet = label.getSet();
+			List typeList = typeSet.toList();
+			int randomIndex = random.nextInt(typeList.size());
+			return (Integer)typeList.get(randomIndex);
+		}
+		return Utils.integer(label.getAtom());
+	}
+
+	/** Used to generate random strings.  Loads the grammar, derives the
+	 *  lexer grammar, walks both NFAs, and prints one random phrase.
+	 */
+	public static void main(String[] args) throws Exception {
+		if ( args.length<2 ) {
+			System.err.println("usage: java org.antlr.tool.RandomPhrase grammarFile.g startRule [seed]");
+			return;
+		}
+		String grammarFileName = args[0];
+		String startRule = args[1];
+		long seed = System.currentTimeMillis(); // use random seed unless spec.
+		if ( args.length==3 ) {
+			String seedStr = args[2];
+			// seed is a long; Long.parseLong avoids overflow/NumberFormatException
+			// for seeds larger than Integer.MAX_VALUE
+			seed = Long.parseLong(seedStr);
+		}
+		random = new Random(seed);
+
+		Grammar parser =
+			new Grammar(null,
+						grammarFileName,
+						new BufferedReader(new FileReader(grammarFileName)));
+		parser.createNFAs();
+
+		List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
+		if ( leftRecursiveRules.size()>0 ) {
+			System.err.println("grammar has left-recursive rules; cannot generate phrase");
+			return;
+		}
+
+		if ( parser.getRule(startRule)==null ) {
+			System.out.println("undefined start rule "+startRule);
+			return;
+		}
+
+		String lexerGrammarText = parser.getLexerGrammar();
+		Grammar lexer = new Grammar();
+		lexer.importTokenVocabulary(parser);
+		if ( lexerGrammarText!=null ) {
+			lexer.setGrammarContent(lexerGrammarText);
+		}
+		else {
+			System.err.println("no lexer grammar found in "+grammarFileName);
+		}
+		lexer.createNFAs();
+		leftRecursiveRules = lexer.checkAllRulesForLeftRecursion();
+		if ( leftRecursiveRules.size()>0 ) {
+			System.err.println("lexer grammar has left-recursive rules; cannot generate phrase");
+			return;
+		}
+
+		// generate the token-type sequence, then render each token
+		List tokenTypes = new ArrayList(100);
+		randomPhrase(parser, tokenTypes, startRule);
+		//System.out.println("token types="+tokenTypes);
+		for (int i = 0; i < tokenTypes.size(); i++) {
+			Integer ttypeI = (Integer) tokenTypes.get(i);
+			int ttype = ttypeI.intValue();
+			String ttypeDisplayName = parser.getTokenDisplayName(ttype);
+			if ( Character.isUpperCase(ttypeDisplayName.charAt(0)) ) {
+				// token rule reference: recursively generate its characters
+				List charsInToken = new ArrayList(10);
+				randomPhrase(lexer, charsInToken, ttypeDisplayName);
+				System.out.print(" ");
+				for (int j = 0; j < charsInToken.size(); j++) {
+					Integer cI = (Integer) charsInToken.get(j);
+					System.out.print((char)cI.intValue());
+				}
+			}
+			else { // it's a literal; strip the surrounding quotes
+				String literal =
+					ttypeDisplayName.substring(1,ttypeDisplayName.length()-1);
+				System.out.print(" "+literal);
+			}
+		}
+		System.out.println();
+	}
+
+}
diff --git a/src/org/antlr/tool/RecursionOverflowMessage.java b/src/org/antlr/tool/RecursionOverflowMessage.java
new file mode 100644
index 0000000..221d9f1
--- /dev/null
+++ b/src/org/antlr/tool/RecursionOverflowMessage.java
@@ -0,0 +1,82 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.analysis.*;
+import antlr.Token;
+
+import java.util.*;
+
+/** Indicates recursion overflow.  A DFA state tried to add an NFA
+ *  configuration with NFA state p that was mentioned in its stack context
+ *  too many times.
+ */
+public class RecursionOverflowMessage extends Message {
+	public DecisionProbe probe;       // analysis info for the decision
+	public DFAState sampleBadState;   // a DFA state exhibiting the overflow
+	public int alt;                   // alternative in which the overflow occurred
+	public Collection targetRules;    // rules whose invocation recursed too deeply
+	public Collection callSiteStates; // NFA states that invoke those rules
+
+	public RecursionOverflowMessage(DecisionProbe probe,
+									DFAState sampleBadState,
+									int alt,
+									Collection targetRules,
+									Collection callSiteStates)
+	{
+		super(ErrorManager.MSG_RECURSION_OVERLOW);
+		this.probe = probe;
+		this.sampleBadState = sampleBadState;
+		this.alt = alt;
+		this.targetRules = targetRules;
+		this.callSiteStates = callSiteStates;
+	}
+
+	/** Render via the message template, filling in file/line/column, the
+	 *  offending rules and call sites, and a sample input sequence that
+	 *  triggers the overflow.
+	 */
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getColumn();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		StringTemplate st = getMessageTemplate();
+		st.setAttribute("targetRules", targetRules);
+		st.setAttribute("alt", alt);
+		st.setAttribute("callSiteStates", callSiteStates);
+
+		List labels =
+			probe.getSampleNonDeterministicInputSequence(sampleBadState);
+		String input = probe.getInputSequenceDisplay(labels);
+		st.setAttribute("input", input);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/src/org/antlr/tool/Rule.java b/src/org/antlr/tool/Rule.java
new file mode 100644
index 0000000..47f615a
--- /dev/null
+++ b/src/org/antlr/tool/Rule.java
@@ -0,0 +1,562 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.CommonToken;
+import org.antlr.analysis.NFAState;
+import org.antlr.codegen.CodeGenerator;
+
+import java.util.*;
+
+/** Combine the info associated with a rule */
+public class Rule {
+	/** Rule name as written in the grammar */
+	public String name;
+	/** Index of this rule within the grammar's rule table */
+	public int index;
+	/** Optional rule modifier from the grammar, or null if none was given */
+	public String modifier;
+	/** NFA start/stop states built for this rule */
+	public NFAState startState;
+	public NFAState stopState;
+
+	/** This rule's options */
+	protected Map options;
+
+	// The only option names accepted by setOption(); anything else is
+	// reported as MSG_ILLEGAL_OPTION.
+	public static final Set legalOptions =
+			new HashSet() {{add("k"); add("greedy"); add("memoize"); add("backtrack");}};
+
+	/** The AST representing the whole rule */
+	public GrammarAST tree;
+
+	/** To which grammar does this belong? */
+	public Grammar grammar;
+
+	/** For convenience, track the argument def AST action node if any */
+	public GrammarAST argActionAST;
+
+	// End-of-rule AST node; semantics not visible here — see Grammar/ANTLRParser.
+	public GrammarAST EORNode;
+
+	/** The return values of a rule and predefined rule attributes */
+	public AttributeScope returnScope;
+
+	public AttributeScope parameterScope;
+
+	/** the attributes defined with "scope {...}" inside a rule */
+	public AttributeScope ruleScope;
+
+	/** A list of scope names (String) used by this rule */
+	public List useScopes;
+
+	/** A list of all LabelElementPair attached to tokens like id=ID */
+	public LinkedHashMap tokenLabels;
+
+	/** A list of all LabelElementPair attached to single char literals like x='a' */
+	public LinkedHashMap charLabels;
+
+	/** A list of all LabelElementPair attached to rule references like f=field */
+	public LinkedHashMap ruleLabels;
+
+	/** A list of all Token list LabelElementPair like ids+=ID */
+	public LinkedHashMap tokenListLabels;
+
+	/** A list of all rule ref list LabelElementPair like ids+=expr */
+	public LinkedHashMap ruleListLabels;
+
+	/** All labels go in here (plus being split per the above lists) to
+	 *  catch dup label and label type mismatches.
+	 */
+	protected Map<String, Grammar.LabelElementPair> labelNameSpace =
+		new HashMap<String, Grammar.LabelElementPair>();
+
+	/** Map a name to an action for this rule.  Currently init is only
+	 *  one we use, but we can add more in future.
+	 *  The code generator will use this to fill holes in the rule template.
+	 *  I track the AST node for the action in case I need the line number
+	 *  for errors.  A better name is probably namedActions, but I don't
+	 *  want everyone to have to change their code gen templates now.
+	 */
+	protected Map<String, GrammarAST> actions =
+		new HashMap<String, GrammarAST>();
+
+	/** Track all executable actions other than named actions like @init.
+	 *  Also tracks exception handlers, predicates, and rewrites.
+	 *  We need to examine these actions before code generation so
+	 *  that we can detect refs to $rule.attr etc...
+	 */
+	protected List<GrammarAST> inlineActions = new ArrayList<GrammarAST>();
+
+	public int numberOfAlts;
+
+	/** Each alt has a Map<tokenRefName,List<tokenRefAST>>; range 1..numberOfAlts.
+	 *  So, if there are 3 ID refs in a rule's alt number 2, you'll have
+	 *  altToTokenRef[2].get("ID").size()==3.  This is used to see if $ID is ok.
+	 *  There must be only one ID reference in the alt for $ID to be ok in
+	 *  an action--must be unique.
+	 *
+	 *  This also tracks '+' and "int" literal token references
+	 *  (if not in LEXER).
+	 *
+	 *  Rewrite rules force tracking of all tokens.
+	 */
+	protected Map<String, List<GrammarAST>>[] altToTokenRefMap;
+
+	/** Each alt has a Map<ruleRefName,List<ruleRefAST>>; range 1..numberOfAlts
+	 *  So, if there are 3 expr refs in a rule's alt number 2, you'll have
+	 *  altToRuleRef[2].get("expr").size()==3.  This is used to see if $expr is ok.
+	 *  There must be only one expr reference in the alt for $expr to be ok in
+	 *  an action--must be unique.
+	 *
+	 *  Rewrite rules force tracking of all rule result ASTs. 1..n
+	 */
+	protected Map<String, List<GrammarAST>>[] altToRuleRefMap;
+
+	/** Track which alts have rewrite rules associated with them. 1..n */
+	protected boolean[] altsWithRewrites;
+
+	/** Do not generate start, stop etc... in a return value struct unless
+	 *  somebody references $r.start somewhere.
+	 */
+	public boolean referencedPredefinedRuleAttributes = false;
+
+	public boolean isSynPred = false;
+
+	public Rule(Grammar grammar,
+				String ruleName,
+				int ruleIndex,
+				int numberOfAlts)
+	{
+		this.name = ruleName;
+		this.index = ruleIndex;
+		this.numberOfAlts = numberOfAlts;
+		this.grammar = grammar;
+		// Alts are numbered 1..numberOfAlts; slot 0 of these arrays is unused.
+		altToTokenRefMap = new Map[numberOfAlts+1];
+		altToRuleRefMap = new Map[numberOfAlts+1];
+		altsWithRewrites = new boolean[numberOfAlts+1];
+		for (int alt=1; alt<=numberOfAlts; alt++) {
+			altToTokenRefMap[alt] = new HashMap<String, List<GrammarAST>>();
+			altToRuleRefMap[alt] = new HashMap<String, List<GrammarAST>>();
+		}
+	}
+
+	/** Record a label such as x=ID or ids+=expr: it always goes into the
+	 *  shared label namespace (for dup/type-mismatch checks) and also into
+	 *  the per-type map, which is created lazily on first use.
+	 */
+	public void defineLabel(antlr.Token label, GrammarAST elementRef, int type) {
+		Grammar.LabelElementPair pair = grammar.new LabelElementPair(label,elementRef);
+		pair.type = type;
+		labelNameSpace.put(label.getText(), pair);
+		switch ( type ) {
+			case Grammar.TOKEN_LABEL :
+				if ( tokenLabels==null ) {
+					tokenLabels = new LinkedHashMap();
+				}
+				tokenLabels.put(label.getText(), pair);
+				break;
+			case Grammar.RULE_LABEL :
+				if ( ruleLabels==null ) {
+					ruleLabels = new LinkedHashMap();
+				}
+				ruleLabels.put(label.getText(), pair);
+				break;
+			case Grammar.TOKEN_LIST_LABEL :
+				if ( tokenListLabels==null ) {
+					tokenListLabels = new LinkedHashMap();
+				}
+				tokenListLabels.put(label.getText(), pair);
+				break;
+			case Grammar.RULE_LIST_LABEL :
+				if ( ruleListLabels==null ) {
+					ruleListLabels = new LinkedHashMap();
+				}
+				ruleListLabels.put(label.getText(), pair);
+				break;
+			case Grammar.CHAR_LABEL :
+				if ( charLabels==null ) {
+					charLabels = new LinkedHashMap();
+				}
+				charLabels.put(label.getText(), pair);
+				break;
+		}
+	}
+
+	/** Look up any label by name, regardless of label type. */
+	public Grammar.LabelElementPair getLabel(String name) {
+		return (Grammar.LabelElementPair)labelNameSpace.get(name);
+	}
+
+	/** Token label lookup; returns null if no token labels exist or name unknown. */
+	public Grammar.LabelElementPair getTokenLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( tokenLabels!=null ) {
+			return (Grammar.LabelElementPair)tokenLabels.get(name);
+		}
+		return pair;
+	}
+
+	// May be null if the rule defines no labels of the corresponding kind.
+	public Map getRuleLabels() {
+		return ruleLabels;
+	}
+
+	public Map getRuleListLabels() {
+		return ruleListLabels;
+	}
+
+	public Grammar.LabelElementPair getRuleLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( ruleLabels!=null ) {
+			return (Grammar.LabelElementPair)ruleLabels.get(name);
+		}
+		return pair;
+	}
+
+	public Grammar.LabelElementPair getTokenListLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( tokenListLabels!=null ) {
+			return (Grammar.LabelElementPair)tokenListLabels.get(name);
+		}
+		return pair;
+	}
+
+	public Grammar.LabelElementPair getRuleListLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( ruleListLabels!=null ) {
+			return (Grammar.LabelElementPair)ruleListLabels.get(name);
+		}
+		return pair;
+	}
+
+	/** Track a token ID or literal like '+' and "void" as having been referenced
+	 *  somewhere within the alts (not rewrite sections) of a rule.
+	 *
+	 *  This differs from Grammar.altReferencesTokenID(), which tracks all
+	 *  token IDs to check for token IDs without corresponding lexer rules.
+	 */
+	public void trackTokenReferenceInAlt(GrammarAST refAST, int outerAltNum) {
+		List refs = (List)altToTokenRefMap[outerAltNum].get(refAST.getText());
+		if ( refs==null ) {
+			// first reference to this token in this alt
+			refs = new ArrayList();
+			altToTokenRefMap[outerAltNum].put(refAST.getText(), refs);
+		}
+		refs.add(refAST);
+	}
+
+	/** All AST refs to token 'ref' within alt outerAltNum, or null if none. */
+	public List getTokenRefsInAlt(String ref, int outerAltNum) {
+		if ( altToTokenRefMap[outerAltNum]!=null ) {
+			List tokenRefASTs = (List)altToTokenRefMap[outerAltNum].get(ref);
+			return tokenRefASTs;
+		}
+		return null;
+	}
+
+	/** Same tracking as trackTokenReferenceInAlt but for rule references. */
+	public void trackRuleReferenceInAlt(GrammarAST refAST, int outerAltNum) {
+		List refs = (List)altToRuleRefMap[outerAltNum].get(refAST.getText());
+		if ( refs==null ) {
+			refs = new ArrayList();
+			altToRuleRefMap[outerAltNum].put(refAST.getText(), refs);
+		}
+		refs.add(refAST);
+	}
+
+	/** All AST refs to rule 'ref' within alt outerAltNum, or null if none. */
+	public List getRuleRefsInAlt(String ref, int outerAltNum) {
+		if ( altToRuleRefMap[outerAltNum]!=null ) {
+			List ruleRefASTs = (List)altToRuleRefMap[outerAltNum].get(ref);
+			return ruleRefASTs;
+		}
+		return null;
+	}
+
+	/** Names of all tokens referenced in alt altNum. */
+	public Set getTokenRefsInAlt(int altNum) {
+		return altToTokenRefMap[altNum].keySet();
+	}
+
+	/** For use with rewrite rules, we must track all tokens matched on the
+	 *  left-hand-side; so we need Lists.  This is a unique list of all
+	 *  token types for which the rule needs a list of tokens.  This
+	 *  is called from the rule template not directly by the code generator.
+	 */
+	public Set getAllTokenRefsInAltsWithRewrites() {
+		String output = (String)grammar.getOption("output");
+		Set tokens = new HashSet();
+		if ( output==null || !output.equals("AST") ) {
+			// return nothing if not generating trees; i.e., don't do for templates
+			return tokens;
+		}
+		for (int i = 1; i <= numberOfAlts; i++) {
+			if ( altsWithRewrites[i] ) {
+				Map m = altToTokenRefMap[i];
+				Set s = m.keySet();
+				for (Iterator it = s.iterator(); it.hasNext();) {
+					// convert token name like ID to ID, "void" to 31
+					String tokenName = (String) it.next();
+					int ttype = grammar.getTokenType(tokenName);
+					String label = grammar.generator.getTokenTypeAsTargetLabel(ttype);
+					tokens.add(label);
+				}
+			}
+		}
+		return tokens;
+	}
+
+	/** Names of all rules referenced in alt outerAltNum. */
+	public Set getRuleRefsInAlt(int outerAltNum) {
+		return altToRuleRefMap[outerAltNum].keySet();
+	}
+
+	/** For use with rewrite rules, we must track all rule AST results on the
+	 *  left-hand-side; so we need Lists.  This is a unique list of all
+	 *  rule results for which the rule needs a list of results.
+	 */
+	public Set getAllRuleRefsInAltsWithRewrites() {
+		Set rules = new HashSet();
+		for (int i = 1; i <= numberOfAlts; i++) {
+			if ( altsWithRewrites[i] ) {
+				Map m = altToRuleRefMap[i];
+				rules.addAll(m.keySet());
+			}
+		}
+		return rules;
+	}
+
+	public List<GrammarAST> getInlineActions() {
+		return inlineActions;
+	}
+
+	/** Does alt i (1..numberOfAlts) have a rewrite rule attached? */
+	public boolean hasRewrite(int i) {
+		return altsWithRewrites[i];
+	}
+
+	/** Track which rules have rewrite rules.  Pass in the ALT node
+	 *  for the alt so we can check for problems when output=template,
+	 *  rewrite=true, and grammar type is tree parser.
+	 */
+	public void trackAltsWithRewrites(GrammarAST altAST, int outerAltNum) {
+		if ( grammar.type==Grammar.TREE_PARSER &&
+			 grammar.buildTemplate() &&
+			 grammar.getOption("rewrite")!=null &&
+			 grammar.getOption("rewrite").equals("true")
+			)
+		{
+			GrammarAST firstElementAST = (GrammarAST)altAST.getFirstChild();
+			grammar.sanity.ensureAltIsSimpleNodeOrTree(altAST,
+													   firstElementAST,
+													   outerAltNum);
+		}
+		altsWithRewrites[outerAltNum] = true;
+	}
+
+	/** Return the scope containing name */
+	public AttributeScope getAttributeScope(String name) {
+		AttributeScope scope = getLocalAttributeScope(name);
+		if ( scope!=null ) {
+			return scope;
+		}
+		// fall back to the rule's dynamic "scope {...}" attributes
+		if ( ruleScope!=null && ruleScope.getAttribute(name)!=null ) {
+			scope = ruleScope;
+		}
+		return scope;
+	}
+
+	/** Get the arg, return value, or predefined property for this rule */
+	public AttributeScope getLocalAttributeScope(String name) {
+		AttributeScope scope = null;
+		if ( returnScope!=null && returnScope.getAttribute(name)!=null ) {
+			scope = returnScope;
+		}
+		else if ( parameterScope!=null && parameterScope.getAttribute(name)!=null ) {
+			scope = parameterScope;
+		}
+		else {
+			// predefined attributes ($text, $start, ...) depend on grammar type
+			AttributeScope rulePropertiesScope =
+				RuleLabelScope.grammarTypeToRulePropertiesScope[grammar.type];
+			if ( rulePropertiesScope.getAttribute(name)!=null ) {
+				scope = rulePropertiesScope;
+			}
+		}
+		return scope;
+	}
+
+	/** For references to tokens rather than by label such as $ID, we
+	 *  need to get the existing label for the ID ref or create a new
+	 *  one.
+	 */
+	public String getElementLabel(String refdSymbol,
+								  int outerAltNum,
+								  CodeGenerator generator)
+	{
+		GrammarAST uniqueRefAST;
+		// uppercase first letter means token reference (except in lexers)
+		if ( grammar.type != Grammar.LEXER &&
+			 Character.isUpperCase(refdSymbol.charAt(0)) )
+		{
+			// symbol is a token
+			List tokenRefs = getTokenRefsInAlt(refdSymbol, outerAltNum);
+			uniqueRefAST = (GrammarAST)tokenRefs.get(0);
+		}
+		else {
+			// symbol is a rule
+			List ruleRefs = getRuleRefsInAlt(refdSymbol, outerAltNum);
+			uniqueRefAST = (GrammarAST)ruleRefs.get(0);
+		}
+		if ( uniqueRefAST.code==null ) {
+			// no code?  must not have gen'd yet; forward ref
+			return null;
+		}
+		String labelName = null;
+		String existingLabelName =
+			(String)uniqueRefAST.code.getAttribute("label");
+		// reuse any label or list label if it exists
+		if ( existingLabelName!=null ) {
+			labelName = existingLabelName;
+		}
+		else {
+			// else create new label
+			labelName = generator.createUniqueLabel(refdSymbol);
+			CommonToken label = new CommonToken(ANTLRParser.ID, labelName);
+			if ( grammar.type != Grammar.LEXER &&
+				 Character.isUpperCase(refdSymbol.charAt(0)) )
+			{
+				grammar.defineTokenRefLabel(name, label, uniqueRefAST);
+			}
+			else {
+				grammar.defineRuleRefLabel(name, label, uniqueRefAST);
+			}
+			uniqueRefAST.code.setAttribute("label", labelName);
+		}
+		return labelName;
+	}
+
+	/** If a rule has no user-defined return values and nobody references
+	 *  its start/stop (predefined attributes), then there is no need to
+	 *  define a struct; otherwise for now we assume a struct.  A rule also
+	 *  has multiple return values if you are building trees or templates.
+	 */
+	public boolean getHasMultipleReturnValues() {
+		return
+			referencedPredefinedRuleAttributes || grammar.buildAST() ||
+			grammar.buildTemplate() ||
+			(returnScope!=null && returnScope.attributes.size()>1);
+	}
+
+	/** Exactly one user-defined return value and no predefined-attr refs. */
+	public boolean getHasSingleReturnValue() {
+		return
+			!(referencedPredefinedRuleAttributes || grammar.buildAST() ||
+			  grammar.buildTemplate()) &&
+									   (returnScope!=null && returnScope.attributes.size()==1);
+	}
+
+	/** At least one return value of any kind (user-defined or implied). */
+	public boolean getHasReturnValue() {
+		return
+			referencedPredefinedRuleAttributes || grammar.buildAST() ||
+			grammar.buildTemplate() ||
+			(returnScope!=null && returnScope.attributes.size()>0);
+	}
+
+	/** Type of the single return value, or null when not exactly one. */
+	public String getSingleValueReturnType() {
+		if ( returnScope!=null && returnScope.attributes.size()==1 ) {
+			Collection retvalAttrs = returnScope.attributes.values();
+			Object[] javaSucks = retvalAttrs.toArray();
+			return ((Attribute)javaSucks[0]).type;
+		}
+		return null;
+	}
+
+	/** Name of the single return value, or null when not exactly one. */
+	public String getSingleValueReturnName() {
+		if ( returnScope!=null && returnScope.attributes.size()==1 ) {
+			Collection retvalAttrs = returnScope.attributes.values();
+			Object[] javaSucks = retvalAttrs.toArray();
+			return ((Attribute)javaSucks[0]).name;
+		}
+		return null;
+	}
+
+	/** Given @scope::name {action} define it for this grammar.  Later,
+	 *  the code generator will ask for the actions table.
+	 */
+	public void defineNamedAction(GrammarAST ampersandAST,
+								  GrammarAST nameAST,
+								  GrammarAST actionAST)
+	{
+		//System.out.println("rule @"+nameAST.getText()+"{"+actionAST.getText()+"}");
+		String actionName = nameAST.getText();
+		GrammarAST a = (GrammarAST)actions.get(actionName);
+		if ( a!=null ) {
+			// duplicate named action for this rule; keep the first, report error
+			ErrorManager.grammarError(
+				ErrorManager.MSG_ACTION_REDEFINITION,grammar,
+				nameAST.getToken(),nameAST.getText());
+		}
+		else {
+			actions.put(actionName,actionAST);
+		}
+	}
+
+	public void trackInlineAction(GrammarAST actionAST) {
+		inlineActions.add(actionAST);
+	}
+
+	public Map<String, GrammarAST> getActions() {
+		return actions;
+	}
+
+	public void setActions(Map<String, GrammarAST> actions) {
+		this.actions = actions;
+	}
+
+	/** Save the option key/value pair and process it; return the key
+	 *  or null if invalid option.
+	 */
+	public String setOption(String key, Object value, antlr.Token optionsStartToken) {
+		if ( !legalOptions.contains(key) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
+									  grammar,
+									  optionsStartToken,
+									  key);
+			return null;
+		}
+		if ( options==null ) {
+			options = new HashMap();
+		}
+		if ( key.equals("k") ) {
+			// count manual lookahead settings grammar-wide
+			grammar.numberOfManualLookaheadOptions++;
+		}
+		 options.put(key, value);
+		return key;
+	}
+
+	/** Set all options at once; invalid ones are removed from the caller's map. */
+	public void setOptions(Map options, antlr.Token optionsStartToken) {
+		if ( options==null ) {
+			this.options = null;
+			return;
+		}
+		Set keys = options.keySet();
+		for (Iterator it = keys.iterator(); it.hasNext();) {
+			String optionName = (String) it.next();
+			Object optionValue = options.get(optionName);
+			String stored=setOption(optionName, optionValue, optionsStartToken);
+			if ( stored==null ) {
+				// illegal option: drop it from the incoming map as well
+				it.remove();
+			}
+		}
+	}
+
+	public String toString() { // used for testing
+		if ( modifier!=null ) {
+			return modifier+" "+name;
+		}
+		return name;
+	}
+}
diff --git a/src/org/antlr/tool/RuleLabelScope.java b/src/org/antlr/tool/RuleLabelScope.java
new file mode 100644
index 0000000..256320b
--- /dev/null
+++ b/src/org/antlr/tool/RuleLabelScope.java
@@ -0,0 +1,99 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import antlr.Token;
+
+public class RuleLabelScope extends AttributeScope {
+	/** Rules have a predefined set of attributes as well as
+	 *  the return values; 'text' must be computed on the fly, though.
+	 */
+	public static AttributeScope predefinedRulePropertiesScope =
+		new AttributeScope("RulePredefined",null) {{
+			addAttribute("text", null);
+			addAttribute("start", null);
+			addAttribute("stop", null);
+			addAttribute("tree", null);
+			addAttribute("st", null);
+			isPredefinedRuleScope = true;
+		}};
+
+	public static AttributeScope predefinedTreeRulePropertiesScope =
+		new AttributeScope("RulePredefined",null) {{
+			addAttribute("text", null);
+			addAttribute("start", null); // note: no stop; not meaningful
+			addAttribute("tree", null);
+			addAttribute("st", null);
+			isPredefinedRuleScope = true;
+		}};
+
+	public static AttributeScope predefinedLexerRulePropertiesScope =
+		new AttributeScope("LexerRulePredefined",null) {{
+			addAttribute("text", null);
+			addAttribute("type", null);
+			addAttribute("line", null);
+			addAttribute("index", null);
+			addAttribute("pos", null);
+			addAttribute("channel", null);
+			addAttribute("start", null);
+			addAttribute("stop", null);
+			isPredefinedLexerRuleScope = true;
+		}};
+
+	// Indexed by Grammar type constant; slot 0 is unused (null).
+	public static AttributeScope[] grammarTypeToRulePropertiesScope =
+		{
+			null,
+			predefinedLexerRulePropertiesScope,	// LEXER
+			predefinedRulePropertiesScope,		// PARSER
+			predefinedTreeRulePropertiesScope,		// TREE_PARSER
+			predefinedRulePropertiesScope,		// COMBINED
+		};
+
+	/** The rule this label points at (x=rulename). */
+	public Rule referencedRule;
+
+	public RuleLabelScope(Rule referencedRule, Token actionToken) {
+		super("ref_"+referencedRule.name,actionToken);
+		this.referencedRule = referencedRule;
+	}
+
+	/** If you label a rule reference, you can access that rule's
+	 *  return values as well as any predefined attributes.
+	 */
+	public Attribute getAttribute(String name) {
+		// NOTE(review): 'grammar' is not declared in this class; presumably
+		// a field inherited from AttributeScope — confirm against that class.
+		AttributeScope rulePropertiesScope =
+			RuleLabelScope.grammarTypeToRulePropertiesScope[grammar.type];
+		if ( rulePropertiesScope.getAttribute(name)!=null ) {
+			return rulePropertiesScope.getAttribute(name);
+		}
+
+		// fall back to the referenced rule's declared return values
+		if ( referencedRule.returnScope!=null ) {
+			return referencedRule.returnScope.getAttribute(name);
+		}
+		return null;
+	}
+}
diff --git a/src/org/antlr/tool/ToolMessage.java b/src/org/antlr/tool/ToolMessage.java
new file mode 100644
index 0000000..6506a44
--- /dev/null
+++ b/src/org/antlr/tool/ToolMessage.java
@@ -0,0 +1,75 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.stringtemplate.StringTemplate;
+
+/** A generic message from the tool such as "file not found" type errors; there
+ *  is no reason to create a special object for each error unlike the grammar
+ *  errors, which may be rather complex.
+ *
+ *  Sometimes you need to pass in a filename or something to say it is "bad".
+ *  Allow a generic object to be passed in and the string template can deal
+ *  with just printing it or pulling a property out of it.
+ *
+ *  TODO what to do with exceptions?  Want stack trace for internal errors?
+ */
+public class ToolMessage extends Message {
+
+	/** Message with no arguments. */
+	public ToolMessage(int msgID) {
+		super(msgID, null, null);
+	}
+	/** Message with one template argument. */
+	public ToolMessage(int msgID, Object arg) {
+		super(msgID, arg, null);
+	}
+	/** Message carrying an exception (e is a field on Message). */
+	public ToolMessage(int msgID, Throwable e) {
+		super(msgID);
+		this.e = e;
+	}
+	/** Message with two template arguments. */
+	public ToolMessage(int msgID, Object arg, Object arg2) {
+		super(msgID, arg, arg2);
+	}
+	/** Message with one argument plus an exception. */
+	public ToolMessage(int msgID, Object arg, Throwable e) {
+		super(msgID,arg,null);
+		this.e = e;
+	}
+	/** Render via the message's StringTemplate, setting only the
+	 *  attributes that are actually present on this message.
+	 */
+	public String toString() {
+		StringTemplate st = getMessageTemplate();
+		if ( arg!=null ) {
+			st.setAttribute("arg", arg);
+		}
+		if ( arg2!=null ) {
+			st.setAttribute("arg2", arg2);
+		}
+		if ( e!=null ) {
+			st.setAttribute("exception", e);
+			st.setAttribute("stackTrace", e.getStackTrace());
+		}
+		return super.toString(st);
+	}
+}
diff --git a/src/org/antlr/tool/TreeToNFAConverter.java b/src/org/antlr/tool/TreeToNFAConverter.java
new file mode 100644
index 0000000..e07e8c2
--- /dev/null
+++ b/src/org/antlr/tool/TreeToNFAConverter.java
@@ -0,0 +1,2852 @@
+// $ANTLR 2.7.7 (2006-01-29): "buildnfa.g" -> "TreeToNFAConverter.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+
+import antlr.TreeParser;
+import antlr.Token;
+import antlr.collections.AST;
+import antlr.RecognitionException;
+import antlr.ANTLRException;
+import antlr.NoViableAltException;
+import antlr.MismatchedTokenException;
+import antlr.SemanticException;
+import antlr.collections.impl.BitSet;
+import antlr.ASTPair;
+import antlr.collections.impl.ASTArray;
+
+
+/** Build an NFA from a tree representing an ANTLR grammar. */
+public class TreeToNFAConverter extends antlr.TreeParser       implements TreeToNFAConverterTokenTypes
+ {
+
+/** Factory used to create nodes and submachines */
+protected NFAFactory factory = null;
+
+/** Which NFA object are we filling in? */
+protected NFA nfa = null;
+
+/** Which grammar are we converting an NFA for? */
+protected Grammar grammar = null;
+
+protected String currentRuleName = null;
+
+protected int outerAltNum = 0;
+protected int blockLevel = 0;
+
+public TreeToNFAConverter(Grammar g, NFA nfa, NFAFactory factory) {
+	this();
+	this.grammar = g;
+	this.nfa = nfa;
+	this.factory = factory;
+}
+
+protected void init() {
+    // define all the rule begin/end NFAStates to solve forward reference issues
+    Collection rules = grammar.getRules();
+    for (Iterator itr = rules.iterator(); itr.hasNext();) {
+		Rule r = (Rule) itr.next();
+        String ruleName = r.name;
+        NFAState ruleBeginState = factory.newState();
+        ruleBeginState.setDescription("rule "+ruleName+" start");
+		ruleBeginState.setEnclosingRuleName(ruleName);
+        grammar.setRuleStartState(ruleName, ruleBeginState);
+        NFAState ruleEndState = factory.newState();
+        ruleEndState.setDescription("rule "+ruleName+" end");
+        ruleEndState.setAcceptState(true);
+		ruleEndState.setEnclosingRuleName(ruleName);
+        grammar.setRuleStopState(ruleName, ruleEndState);
+    }
+}
+
+protected void addFollowTransition(String ruleName, NFAState following) {
+     //System.out.println("adding follow link to rule "+ruleName);
+     // find last link in FOLLOW chain emanating from rule
+     NFAState end = grammar.getRuleStopState(ruleName);
+     while ( end.transition(1)!=null ) {
+         end = (NFAState)end.transition(1).target;
+     }
+     if ( end.transition(0)!=null ) {
+         // already points to a following node
+         // gotta add another node to keep edges to a max of 2
+         NFAState n = factory.newState();
+         Transition e = new Transition(Label.EPSILON, n);
+         end.addTransition(e);
+         end = n;
+     }
+     Transition followEdge = new Transition(Label.EPSILON, following);
+     end.addTransition(followEdge);
+}
+
+protected void finish() {
+    List rules = new LinkedList();
+    rules.addAll(grammar.getRules());
+    int numEntryPoints = factory.build_EOFStates(rules);
+    if ( numEntryPoints==0 ) {
+        ErrorManager.grammarWarning(ErrorManager.MSG_NO_GRAMMAR_START_RULE,
+                                   grammar,
+                                   null,
+                                   grammar.name);
+    }
+}
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "buildnfa: "+ex.toString(),
+            ex);
+    }
+public TreeToNFAConverter() {
+	tokenNames = _tokenNames;
+}
+
+	public final void grammar(AST _t) throws RecognitionException {
+		
+		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			init();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LEXER_GRAMMAR:
+			{
+				AST __t3 = _t;
+				GrammarAST tmp1_AST_in = (GrammarAST)_t;
+				match(_t,LEXER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t3;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PARSER_GRAMMAR:
+			{
+				AST __t4 = _t;
+				GrammarAST tmp2_AST_in = (GrammarAST)_t;
+				match(_t,PARSER_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t4;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TREE_GRAMMAR:
+			{
+				AST __t5 = _t;
+				GrammarAST tmp3_AST_in = (GrammarAST)_t;
+				match(_t,TREE_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t5;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case COMBINED_GRAMMAR:
+			{
+				AST __t6 = _t;
+				GrammarAST tmp4_AST_in = (GrammarAST)_t;
+				match(_t,COMBINED_GRAMMAR);
+				_t = _t.getFirstChild();
+				grammarSpec(_t);
+				_t = _retTree;
+				_t = __t6;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			finish();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void grammarSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST cmt = null;
+		
+		try {      // for error handling
+			GrammarAST tmp5_AST_in = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case DOC_COMMENT:
+			{
+				cmt = (GrammarAST)_t;
+				match(_t,DOC_COMMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case OPTIONS:
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				AST __t12 = _t;
+				GrammarAST tmp6_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getFirstChild();
+				GrammarAST tmp7_AST_in = (GrammarAST)_t;
+				if ( _t==null ) throw new MismatchedTokenException();
+				_t = _t.getNextSibling();
+				_t = __t12;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TOKENS:
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case TOKENS:
+			{
+				AST __t14 = _t;
+				GrammarAST tmp8_AST_in = (GrammarAST)_t;
+				match(_t,TOKENS);
+				_t = _t.getFirstChild();
+				GrammarAST tmp9_AST_in = (GrammarAST)_t;
+				if ( _t==null ) throw new MismatchedTokenException();
+				_t = _t.getNextSibling();
+				_t = __t14;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RULE:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop16:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==SCOPE)) {
+					attrScope(_t);
+					_t = _retTree;
+				}
+				else {
+					break _loop16;
+				}
+				
+			} while (true);
+			}
+			{
+			_loop18:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp10_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop18;
+				}
+				
+			} while (true);
+			}
+			rules(_t);
+			_t = _retTree;
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void attrScope(AST _t) throws RecognitionException {
+		
+		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t8 = _t;
+			GrammarAST tmp11_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			GrammarAST tmp12_AST_in = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			GrammarAST tmp13_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t8;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rules(AST _t) throws RecognitionException {
+		
+		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			int _cnt21=0;
+			_loop21:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==RULE)) {
+					rule(_t);
+					_t = _retTree;
+				}
+				else {
+					if ( _cnt21>=1 ) { break _loop21; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt21++;
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void rule(AST _t) throws RecognitionException {
+		
+		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		
+		StateCluster g=null;
+		StateCluster b = null;
+		String r=null;
+		
+		
+		try {      // for error handling
+			AST __t23 = _t;
+			GrammarAST tmp14_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			r=id.getText();
+			currentRuleName = r; factory.currentRuleName = r;
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			GrammarAST tmp15_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				GrammarAST tmp16_AST_in = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RET:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			}
+			{
+			GrammarAST tmp17_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ARG_ACTION:
+			{
+				GrammarAST tmp18_AST_in = (GrammarAST)_t;
+				match(_t,ARG_ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case OPTIONS:
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp19_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop32:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp20_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop32;
+				}
+				
+			} while (true);
+			}
+			GrammarAST blk = (GrammarAST)_t;
+			b=block(_t);
+			_t = _retTree;
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp21_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			
+			if ( blk.setValue!=null ) {
+			// if block comes back as a set not BLOCK, make it
+			// a single ALT block
+			b = factory.build_AlternativeBlockFromSet(b);
+			}
+							if ( Character.isLowerCase(r.charAt(0)) ||
+								 grammar.type==Grammar.LEXER )
+							{
+								// attach start node to block for this rule
+								NFAState start = grammar.getRuleStartState(r);
+								start.setAssociatedASTNode(id);
+								start.addTransition(new Transition(Label.EPSILON, b.left));
+			
+								// track decision if > 1 alts
+								if ( grammar.getNumberOfAltsForDecisionNFA(b.left)>1 ) {
+									b.left.setDescription(grammar.grammarTreeToString(rule_AST_in,false));
+									b.left.setDecisionASTNode(blk);
+									int d = grammar.assignDecisionNumber( b.left );
+									grammar.setDecisionNFA( d, b.left );
+				grammar.setDecisionBlockAST(d, blk);
+								}
+			
+								// hook to end of rule node
+								NFAState end = grammar.getRuleStopState(r);
+								b.right.addTransition(new Transition(Label.EPSILON,end));
+							}
+			
+			_t = __t23;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void modifier(AST _t) throws RecognitionException {
+		
+		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_protected:
+			{
+				GrammarAST tmp22_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_protected);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_public:
+			{
+				GrammarAST tmp23_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_public);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case LITERAL_private:
+			{
+				GrammarAST tmp24_AST_in = (GrammarAST)_t;
+				match(_t,LITERAL_private);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case FRAGMENT:
+			{
+				GrammarAST tmp25_AST_in = (GrammarAST)_t;
+				match(_t,FRAGMENT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void ruleScopeSpec(AST _t) throws RecognitionException {
+		
+		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t36 = _t;
+			GrammarAST tmp26_AST_in = (GrammarAST)_t;
+			match(_t,SCOPE);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ACTION:
+			{
+				GrammarAST tmp27_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case 3:
+			case ID:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			_loop39:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ID)) {
+					GrammarAST tmp28_AST_in = (GrammarAST)_t;
+					match(_t,ID);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop39;
+				}
+				
+			} while (true);
+			}
+			_t = __t36;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final StateCluster  block(AST _t) throws RecognitionException {
+		StateCluster g = null;
+		
+		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StateCluster a = null;
+		List alts = new LinkedList();
+		this.blockLevel++;
+		if ( this.blockLevel==1 ) {this.outerAltNum=1;}
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			if (((_t.getType()==BLOCK))&&(grammar.isValidSet(this,block_AST_in) &&
+		 !currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME))) {
+				g=set(_t);
+				_t = _retTree;
+				this.blockLevel--;
+			}
+			else if ((_t.getType()==BLOCK)) {
+				AST __t41 = _t;
+				GrammarAST tmp29_AST_in = (GrammarAST)_t;
+				match(_t,BLOCK);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case OPTIONS:
+				{
+					GrammarAST tmp30_AST_in = (GrammarAST)_t;
+					match(_t,OPTIONS);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case ALT:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				{
+				int _cnt44=0;
+				_loop44:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==ALT)) {
+						a=alternative(_t);
+						_t = _retTree;
+						rewrite(_t);
+						_t = _retTree;
+						
+						alts.add(a);
+						if ( this.blockLevel==1 ) {this.outerAltNum++;}
+						
+					}
+					else {
+						if ( _cnt44>=1 ) { break _loop44; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt44++;
+				} while (true);
+				}
+				GrammarAST tmp31_AST_in = (GrammarAST)_t;
+				match(_t,EOB);
+				_t = _t.getNextSibling();
+				_t = __t41;
+				_t = _t.getNextSibling();
+				g = factory.build_AlternativeBlock(alts);
+				this.blockLevel--;
+			}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	public final void exceptionGroup(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			{
+				{
+				int _cnt51=0;
+				_loop51:
+				do {
+					if (_t==null) _t=ASTNULL;
+					if ((_t.getType()==LITERAL_catch)) {
+						exceptionHandler(_t);
+						_t = _retTree;
+					}
+					else {
+						if ( _cnt51>=1 ) { break _loop51; } else {throw new NoViableAltException(_t);}
+					}
+					
+					_cnt51++;
+				} while (true);
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case LITERAL_finally:
+				{
+					finallyClause(_t);
+					_t = _retTree;
+					break;
+				}
+				case EOR:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				break;
+			}
+			case LITERAL_finally:
+			{
+				finallyClause(_t);
+				_t = _retTree;
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final StateCluster  set(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST set_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST b = null;
+		
+		IntSet elements=new IntervalSet();
+		set_AST_in.setSetValue(elements); // track set for use by code gen
+		
+		
+		try {      // for error handling
+			AST __t99 = _t;
+			b = _t==ASTNULL ? null :(GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			int _cnt103=0;
+			_loop103:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					AST __t101 = _t;
+					GrammarAST tmp32_AST_in = (GrammarAST)_t;
+					match(_t,ALT);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BACKTRACK_SEMPRED:
+					{
+						GrammarAST tmp33_AST_in = (GrammarAST)_t;
+						match(_t,BACKTRACK_SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case BLOCK:
+					case CHAR_RANGE:
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case TOKEN_REF:
+					case NOT:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					setElement(_t,elements);
+					_t = _retTree;
+					GrammarAST tmp34_AST_in = (GrammarAST)_t;
+					match(_t,EOA);
+					_t = _t.getNextSibling();
+					_t = __t101;
+					_t = _t.getNextSibling();
+				}
+				else {
+					if ( _cnt103>=1 ) { break _loop103; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt103++;
+			} while (true);
+			}
+			GrammarAST tmp35_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t99;
+			_t = _t.getNextSibling();
+			
+			g = factory.build_Set(elements);
+			b.followingNFAState = g.right;
+			b.setValue = elements; // track set value of this block
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	public final StateCluster  alternative(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StateCluster e = null;
+		
+		
+		try {      // for error handling
+			AST __t46 = _t;
+			GrammarAST tmp36_AST_in = (GrammarAST)_t;
+			match(_t,ALT);
+			_t = _t.getFirstChild();
+			{
+			int _cnt48=0;
+			_loop48:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_tokenSet_0.member(_t.getType()))) {
+					e=element(_t);
+					_t = _retTree;
+					g = factory.build_AB(g,e);
+				}
+				else {
+					if ( _cnt48>=1 ) { break _loop48; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt48++;
+			} while (true);
+			}
+			_t = __t46;
+			_t = _t.getNextSibling();
+			
+			if (g==null) { // if alt was a list of actions or whatever
+			g = factory.build_Epsilon();
+			}
+			else {
+				factory.optimizeAlternative(g);
+			}
+			
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	public final void rewrite(AST _t) throws RecognitionException {
+		
+		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			{
+			_loop62:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==REWRITE)) {
+					
+								if ( grammar.getOption("output")==null ) {
+									ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+															  grammar, rewrite_AST_in.token, currentRuleName);
+								}
+								
+					AST __t59 = _t;
+					GrammarAST tmp37_AST_in = (GrammarAST)_t;
+					match(_t,REWRITE);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case SEMPRED:
+					{
+						GrammarAST tmp38_AST_in = (GrammarAST)_t;
+						match(_t,SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case ALT:
+					case TEMPLATE:
+					case ACTION:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case ALT:
+					{
+						GrammarAST tmp39_AST_in = (GrammarAST)_t;
+						match(_t,ALT);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case TEMPLATE:
+					{
+						GrammarAST tmp40_AST_in = (GrammarAST)_t;
+						match(_t,TEMPLATE);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case ACTION:
+					{
+						GrammarAST tmp41_AST_in = (GrammarAST)_t;
+						match(_t,ACTION);
+						_t = _t.getNextSibling();
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					_t = __t59;
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop62;
+				}
+				
+			} while (true);
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final StateCluster  element(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST a = null;
+		GrammarAST b = null;
+		GrammarAST c1 = null;
+		GrammarAST c2 = null;
+		GrammarAST pred = null;
+		GrammarAST spred = null;
+		GrammarAST bpred = null;
+		GrammarAST gpred = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				AST __t64 = _t;
+				GrammarAST tmp42_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getFirstChild();
+				g=element(_t);
+				_t = _retTree;
+				_t = __t64;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				AST __t65 = _t;
+				GrammarAST tmp43_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getFirstChild();
+				g=element(_t);
+				_t = _retTree;
+				_t = __t65;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ASSIGN:
+			{
+				AST __t66 = _t;
+				GrammarAST tmp44_AST_in = (GrammarAST)_t;
+				match(_t,ASSIGN);
+				_t = _t.getFirstChild();
+				GrammarAST tmp45_AST_in = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				g=element(_t);
+				_t = _retTree;
+				_t = __t66;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case PLUS_ASSIGN:
+			{
+				AST __t67 = _t;
+				GrammarAST tmp46_AST_in = (GrammarAST)_t;
+				match(_t,PLUS_ASSIGN);
+				_t = _t.getFirstChild();
+				GrammarAST tmp47_AST_in = (GrammarAST)_t;
+				match(_t,ID);
+				_t = _t.getNextSibling();
+				g=element(_t);
+				_t = _retTree;
+				_t = __t67;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case RANGE:
+			{
+				AST __t68 = _t;
+				GrammarAST tmp48_AST_in = (GrammarAST)_t;
+				match(_t,RANGE);
+				_t = _t.getFirstChild();
+				a = _t==ASTNULL ? null : (GrammarAST)_t;
+				atom(_t);
+				_t = _retTree;
+				b = _t==ASTNULL ? null : (GrammarAST)_t;
+				atom(_t);
+				_t = _retTree;
+				_t = __t68;
+				_t = _t.getNextSibling();
+				g = factory.build_Range(grammar.getTokenType(a.getText()),
+				grammar.getTokenType(b.getText()));
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t69 = _t;
+				GrammarAST tmp49_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				c1 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				c2 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t69;
+				_t = _t.getNextSibling();
+				
+				if ( grammar.type==Grammar.LEXER ) {
+					g = factory.build_CharRange(c1.getText(), c2.getText());
+				}
+				
+				break;
+			}
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case NOT:
+			case WILDCARD:
+			{
+				g=atom_or_notatom(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case OPTIONAL:
+			case CLOSURE:
+			case POSITIVE_CLOSURE:
+			{
+				g=ebnf(_t);
+				_t = _retTree;
+				break;
+			}
+			case TREE_BEGIN:
+			{
+				g=tree(_t);
+				_t = _retTree;
+				break;
+			}
+			case SYNPRED:
+			{
+				AST __t70 = _t;
+				GrammarAST tmp50_AST_in = (GrammarAST)_t;
+				match(_t,SYNPRED);
+				_t = _t.getFirstChild();
+				block(_t);
+				_t = _retTree;
+				_t = __t70;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ACTION:
+			{
+				GrammarAST tmp51_AST_in = (GrammarAST)_t;
+				match(_t,ACTION);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case SEMPRED:
+			{
+				pred = (GrammarAST)_t;
+				match(_t,SEMPRED);
+				_t = _t.getNextSibling();
+				g = factory.build_SemanticPredicate(pred);
+				break;
+			}
+			case SYN_SEMPRED:
+			{
+				spred = (GrammarAST)_t;
+				match(_t,SYN_SEMPRED);
+				_t = _t.getNextSibling();
+				g = factory.build_SemanticPredicate(spred);
+				break;
+			}
+			case BACKTRACK_SEMPRED:
+			{
+				bpred = (GrammarAST)_t;
+				match(_t,BACKTRACK_SEMPRED);
+				_t = _t.getNextSibling();
+				g = factory.build_SemanticPredicate(bpred);
+				break;
+			}
+			case GATED_SEMPRED:
+			{
+				gpred = (GrammarAST)_t;
+				match(_t,GATED_SEMPRED);
+				_t = _t.getNextSibling();
+				g = factory.build_SemanticPredicate(gpred);
+				break;
+			}
+			case EPSILON:
+			{
+				GrammarAST tmp52_AST_in = (GrammarAST)_t;
+				match(_t,EPSILON);
+				_t = _t.getNextSibling();
+				g = factory.build_Epsilon();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	public final void exceptionHandler(AST _t) throws RecognitionException {
+		
+		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t54 = _t;
+			GrammarAST tmp53_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_catch);
+			_t = _t.getFirstChild();
+			GrammarAST tmp54_AST_in = (GrammarAST)_t;
+			match(_t,ARG_ACTION);
+			_t = _t.getNextSibling();
+			GrammarAST tmp55_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t54;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	public final void finallyClause(AST _t) throws RecognitionException {
+		
+		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		try {      // for error handling
+			AST __t56 = _t;
+			GrammarAST tmp56_AST_in = (GrammarAST)_t;
+			match(_t,LITERAL_finally);
+			_t = _t.getFirstChild();
+			GrammarAST tmp57_AST_in = (GrammarAST)_t;
+			match(_t,ACTION);
+			_t = _t.getNextSibling();
+			_t = __t56;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Build an NFA StateCluster for a single atom: a rule reference, token
+	 *  reference, char literal, string literal, or wildcard, each optionally
+	 *  followed by an AST suffix (ROOT '^' or BANG '!').  Behavior differs by
+	 *  grammar type: in a lexer, token refs and literals become rule refs or
+	 *  char/string atoms; in a parser, literals become token-type atoms.
+	 *  Returns the built cluster, or null if a RecognitionException was
+	 *  reported and recovery skipped the subtree.
+	 */
+	public final StateCluster  atom(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST r = null;
+		GrammarAST rarg = null;
+		GrammarAST as1 = null;
+		GrammarAST t = null;
+		GrammarAST targ = null;
+		GrammarAST as2 = null;
+		GrammarAST c = null;
+		GrammarAST as3 = null;
+		GrammarAST s = null;
+		GrammarAST as4 = null;
+		GrammarAST w = null;
+		GrammarAST as5 = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case RULE_REF:
+			{
+				AST __t85 = _t;
+				r = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,RULE_REF);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ARG_ACTION:
+				{
+					rarg = (GrammarAST)_t;
+					match(_t,ARG_ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				// type 3 == NULL_TREE_LOOKAHEAD (see _tokenNames): end of children.
+				case 3:
+				case BANG:
+				case ROOT:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case BANG:
+				case ROOT:
+				{
+					as1 = _t==ASTNULL ? null : (GrammarAST)_t;
+					ast_suffix(_t);
+					_t = _retTree;
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t85;
+				_t = _t.getNextSibling();
+				
+				NFAState start = grammar.getRuleStartState(r.getText());
+				if ( start!=null ) {
+				int ruleIndex = grammar.getRuleIndex(r.getText());
+				g = factory.build_RuleRef(ruleIndex, start);
+				r.followingNFAState = g.right;
+				if ( g.left.transition(0) instanceof RuleClosureTransition
+					 && grammar.type!=Grammar.LEXER )
+				{
+				addFollowTransition(r.getText(), g.right);
+				}
+				// else rule ref got inlined to a set
+				}
+				
+				break;
+			}
+			case TOKEN_REF:
+			{
+				AST __t88 = _t;
+				t = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case ARG_ACTION:
+				{
+					targ = (GrammarAST)_t;
+					match(_t,ARG_ACTION);
+					_t = _t.getNextSibling();
+					break;
+				}
+				case 3:
+				case BANG:
+				case ROOT:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case BANG:
+				case ROOT:
+				{
+					as2 = _t==ASTNULL ? null : (GrammarAST)_t;
+					ast_suffix(_t);
+					_t = _retTree;
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t88;
+				_t = _t.getNextSibling();
+				
+				if ( grammar.type==Grammar.LEXER ) {
+				NFAState start = grammar.getRuleStartState(t.getText());
+				if ( start!=null ) {
+				int ruleIndex = grammar.getRuleIndex(t.getText());
+				g = factory.build_RuleRef(ruleIndex, start);
+				// don't add FOLLOW transitions in the lexer;
+				// only exact context should be used.
+				}
+				}
+				else {
+				int tokenType = grammar.getTokenType(t.getText());
+				g = factory.build_Atom(tokenType);
+				t.followingNFAState = g.right;
+				}
+				
+				break;
+			}
+			case CHAR_LITERAL:
+			{
+				AST __t91 = _t;
+				c = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case BANG:
+				case ROOT:
+				{
+					as3 = _t==ASTNULL ? null : (GrammarAST)_t;
+					ast_suffix(_t);
+					_t = _retTree;
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t91;
+				_t = _t.getNextSibling();
+				
+					if ( grammar.type==Grammar.LEXER ) {
+						g = factory.build_CharLiteralAtom(c.getText());
+					}
+					else {
+				int tokenType = grammar.getTokenType(c.getText());
+				g = factory.build_Atom(tokenType);
+				c.followingNFAState = g.right;
+					}
+					
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				AST __t93 = _t;
+				s = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case BANG:
+				case ROOT:
+				{
+					as4 = _t==ASTNULL ? null : (GrammarAST)_t;
+					ast_suffix(_t);
+					_t = _retTree;
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t93;
+				_t = _t.getNextSibling();
+				
+					if ( grammar.type==Grammar.LEXER ) {
+						g = factory.build_StringLiteralAtom(s.getText());
+					}
+					else {
+				int tokenType = grammar.getTokenType(s.getText());
+				g = factory.build_Atom(tokenType);
+				s.followingNFAState = g.right;
+					}
+					
+				break;
+			}
+			case WILDCARD:
+			{
+				AST __t95 = _t;
+				w = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,WILDCARD);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case BANG:
+				case ROOT:
+				{
+					as5 = _t==ASTNULL ? null : (GrammarAST)_t;
+					ast_suffix(_t);
+					_t = _retTree;
+					break;
+				}
+				case 3:
+				{
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				_t = __t95;
+				_t = _t.getNextSibling();
+				g = factory.build_Wildcard();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	/** Build an NFA StateCluster for either a plain atom or a complemented
+	 *  element ^(NOT x) where x is a char literal, token ref, or set BLOCK.
+	 *  For NOT, the label set is complemented via grammar.complement(); an
+	 *  empty complement is reported as MSG_EMPTY_COMPLEMENT.  Returns the
+	 *  cluster, or null if an error was reported and the subtree skipped.
+	 */
+	public final StateCluster  atom_or_notatom(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST atom_or_notatom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST n = null;
+		GrammarAST c = null;
+		GrammarAST ast1 = null;
+		GrammarAST t = null;
+		GrammarAST ast3 = null;
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case STRING_LITERAL:
+			case CHAR_LITERAL:
+			case TOKEN_REF:
+			case RULE_REF:
+			case WILDCARD:
+			{
+				g=atom(_t);
+				_t = _retTree;
+				break;
+			}
+			case NOT:
+			{
+				AST __t80 = _t;
+				n = _t==ASTNULL ? null :(GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				{
+				if (_t==null) _t=ASTNULL;
+				switch ( _t.getType()) {
+				case CHAR_LITERAL:
+				{
+					c = (GrammarAST)_t;
+					match(_t,CHAR_LITERAL);
+					_t = _t.getNextSibling();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast1 = _t==ASTNULL ? null : (GrammarAST)_t;
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					
+						            int ttype=0;
+								if ( grammar.type==Grammar.LEXER ) {
+								ttype = Grammar.getCharValueFromGrammarCharLiteral(c.getText());
+								}
+								else {
+								ttype = grammar.getTokenType(c.getText());
+							}
+					IntSet notAtom = grammar.complement(ttype);
+					if ( notAtom.isNil() ) {
+					ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
+										  			              grammar,
+													              c.token,
+														          c.getText());
+					}
+						            g=factory.build_Set(notAtom);
+						
+					break;
+				}
+				case TOKEN_REF:
+				{
+					t = (GrammarAST)_t;
+					match(_t,TOKEN_REF);
+					_t = _t.getNextSibling();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BANG:
+					case ROOT:
+					{
+						ast3 = _t==ASTNULL ? null : (GrammarAST)_t;
+						ast_suffix(_t);
+						_t = _retTree;
+						break;
+					}
+					case 3:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					
+						            int ttype=0;
+					IntSet notAtom = null;
+								if ( grammar.type==Grammar.LEXER ) {
+								notAtom = grammar.getSetFromRule(this,t.getText());
+						   		if ( notAtom==null ) {
+							ErrorManager.grammarError(ErrorManager.MSG_RULE_INVALID_SET,
+									  			              grammar,
+												              t.token,
+													          t.getText());
+						   		}
+						   		else {
+						            		notAtom = grammar.complement(notAtom);
+						            	}
+								}
+								else {
+								ttype = grammar.getTokenType(t.getText());
+						            	notAtom = grammar.complement(ttype);
+							}
+					if ( notAtom==null || notAtom.isNil() ) {
+					ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
+									  			              grammar,
+												              t.token,
+													          t.getText());
+					}
+						           g=factory.build_Set(notAtom);
+						
+					break;
+				}
+				case BLOCK:
+				{
+					g=set(_t);
+					_t = _retTree;
+					
+						           GrammarAST stNode = (GrammarAST)n.getFirstChild();
+					//IntSet notSet = grammar.complement(stNode.getSetValue());
+					// let code generator complement the sets
+					IntSet s = stNode.getSetValue();
+					stNode.setSetValue(s);
+					// let code gen do the complement again; here we compute
+					// for NFA construction
+					s = grammar.complement(s);
+					if ( s.isNil() ) {
+					ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
+									  			              grammar,
+												              n.token);
+					}
+						           g=factory.build_Set(s);
+						
+					break;
+				}
+				default:
+				{
+					throw new NoViableAltException(_t);
+				}
+				}
+				}
+				n.followingNFAState = g.right;
+				_t = __t80;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	/** Build the NFA for an EBNF construct: (...)?, (...)*, (...)+ or a plain
+	 *  block.  For each looping construct a decision number is assigned and
+	 *  registered with the grammar (setDecisionNFA / setDecisionBlockAST) so
+	 *  later analysis can find the decision states.  A plain BLOCK that the
+	 *  grammar validates as a set is built via set() instead of block().
+	 *  Returns the cluster, or null after reported error recovery.
+	 */
+	public final StateCluster  ebnf(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StateCluster b = null;
+		// Peel off the EBNF operator node (if any) to find the underlying BLOCK
+		// and its end-of-block (EOB) node, used as the loop decision AST node.
+		GrammarAST blk = ebnf_AST_in;
+		if ( blk.getType()!=BLOCK ) {
+			blk = (GrammarAST)blk.getFirstChild();
+		}
+		GrammarAST eob = blk.getLastChild();
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONAL:
+			{
+				AST __t72 = _t;
+				GrammarAST tmp58_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONAL);
+				_t = _t.getFirstChild();
+				b=block(_t);
+				_t = _retTree;
+				_t = __t72;
+				_t = _t.getNextSibling();
+				
+				if ( blk.setValue!=null ) {
+				// if block comes back SET not BLOCK, make it
+				// a single ALT block
+				b = factory.build_AlternativeBlockFromSet(b);
+				}
+				g = factory.build_Aoptional(b);
+					g.left.setDescription(grammar.grammarTreeToString(ebnf_AST_in,false));
+				// there is always at least one alt even if block has just 1 alt
+				int d = grammar.assignDecisionNumber( g.left );
+						grammar.setDecisionNFA(d, g.left);
+				grammar.setDecisionBlockAST(d, blk);
+				g.left.setDecisionASTNode(ebnf_AST_in);
+					
+				break;
+			}
+			case CLOSURE:
+			{
+				AST __t73 = _t;
+				GrammarAST tmp59_AST_in = (GrammarAST)_t;
+				match(_t,CLOSURE);
+				_t = _t.getFirstChild();
+				b=block(_t);
+				_t = _retTree;
+				_t = __t73;
+				_t = _t.getNextSibling();
+				
+				if ( blk.setValue!=null ) {
+				b = factory.build_AlternativeBlockFromSet(b);
+				}
+				g = factory.build_Astar(b);
+						// track the loop back / exit decision point
+					b.right.setDescription("()* loopback of "+grammar.grammarTreeToString(ebnf_AST_in,false));
+				int d = grammar.assignDecisionNumber( b.right );
+						grammar.setDecisionNFA(d, b.right);
+				grammar.setDecisionBlockAST(d, blk);
+				b.right.setDecisionASTNode(eob);
+				// make block entry state also have same decision for interpreting grammar
+				NFAState altBlockState = (NFAState)g.left.transition(0).target;
+				altBlockState.setDecisionASTNode(ebnf_AST_in);
+				altBlockState.setDecisionNumber(d);
+				g.left.setDecisionNumber(d); // this is the bypass decision (2 alts)
+				g.left.setDecisionASTNode(ebnf_AST_in);
+					
+				break;
+			}
+			case POSITIVE_CLOSURE:
+			{
+				AST __t74 = _t;
+				GrammarAST tmp60_AST_in = (GrammarAST)_t;
+				match(_t,POSITIVE_CLOSURE);
+				_t = _t.getFirstChild();
+				b=block(_t);
+				_t = _retTree;
+				_t = __t74;
+				_t = _t.getNextSibling();
+				
+				if ( blk.setValue!=null ) {
+				b = factory.build_AlternativeBlockFromSet(b);
+				}
+				g = factory.build_Aplus(b);
+				// don't make a decision on left edge, can reuse loop end decision
+						// track the loop back / exit decision point
+					b.right.setDescription("()+ loopback of "+grammar.grammarTreeToString(ebnf_AST_in,false));
+				int d = grammar.assignDecisionNumber( b.right );
+						grammar.setDecisionNFA(d, b.right);
+				grammar.setDecisionBlockAST(d, blk);
+				b.right.setDecisionASTNode(eob);
+				// make block entry state also have same decision for interpreting grammar
+				NFAState altBlockState = (NFAState)g.left.transition(0).target;
+				altBlockState.setDecisionASTNode(ebnf_AST_in);
+				altBlockState.setDecisionNumber(d);
+				
+				break;
+			}
+			default:
+				// Plain BLOCK: prefer the compact set form when the grammar says
+				// this block is a valid set; otherwise build a full alt block.
+				if (_t==null) _t=ASTNULL;
+				if (((_t.getType()==BLOCK))&&(grammar.isValidSet(this,ebnf_AST_in))) {
+					g=set(_t);
+					_t = _retTree;
+				}
+				else if ((_t.getType()==BLOCK)) {
+					b=block(_t);
+					_t = _retTree;
+					
+					// track decision if > 1 alts
+					if ( grammar.getNumberOfAltsForDecisionNFA(b.left)>1 ) {
+					b.left.setDescription(grammar.grammarTreeToString(blk,false));
+					b.left.setDecisionASTNode(blk);
+					int d = grammar.assignDecisionNumber( b.left );
+					grammar.setDecisionNFA( d, b.left );
+					grammar.setDecisionBlockAST(d, blk);
+					}
+					g = b;
+					
+				}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	/** Build the NFA for a tree pattern ^(root child1 child2 ...): the root
+	 *  element followed by an implicit DOWN atom, the concatenated children,
+	 *  and an implicit UP atom.  Records the right edge of DOWN on the tree
+	 *  node (NFATreeDownState) for later LOOK computation.  Returns the
+	 *  cluster, or null after reported error recovery.
+	 */
+	public final StateCluster  tree(AST _t) throws RecognitionException {
+		StateCluster g=null;
+		
+		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		StateCluster e=null;
+		GrammarAST el=null;
+		StateCluster down=null, up=null;
+		
+		
+		try {      // for error handling
+			AST __t76 = _t;
+			GrammarAST tmp61_AST_in = (GrammarAST)_t;
+			match(_t,TREE_BEGIN);
+			_t = _t.getFirstChild();
+			el=(GrammarAST)_t;
+			g=element(_t);
+			_t = _retTree;
+			
+			down = factory.build_Atom(Label.DOWN);
+			// TODO set following states for imaginary nodes?
+			//el.followingNFAState = down.right;
+					   g = factory.build_AB(g,down);
+					
+			{
+			// Loop over remaining child elements; _tokenSet_0 holds the token
+			// types that can begin an element.
+			_loop78:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_tokenSet_0.member(_t.getType()))) {
+					el=(GrammarAST)_t;
+					e=element(_t);
+					_t = _retTree;
+					g = factory.build_AB(g,e);
+				}
+				else {
+					break _loop78;
+				}
+				
+			} while (true);
+			}
+			
+			up = factory.build_Atom(Label.UP);
+			//el.followingNFAState = up.right;
+					   g = factory.build_AB(g,up);
+					   // tree roots point at right edge of DOWN for LOOK computation later
+					   tree_AST_in.NFATreeDownState = down.left;
+					
+			_t = __t76;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+		return g;
+	}
+	
+	/** Consume an AST suffix node (ROOT '^' or BANG '!').  The init action
+	 *  (run before matching) reports MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION
+	 *  if the grammar has no "output" option, since AST operators are
+	 *  meaningless without AST/template output.  No NFA states are built.
+	 */
+	public final void ast_suffix(AST _t) throws RecognitionException {
+		
+		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		if ( grammar.getOption("output")==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+									  grammar, ast_suffix_AST_in.token, currentRuleName);
+		}
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case ROOT:
+			{
+				GrammarAST tmp62_AST_in = (GrammarAST)_t;
+				match(_t,ROOT);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BANG:
+			{
+				GrammarAST tmp63_AST_in = (GrammarAST)_t;
+				match(_t,BANG);
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Accumulate the token types / char values denoted by one set element
+	 *  into {@code elements}: a char literal, token ref (in a lexer this
+	 *  recursively pulls in the referenced rule's set), string literal,
+	 *  char range, nested set BLOCK, or ^(NOT x) whose complement is added.
+	 *  Duplicate members are reported as MSG_DUPLICATE_SET_ENTRY.
+	 */
+	public final void setElement(AST _t,
+		IntSet elements
+	) throws RecognitionException {
+		
+		GrammarAST setElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST c = null;
+		GrammarAST t = null;
+		GrammarAST s = null;
+		GrammarAST c1 = null;
+		GrammarAST c2 = null;
+		
+		int ttype;
+		IntSet ns=null;
+		StateCluster gset;
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				
+					if ( grammar.type==Grammar.LEXER ) {
+					ttype = Grammar.getCharValueFromGrammarCharLiteral(c.getText());
+					}
+					else {
+					ttype = grammar.getTokenType(c.getText());
+				}
+				if ( elements.member(ttype) ) {
+							ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+													  grammar,
+													  c.token,
+													  c.getText());
+				}
+				elements.add(ttype);
+				
+				break;
+			}
+			case TOKEN_REF:
+			{
+				t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				
+						if ( grammar.type==Grammar.LEXER ) {
+							// recursively will invoke this rule to match elements in target rule ref
+							IntSet ruleSet = grammar.getSetFromRule(this,t.getText());
+							if ( ruleSet==null ) {
+								ErrorManager.grammarError(ErrorManager.MSG_RULE_INVALID_SET,
+												  grammar,
+												  t.token,
+												  t.getText());
+							}
+							else {
+								elements.addAll(ruleSet);
+							}
+						}
+						else {
+							ttype = grammar.getTokenType(t.getText());
+							if ( elements.member(ttype) ) {
+								ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+														  grammar,
+														  t.token,
+														  t.getText());
+							}
+							elements.add(ttype);
+							}
+				
+				break;
+			}
+			case STRING_LITERAL:
+			{
+				s = (GrammarAST)_t;
+				match(_t,STRING_LITERAL);
+				_t = _t.getNextSibling();
+				
+				ttype = grammar.getTokenType(s.getText());
+				if ( elements.member(ttype) ) {
+							ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+													  grammar,
+													  s.token,
+													  s.getText());
+				}
+				elements.add(ttype);
+				
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t118 = _t;
+				GrammarAST tmp64_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				c1 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				c2 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t118;
+				_t = _t.getNextSibling();
+				
+					if ( grammar.type==Grammar.LEXER ) {
+					        int a = Grammar.getCharValueFromGrammarCharLiteral(c1.getText());
+					    int b = Grammar.getCharValueFromGrammarCharLiteral(c2.getText());
+						elements.addAll(IntervalSet.of(a,b));
+					}
+					
+				break;
+			}
+			case BLOCK:
+			{
+				gset=set(_t);
+				_t = _retTree;
+				
+						Transition setTrans = gset.left.transition(0);
+				elements.addAll(setTrans.label.getSet());
+				
+				break;
+			}
+			case NOT:
+			{
+				AST __t119 = _t;
+				GrammarAST tmp65_AST_in = (GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				ns=new IntervalSet();
+				setElement(_t,ns);
+				_t = _retTree;
+				
+				IntSet not = grammar.complement(ns);
+				elements.addAll(not);
+				
+				_t = __t119;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException ex) {
+			reportError(ex);
+			if (_t!=null) {_t = _t.getNextSibling();}
+		}
+		_retTree = _t;
+	}
+	
+	/** Walk an entire ^(RULE ...) subtree and collect the union of token
+	 *  types matched by its alternatives into a single IntSet (each ALT must
+	 *  be a setElement).  Unlike the reporting rules above, errors are
+	 *  rethrown to the caller rather than reported here, because this rule
+	 *  is used as a test: failure means "this rule is not a simple set".
+	 */
+	public final IntSet  setRule(AST _t) throws RecognitionException {
+		IntSet elements=new IntervalSet();
+		
+		GrammarAST setRule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		IntSet s=null;
+		
+		try {      // for error handling
+			AST __t105 = _t;
+			GrammarAST tmp66_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp67_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getNextSibling();
+			GrammarAST tmp68_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp69_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			// Skip any rule-level action markers (AMPERSAND).
+			_loop110:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp70_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop110;
+				}
+				
+			} while (true);
+			}
+			AST __t111 = _t;
+			GrammarAST tmp71_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp72_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case ALT:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			// One or more alternatives; each contributes its element set.
+			int _cnt115=0;
+			_loop115:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					AST __t114 = _t;
+					GrammarAST tmp73_AST_in = (GrammarAST)_t;
+					match(_t,ALT);
+					_t = _t.getFirstChild();
+					setElement(_t,elements);
+					_t = _retTree;
+					GrammarAST tmp74_AST_in = (GrammarAST)_t;
+					match(_t,EOA);
+					_t = _t.getNextSibling();
+					_t = __t114;
+					_t = _t.getNextSibling();
+				}
+				else {
+					if ( _cnt115>=1 ) { break _loop115; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt115++;
+			} while (true);
+			}
+			GrammarAST tmp75_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t111;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp76_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			_t = __t105;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException re) {
+			// Validating rule: propagate instead of reporting/recovering.
+			throw re;
+		}
+		_retTree = _t;
+		return elements;
+	}
+	
+/** Check to see if this block can be a set.  Can't have actions
+ *  etc...  Also can't be in a rule with a rewrite as we need
+ *  to track what's inside set for use in rewrite.
+ */
+	/** Validating rule (see javadoc above): succeeds only if every alt of the
+	 *  BLOCK is a single set element, there is more than one alt, and the
+	 *  current rule has no rewrite for the outer alt.  Throws
+	 *  RecognitionException/SemanticException on failure; nothing is built.
+	 */
+	public final void testBlockAsSet(AST _t) throws RecognitionException {
+		
+		GrammarAST testBlockAsSet_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		
+		int nAlts=0;
+		Rule r = grammar.getRule(currentRuleName);
+		
+		
+		try {      // for error handling
+			AST __t121 = _t;
+			GrammarAST tmp77_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			int _cnt125=0;
+			_loop125:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					AST __t123 = _t;
+					GrammarAST tmp78_AST_in = (GrammarAST)_t;
+					match(_t,ALT);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BACKTRACK_SEMPRED:
+					{
+						GrammarAST tmp79_AST_in = (GrammarAST)_t;
+						match(_t,BACKTRACK_SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case BLOCK:
+					case CHAR_RANGE:
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case TOKEN_REF:
+					case NOT:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					testSetElement(_t);
+					_t = _retTree;
+					nAlts++;
+					GrammarAST tmp80_AST_in = (GrammarAST)_t;
+					match(_t,EOA);
+					_t = _t.getNextSibling();
+					_t = __t123;
+					_t = _t.getNextSibling();
+					// Semantic predicate: a rewrite on this alt disqualifies the set form.
+					if (!(!r.hasRewrite(outerAltNum)))
+					  throw new SemanticException("!r.hasRewrite(outerAltNum)");
+				}
+				else {
+					if ( _cnt125>=1 ) { break _loop125; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt125++;
+			} while (true);
+			}
+			GrammarAST tmp81_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t121;
+			_t = _t.getNextSibling();
+			// Only multi-alt blocks are worth collapsing to a set.
+			if (!(nAlts>1))
+			  throw new SemanticException("nAlts>1");
+		}
+		catch (RecognitionException re) {
+			throw re;
+		}
+		_retTree = _t;
+	}
+	
+/** Match just an element; no ast suffix etc.. */
+	/** Validating rule: succeeds iff the current subtree could be one set
+	 *  element (char literal, lexer token ref to a set-like rule, char range,
+	 *  nested set block, NOT of any of these, or — outside the lexer — a
+	 *  string literal).  Errors propagate to the caller; nothing is built.
+	 */
+	public final void testSetElement(AST _t) throws RecognitionException {
+		
+		GrammarAST testSetElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST c = null;
+		GrammarAST t = null;
+		GrammarAST s = null;
+		GrammarAST c1 = null;
+		GrammarAST c2 = null;
+		
+		AST r = _t;
+		
+		
+		try {      // for error handling
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case CHAR_LITERAL:
+			{
+				c = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case TOKEN_REF:
+			{
+				t = (GrammarAST)_t;
+				match(_t,TOKEN_REF);
+				_t = _t.getNextSibling();
+				
+						if ( grammar.type==Grammar.LEXER ) {
+					        Rule rule = grammar.getRule(t.getText());
+					        if ( rule==null ) {
+					        	throw new RecognitionException("invalid rule");
+					        }
+							// recursively will invoke this rule to match elements in target rule ref
+					        testSetRule(rule.tree);
+						}
+				
+				break;
+			}
+			case CHAR_RANGE:
+			{
+				AST __t140 = _t;
+				GrammarAST tmp82_AST_in = (GrammarAST)_t;
+				match(_t,CHAR_RANGE);
+				_t = _t.getFirstChild();
+				c1 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				c2 = (GrammarAST)_t;
+				match(_t,CHAR_LITERAL);
+				_t = _t.getNextSibling();
+				_t = __t140;
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			{
+				testBlockAsSet(_t);
+				_t = _retTree;
+				break;
+			}
+			case NOT:
+			{
+				AST __t141 = _t;
+				GrammarAST tmp83_AST_in = (GrammarAST)_t;
+				match(_t,NOT);
+				_t = _t.getFirstChild();
+				testSetElement(_t);
+				_t = _retTree;
+				_t = __t141;
+				_t = _t.getNextSibling();
+				break;
+			}
+			default:
+				// STRING_LITERAL is a set element only outside the lexer.
+				if (_t==null) _t=ASTNULL;
+				if (((_t.getType()==STRING_LITERAL))&&(grammar.type!=Grammar.LEXER)) {
+					s = (GrammarAST)_t;
+					match(_t,STRING_LITERAL);
+					_t = _t.getNextSibling();
+				}
+			else {
+				throw new NoViableAltException(_t);
+			}
+			}
+		}
+		catch (RecognitionException re) {
+			throw re;
+		}
+		_retTree = _t;
+	}
+	
+	/** Validating rule: succeeds iff an entire ^(RULE ...) subtree consists
+	 *  only of alternatives that are single set elements (structure mirrors
+	 *  setRule, but nothing is collected).  Errors propagate to the caller.
+	 */
+	public final void testSetRule(AST _t) throws RecognitionException {
+		
+		GrammarAST testSetRule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
+		GrammarAST id = null;
+		
+		try {      // for error handling
+			AST __t127 = _t;
+			GrammarAST tmp84_AST_in = (GrammarAST)_t;
+			match(_t,RULE);
+			_t = _t.getFirstChild();
+			id = (GrammarAST)_t;
+			match(_t,ID);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case FRAGMENT:
+			case LITERAL_protected:
+			case LITERAL_public:
+			case LITERAL_private:
+			{
+				modifier(_t);
+				_t = _retTree;
+				break;
+			}
+			case ARG:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp85_AST_in = (GrammarAST)_t;
+			match(_t,ARG);
+			_t = _t.getNextSibling();
+			GrammarAST tmp86_AST_in = (GrammarAST)_t;
+			match(_t,RET);
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case OPTIONS:
+			{
+				GrammarAST tmp87_AST_in = (GrammarAST)_t;
+				match(_t,OPTIONS);
+				_t = _t.getNextSibling();
+				break;
+			}
+			case BLOCK:
+			case SCOPE:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case SCOPE:
+			{
+				ruleScopeSpec(_t);
+				_t = _retTree;
+				break;
+			}
+			case BLOCK:
+			case AMPERSAND:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			{
+			// Skip any rule-level action markers (AMPERSAND).
+			_loop132:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==AMPERSAND)) {
+					GrammarAST tmp88_AST_in = (GrammarAST)_t;
+					match(_t,AMPERSAND);
+					_t = _t.getNextSibling();
+				}
+				else {
+					break _loop132;
+				}
+				
+			} while (true);
+			}
+			AST __t133 = _t;
+			GrammarAST tmp89_AST_in = (GrammarAST)_t;
+			match(_t,BLOCK);
+			_t = _t.getFirstChild();
+			{
+			// One or more alternatives, each testable as a single set element.
+			int _cnt137=0;
+			_loop137:
+			do {
+				if (_t==null) _t=ASTNULL;
+				if ((_t.getType()==ALT)) {
+					AST __t135 = _t;
+					GrammarAST tmp90_AST_in = (GrammarAST)_t;
+					match(_t,ALT);
+					_t = _t.getFirstChild();
+					{
+					if (_t==null) _t=ASTNULL;
+					switch ( _t.getType()) {
+					case BACKTRACK_SEMPRED:
+					{
+						GrammarAST tmp91_AST_in = (GrammarAST)_t;
+						match(_t,BACKTRACK_SEMPRED);
+						_t = _t.getNextSibling();
+						break;
+					}
+					case BLOCK:
+					case CHAR_RANGE:
+					case STRING_LITERAL:
+					case CHAR_LITERAL:
+					case TOKEN_REF:
+					case NOT:
+					{
+						break;
+					}
+					default:
+					{
+						throw new NoViableAltException(_t);
+					}
+					}
+					}
+					testSetElement(_t);
+					_t = _retTree;
+					GrammarAST tmp92_AST_in = (GrammarAST)_t;
+					match(_t,EOA);
+					_t = _t.getNextSibling();
+					_t = __t135;
+					_t = _t.getNextSibling();
+				}
+				else {
+					if ( _cnt137>=1 ) { break _loop137; } else {throw new NoViableAltException(_t);}
+				}
+				
+				_cnt137++;
+			} while (true);
+			}
+			GrammarAST tmp93_AST_in = (GrammarAST)_t;
+			match(_t,EOB);
+			_t = _t.getNextSibling();
+			_t = __t133;
+			_t = _t.getNextSibling();
+			{
+			if (_t==null) _t=ASTNULL;
+			switch ( _t.getType()) {
+			case LITERAL_catch:
+			case LITERAL_finally:
+			{
+				exceptionGroup(_t);
+				_t = _retTree;
+				break;
+			}
+			case EOR:
+			{
+				break;
+			}
+			default:
+			{
+				throw new NoViableAltException(_t);
+			}
+			}
+			}
+			GrammarAST tmp94_AST_in = (GrammarAST)_t;
+			match(_t,EOR);
+			_t = _t.getNextSibling();
+			_t = __t127;
+			_t = _t.getNextSibling();
+		}
+		catch (RecognitionException re) {
+			throw re;
+		}
+		_retTree = _t;
+	}
+	
+	
+	/** Token-type -> display-name table; the array index IS the token type
+	 *  (e.g. 3 == NULL_TREE_LOOKAHEAD), used by error messages and the
+	 *  numeric "case 3:" labels in the generated switches above.
+	 */
+	public static final String[] _tokenNames = {
+		"<0>",
+		"EOF",
+		"<2>",
+		"NULL_TREE_LOOKAHEAD",
+		"\"options\"",
+		"\"tokens\"",
+		"\"parser\"",
+		"LEXER",
+		"RULE",
+		"BLOCK",
+		"OPTIONAL",
+		"CLOSURE",
+		"POSITIVE_CLOSURE",
+		"SYNPRED",
+		"RANGE",
+		"CHAR_RANGE",
+		"EPSILON",
+		"ALT",
+		"EOR",
+		"EOB",
+		"EOA",
+		"ID",
+		"ARG",
+		"ARGLIST",
+		"RET",
+		"LEXER_GRAMMAR",
+		"PARSER_GRAMMAR",
+		"TREE_GRAMMAR",
+		"COMBINED_GRAMMAR",
+		"INITACTION",
+		"LABEL",
+		"TEMPLATE",
+		"\"scope\"",
+		"GATED_SEMPRED",
+		"SYN_SEMPRED",
+		"BACKTRACK_SEMPRED",
+		"\"fragment\"",
+		"ACTION",
+		"DOC_COMMENT",
+		"SEMI",
+		"\"lexer\"",
+		"\"tree\"",
+		"\"grammar\"",
+		"AMPERSAND",
+		"COLON",
+		"RCURLY",
+		"ASSIGN",
+		"STRING_LITERAL",
+		"CHAR_LITERAL",
+		"INT",
+		"STAR",
+		"TOKEN_REF",
+		"\"protected\"",
+		"\"public\"",
+		"\"private\"",
+		"BANG",
+		"ARG_ACTION",
+		"\"returns\"",
+		"\"throws\"",
+		"COMMA",
+		"LPAREN",
+		"OR",
+		"RPAREN",
+		"\"catch\"",
+		"\"finally\"",
+		"PLUS_ASSIGN",
+		"SEMPRED",
+		"IMPLIES",
+		"ROOT",
+		"RULE_REF",
+		"NOT",
+		"TREE_BEGIN",
+		"QUESTION",
+		"PLUS",
+		"WILDCARD",
+		"REWRITE",
+		"DOLLAR",
+		"DOUBLE_QUOTE_STRING_LITERAL",
+		"DOUBLE_ANGLE_STRING_LITERAL",
+		"WS",
+		"COMMENT",
+		"SL_COMMENT",
+		"ML_COMMENT",
+		"OPEN_ELEMENT_OPTION",
+		"CLOSE_ELEMENT_OPTION",
+		"ESC",
+		"DIGIT",
+		"XDIGIT",
+		"NESTED_ARG_ACTION",
+		"NESTED_ACTION",
+		"ACTION_CHAR_LITERAL",
+		"ACTION_STRING_LITERAL",
+		"ACTION_ESC",
+		"WS_LOOP",
+		"INTERNAL_RULE_REF",
+		"WS_OPT",
+		"SRC"
+	};
+	
+	/** Bit vector (64 token types per long) of the token types that can begin
+	 *  an element; consulted via _tokenSet_0.member(...) in tree()'s child loop.
+	 */
+	private static final long[] mk_tokenSet_0() {
+		long[] data = { 38773375610519040L, 1270L, 0L, 0L};
+		return data;
+	}
+	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+	}
+	
diff --git a/src/org/antlr/tool/TreeToNFAConverter.smap b/src/org/antlr/tool/TreeToNFAConverter.smap
new file mode 100644
index 0000000..2274d2e
--- /dev/null
+++ b/src/org/antlr/tool/TreeToNFAConverter.smap
@@ -0,0 +1,2084 @@
+SMAP
+TreeToNFAConverter.java
+G
+*S G
+*F
++ 0 buildnfa.g
+buildnfa.g
+*L
+1:3
+1:4
+1:5
+1:6
+1:8
+1:9
+1:10
+1:11
+1:12
+1:13
+1:14
+1:15
+1:16
+1:17
+1:19
+1:20
+1:21
+1:22
+1:23
+1:24
+1:25
+1:26
+1:27
+1:28
+1:29
+1:30
+1:31
+1:32
+1:33
+44:52
+45:53
+47:55
+48:56
+50:58
+51:59
+53:61
+55:63
+56:64
+58:66
+59:67
+60:68
+61:69
+62:70
+63:71
+65:73
+66:74
+67:75
+68:76
+69:77
+70:78
+71:79
+72:80
+73:81
+74:82
+75:83
+76:84
+77:85
+78:86
+79:87
+80:88
+81:89
+83:91
+84:92
+85:93
+86:94
+87:95
+88:96
+89:97
+90:98
+91:99
+92:100
+93:101
+94:102
+95:103
+96:104
+97:105
+98:106
+99:107
+100:108
+102:110
+103:111
+104:112
+105:113
+106:114
+107:115
+108:116
+109:117
+110:118
+111:119
+112:120
+114:122
+115:123
+116:124
+117:125
+118:126
+119:127
+120:128
+121:129
+122:130
+123:131
+124:132
+125:133
+126:134
+127:135
+128:136
+131:141
+131:145
+131:205
+131:206
+131:207
+131:208
+131:209
+131:210
+131:211
+132:146
+133:148
+133:149
+133:150
+133:151
+133:152
+133:153
+133:154
+133:155
+133:156
+133:157
+133:158
+133:159
+133:198
+133:199
+133:200
+133:201
+133:202
+134:162
+134:163
+134:164
+134:165
+134:166
+134:167
+134:168
+134:169
+134:170
+134:171
+135:174
+135:175
+135:176
+135:177
+135:178
+135:179
+135:180
+135:181
+135:182
+135:183
+136:186
+136:187
+136:188
+136:189
+136:190
+136:191
+136:192
+136:193
+136:194
+136:195
+138:204
+141:342
+141:346
+141:359
+141:360
+141:361
+141:362
+141:363
+141:364
+141:365
+142:347
+142:348
+142:349
+142:350
+142:351
+142:352
+142:353
+142:354
+142:355
+142:356
+142:357
+142:358
+145:213
+145:218
+145:334
+145:335
+145:336
+145:337
+145:338
+145:339
+145:340
+146:219
+146:220
+146:221
+147:216
+147:223
+147:224
+147:225
+147:226
+147:227
+147:228
+147:229
+147:240
+147:241
+147:242
+147:243
+147:244
+148:247
+148:248
+148:249
+148:250
+148:251
+148:252
+148:253
+148:254
+148:255
+148:256
+148:257
+148:258
+148:259
+148:269
+148:270
+148:271
+148:272
+148:273
+149:276
+149:277
+149:278
+149:279
+149:280
+149:281
+149:282
+149:283
+149:284
+149:285
+149:286
+149:287
+149:288
+149:297
+149:298
+149:299
+149:300
+149:301
+150:303
+150:304
+150:305
+150:306
+150:307
+150:308
+150:309
+150:310
+150:311
+150:312
+150:313
+150:315
+150:316
+151:317
+151:318
+151:319
+151:320
+151:321
+151:322
+151:323
+151:324
+151:325
+151:326
+151:327
+151:328
+151:330
+151:331
+152:332
+152:333
+155:367
+155:371
+155:388
+155:389
+155:390
+155:391
+155:392
+155:393
+155:394
+156:373
+156:374
+156:375
+156:376
+156:377
+156:378
+156:379
+156:380
+156:381
+156:382
+156:383
+156:385
+156:386
+156:387
+159:396
+159:406
+159:604
+159:605
+159:606
+159:607
+159:608
+159:609
+159:610
+160:401
+161:402
+162:403
+165:399
+165:407
+165:408
+165:409
+165:410
+165:411
+165:412
+165:413
+165:414
+165:602
+165:603
+166:415
+167:417
+167:418
+167:419
+167:420
+167:421
+167:422
+167:423
+167:424
+167:425
+167:432
+167:433
+167:434
+167:435
+167:436
+168:439
+168:440
+168:441
+168:443
+168:444
+168:445
+168:446
+168:447
+168:448
+168:449
+168:456
+168:457
+168:458
+168:459
+168:460
+169:464
+169:465
+169:466
+169:468
+169:469
+169:470
+169:471
+169:472
+169:473
+169:474
+169:484
+169:485
+169:486
+169:487
+169:488
+170:492
+170:493
+170:494
+170:495
+170:496
+170:497
+170:498
+170:507
+170:508
+170:509
+170:510
+170:511
+171:514
+171:515
+171:516
+171:517
+171:518
+171:519
+171:527
+171:528
+171:529
+171:530
+171:531
+172:533
+172:534
+172:535
+172:536
+172:537
+172:538
+172:539
+172:540
+172:541
+172:542
+172:543
+172:544
+172:546
+172:547
+173:548
+174:549
+174:550
+175:552
+175:553
+175:554
+175:555
+175:556
+175:557
+175:558
+175:565
+175:566
+175:567
+175:568
+175:569
+176:571
+176:572
+176:573
+178:575
+179:576
+180:577
+181:578
+182:579
+183:580
+184:581
+185:582
+186:583
+187:584
+188:585
+189:586
+191:588
+192:589
+193:590
+194:591
+195:592
+196:593
+197:594
+198:595
+200:597
+201:598
+202:599
+203:600
+208:612
+208:616
+208:617
+208:618
+208:647
+208:648
+208:649
+208:650
+208:651
+208:652
+208:653
+208:654
+208:655
+208:656
+208:657
+208:658
+209:619
+209:620
+209:621
+209:622
+209:623
+210:626
+210:627
+210:628
+210:629
+210:630
+211:633
+211:634
+211:635
+211:636
+211:637
+212:640
+212:641
+212:642
+212:643
+212:644
+215:660
+215:664
+215:707
+215:708
+215:709
+215:710
+215:711
+215:712
+215:713
+216:665
+216:666
+216:667
+216:668
+216:670
+216:671
+216:672
+216:673
+216:674
+216:675
+216:676
+216:684
+216:685
+216:686
+216:687
+216:688
+216:690
+216:691
+216:692
+216:693
+216:694
+216:695
+216:696
+216:697
+216:698
+216:699
+216:700
+216:701
+216:703
+216:704
+216:705
+216:706
+219:715
+219:716
+219:726
+219:733
+219:788
+219:789
+219:790
+219:791
+219:793
+219:794
+219:795
+219:796
+219:797
+219:798
+219:799
+219:800
+220:720
+221:721
+222:722
+223:723
+228:727
+228:728
+228:729
+228:730
+228:731
+229:732
+231:734
+231:735
+231:736
+231:737
+231:738
+231:740
+231:741
+231:742
+231:743
+231:744
+231:745
+231:746
+231:753
+231:754
+231:755
+231:756
+231:757
+231:784
+231:785
+232:760
+232:761
+232:762
+232:763
+232:764
+232:765
+232:766
+232:767
+232:768
+232:773
+232:774
+232:775
+232:776
+232:778
+232:779
+232:780
+234:770
+235:771
+238:781
+238:782
+238:783
+240:786
+241:787
+244:951
+244:952
+244:959
+244:991
+244:992
+244:993
+244:994
+244:995
+244:996
+244:997
+244:998
+245:956
+248:960
+248:961
+248:962
+248:963
+248:965
+248:966
+248:967
+248:968
+248:969
+248:970
+248:971
+248:972
+248:973
+248:974
+248:975
+248:976
+248:978
+248:979
+248:980
+248:981
+248:982
+250:984
+251:985
+252:986
+253:987
+254:988
+255:989
+259:802
+259:806
+259:807
+259:808
+259:854
+259:855
+259:856
+259:857
+259:858
+259:859
+259:860
+259:861
+259:862
+259:863
+259:864
+259:865
+260:809
+260:810
+260:812
+260:813
+260:814
+260:815
+260:816
+260:817
+260:818
+260:819
+260:820
+260:821
+260:822
+260:824
+260:825
+260:826
+260:828
+260:829
+260:830
+260:831
+260:832
+260:833
+260:840
+260:841
+260:842
+260:843
+260:844
+261:848
+261:849
+261:850
+261:851
+264:1297
+264:1301
+264:1314
+264:1315
+264:1316
+264:1317
+264:1318
+264:1319
+264:1320
+265:1302
+265:1303
+265:1304
+265:1305
+265:1306
+265:1307
+265:1308
+265:1309
+265:1310
+265:1311
+265:1312
+265:1313
+268:1322
+268:1326
+268:1336
+268:1337
+268:1338
+268:1339
+268:1340
+268:1341
+268:1342
+269:1327
+269:1328
+269:1329
+269:1330
+269:1331
+269:1332
+269:1333
+269:1334
+269:1335
+272:1000
+272:1004
+272:1081
+272:1082
+272:1083
+272:1084
+272:1085
+272:1086
+272:1087
+273:1005
+273:1006
+273:1007
+273:1074
+273:1075
+273:1076
+273:1077
+273:1079
+273:1080
+274:1008
+274:1009
+275:1011
+276:1012
+277:1013
+278:1014
+280:1016
+280:1017
+280:1018
+280:1019
+280:1021
+280:1022
+280:1023
+280:1024
+280:1025
+280:1026
+280:1027
+280:1036
+280:1037
+280:1038
+280:1039
+280:1040
+280:1043
+280:1044
+280:1045
+280:1046
+280:1047
+280:1048
+280:1049
+280:1052
+280:1053
+280:1054
+280:1055
+280:1056
+280:1059
+280:1060
+280:1061
+280:1062
+280:1063
+280:1066
+280:1067
+280:1068
+280:1069
+280:1070
+280:1072
+280:1073
+284:1089
+284:1090
+284:1102
+284:1103
+284:1104
+284:1283
+284:1284
+284:1285
+284:1286
+284:1287
+284:1288
+284:1289
+284:1290
+284:1291
+284:1292
+284:1293
+284:1294
+284:1295
+285:1105
+285:1106
+285:1107
+285:1108
+285:1109
+285:1110
+285:1111
+285:1112
+285:1113
+285:1114
+286:1117
+286:1118
+286:1119
+286:1120
+286:1121
+286:1122
+286:1123
+286:1124
+286:1125
+286:1126
+287:1129
+287:1130
+287:1131
+287:1132
+287:1133
+287:1134
+287:1135
+287:1136
+287:1137
+287:1138
+287:1139
+287:1140
+287:1141
+288:1144
+288:1145
+288:1146
+288:1147
+288:1148
+288:1149
+288:1150
+288:1151
+288:1152
+288:1153
+288:1154
+288:1155
+288:1156
+289:1093
+289:1094
+289:1159
+289:1160
+289:1161
+289:1162
+289:1163
+289:1164
+289:1165
+289:1166
+289:1167
+289:1168
+289:1169
+289:1170
+289:1171
+289:1172
+290:1173
+291:1174
+292:1095
+292:1096
+292:1177
+292:1178
+292:1179
+292:1180
+292:1181
+292:1182
+292:1183
+292:1184
+292:1185
+292:1186
+292:1187
+292:1188
+292:1189
+292:1190
+294:1192
+295:1193
+296:1194
+298:1198
+298:1199
+298:1200
+298:1201
+298:1202
+298:1203
+298:1204
+298:1205
+298:1206
+299:1209
+299:1210
+299:1211
+299:1212
+299:1213
+299:1214
+299:1215
+300:1218
+300:1219
+300:1220
+300:1221
+301:1224
+301:1225
+301:1226
+301:1227
+301:1228
+301:1229
+301:1230
+301:1231
+301:1232
+301:1233
+302:1236
+302:1237
+302:1238
+302:1239
+302:1240
+303:1097
+303:1243
+303:1244
+303:1245
+303:1246
+303:1247
+303:1248
+304:1098
+304:1251
+304:1252
+304:1253
+304:1254
+304:1255
+304:1256
+305:1099
+305:1259
+305:1260
+305:1261
+305:1262
+305:1263
+305:1264
+306:1100
+306:1267
+306:1268
+306:1269
+306:1270
+306:1271
+306:1272
+307:1275
+307:1276
+307:1277
+307:1278
+307:1279
+307:1280
+310:1807
+310:1808
+310:1820
+310:1821
+310:1822
+310:1908
+310:1913
+310:1928
+310:1929
+310:1930
+310:1931
+310:1932
+310:1933
+310:1934
+310:1935
+310:1936
+310:1937
+310:1938
+310:1939
+310:1940
+311:1812
+312:1813
+313:1814
+314:1815
+315:1816
+316:1817
+319:1909
+319:1910
+319:1911
+319:1912
+321:1914
+321:1915
+321:1916
+323:1918
+324:1919
+325:1920
+326:1921
+327:1922
+328:1923
+329:1924
+330:1925
+331:1926
+333:1823
+333:1824
+333:1825
+333:1826
+333:1827
+333:1828
+333:1829
+333:1830
+333:1831
+333:1832
+335:1834
+336:1835
+337:1836
+338:1837
+339:1838
+340:1839
+341:1840
+342:1841
+343:1842
+344:1843
+345:1844
+346:1845
+348:1849
+348:1850
+348:1851
+348:1852
+348:1853
+348:1854
+348:1855
+348:1856
+348:1857
+348:1858
+350:1860
+351:1861
+352:1862
+353:1863
+354:1864
+355:1865
+356:1866
+357:1867
+358:1868
+359:1869
+360:1870
+361:1871
+362:1872
+363:1873
+364:1874
+365:1875
+367:1879
+367:1880
+367:1881
+367:1882
+367:1883
+367:1884
+367:1885
+367:1886
+367:1887
+367:1888
+369:1890
+370:1891
+371:1892
+372:1893
+373:1894
+374:1895
+375:1896
+376:1897
+377:1898
+378:1899
+379:1900
+380:1901
+381:1902
+382:1903
+383:1904
+387:1942
+387:1943
+387:1952
+387:1991
+387:1992
+387:1993
+387:1994
+387:1995
+387:1996
+387:1997
+387:1998
+388:1947
+389:1948
+390:1949
+393:1953
+393:1954
+393:1955
+393:1956
+393:1989
+393:1990
+394:1957
+395:1958
+395:1959
+397:1961
+398:1962
+399:1963
+400:1964
+402:1966
+402:1967
+402:1968
+402:1969
+402:1970
+402:1971
+402:1972
+402:1973
+402:1974
+402:1975
+402:1976
+402:1977
+402:1978
+402:1980
+402:1981
+404:1983
+405:1984
+406:1985
+407:1986
+408:1987
+413:1628
+413:1629
+413:1638
+413:1639
+413:1640
+413:1793
+413:1794
+413:1795
+413:1796
+413:1797
+413:1798
+413:1799
+413:1800
+413:1801
+413:1802
+413:1803
+413:1804
+413:1805
+414:1641
+414:1642
+414:1643
+414:1644
+414:1645
+414:1646
+414:1647
+414:1648
+415:1632
+415:1651
+415:1652
+415:1653
+415:1654
+415:1655
+415:1656
+415:1789
+415:1790
+416:1633
+416:1634
+416:1658
+416:1659
+416:1660
+416:1661
+416:1662
+416:1663
+416:1664
+416:1666
+416:1667
+416:1668
+416:1669
+416:1670
+416:1671
+416:1672
+416:1673
+416:1680
+416:1681
+416:1682
+416:1683
+416:1684
+416:1782
+416:1783
+416:1784
+416:1785
+416:1786
+418:1687
+419:1688
+420:1689
+421:1690
+422:1691
+423:1692
+424:1693
+425:1694
+426:1695
+427:1696
+428:1697
+429:1698
+430:1699
+431:1700
+432:1701
+434:1635
+434:1636
+434:1705
+434:1706
+434:1707
+434:1708
+434:1709
+434:1711
+434:1712
+434:1713
+434:1714
+434:1715
+434:1716
+434:1717
+434:1718
+434:1725
+434:1726
+434:1727
+434:1728
+434:1729
+436:1732
+437:1733
+438:1734
+439:1735
+440:1736
+441:1737
+442:1738
+443:1739
+444:1740
+445:1741
+446:1742
+447:1743
+448:1744
+449:1745
+450:1746
+451:1747
+452:1748
+453:1749
+454:1750
+455:1751
+456:1752
+457:1753
+458:1754
+459:1755
+460:1756
+462:1760
+462:1761
+462:1762
+462:1763
+464:1765
+465:1766
+466:1767
+467:1768
+468:1769
+469:1770
+470:1771
+471:1772
+472:1773
+473:1774
+474:1775
+475:1776
+476:1777
+477:1778
+480:1788
+484:1344
+484:1345
+484:1361
+484:1362
+484:1363
+484:1614
+484:1615
+484:1616
+484:1617
+484:1618
+484:1619
+484:1620
+484:1621
+484:1622
+484:1623
+484:1624
+484:1625
+484:1626
+485:1348
+485:1349
+485:1350
+485:1364
+485:1365
+485:1366
+485:1367
+485:1368
+485:1369
+485:1371
+485:1372
+485:1373
+485:1374
+485:1375
+485:1376
+485:1377
+485:1386
+485:1387
+485:1388
+485:1389
+485:1390
+485:1393
+485:1394
+485:1395
+485:1396
+485:1397
+485:1398
+485:1399
+485:1400
+485:1407
+485:1408
+485:1409
+485:1410
+485:1411
+485:1413
+485:1414
+487:1416
+488:1417
+489:1418
+490:1419
+491:1420
+492:1421
+493:1422
+494:1423
+495:1424
+496:1425
+497:1426
+498:1427
+501:1351
+501:1352
+501:1353
+501:1431
+501:1432
+501:1433
+501:1434
+501:1435
+501:1436
+501:1438
+501:1439
+501:1440
+501:1441
+501:1442
+501:1443
+501:1444
+501:1453
+501:1454
+501:1455
+501:1456
+501:1457
+501:1460
+501:1461
+501:1462
+501:1463
+501:1464
+501:1465
+501:1466
+501:1467
+501:1474
+501:1475
+501:1476
+501:1477
+501:1478
+501:1480
+501:1481
+503:1483
+504:1484
+505:1485
+506:1486
+507:1487
+508:1488
+509:1489
+510:1490
+511:1491
+512:1492
+513:1493
+514:1494
+515:1495
+516:1496
+519:1354
+519:1355
+519:1500
+519:1501
+519:1502
+519:1503
+519:1504
+519:1505
+519:1507
+519:1508
+519:1509
+519:1510
+519:1511
+519:1512
+519:1513
+519:1514
+519:1521
+519:1522
+519:1523
+519:1524
+519:1525
+519:1527
+519:1528
+521:1530
+522:1531
+523:1532
+524:1533
+525:1534
+526:1535
+527:1536
+528:1537
+531:1356
+531:1357
+531:1541
+531:1542
+531:1543
+531:1544
+531:1545
+531:1546
+531:1548
+531:1549
+531:1550
+531:1551
+531:1552
+531:1553
+531:1554
+531:1555
+531:1562
+531:1563
+531:1564
+531:1565
+531:1566
+531:1568
+531:1569
+533:1571
+534:1572
+535:1573
+536:1574
+537:1575
+538:1576
+539:1577
+540:1578
+543:1358
+543:1359
+543:1582
+543:1583
+543:1584
+543:1585
+543:1586
+543:1587
+543:1589
+543:1590
+543:1591
+543:1592
+543:1593
+543:1594
+543:1595
+543:1596
+543:1603
+543:1604
+543:1605
+543:1606
+543:1607
+543:1609
+543:1610
+543:1611
+548:2000
+548:2010
+548:2011
+548:2012
+548:2027
+548:2028
+548:2029
+548:2030
+548:2031
+548:2032
+548:2033
+548:2034
+548:2035
+548:2036
+548:2037
+548:2038
+549:2004
+550:2005
+551:2006
+552:2007
+555:2013
+555:2014
+555:2015
+555:2016
+555:2017
+556:2020
+556:2021
+556:2022
+556:2023
+556:2024
+559:867
+559:868
+559:877
+559:942
+559:943
+559:944
+559:945
+559:946
+559:947
+559:948
+559:949
+560:873
+561:874
+564:871
+564:878
+564:879
+564:880
+564:881
+564:935
+564:936
+565:883
+565:884
+565:885
+565:886
+565:887
+565:888
+565:889
+565:890
+565:891
+565:893
+565:894
+565:895
+565:896
+565:897
+565:898
+565:899
+565:911
+565:912
+565:913
+565:914
+565:915
+565:917
+565:918
+565:919
+565:920
+565:921
+565:922
+565:923
+565:924
+565:925
+565:926
+565:927
+565:929
+565:930
+565:931
+566:932
+566:933
+566:934
+569:938
+570:939
+571:940
+576:2193
+576:2194
+576:2198
+576:2200
+576:2372
+576:2376
+576:2377
+576:2378
+578:2197
+578:2201
+578:2202
+578:2203
+578:2204
+578:2205
+578:2206
+578:2207
+578:2209
+578:2210
+578:2211
+578:2212
+578:2213
+578:2214
+578:2215
+578:2216
+578:2217
+578:2224
+578:2225
+578:2226
+578:2227
+578:2228
+578:2230
+578:2231
+578:2232
+578:2233
+578:2234
+578:2235
+578:2237
+578:2238
+578:2239
+578:2240
+578:2241
+578:2242
+578:2243
+578:2252
+578:2253
+578:2254
+578:2255
+578:2256
+578:2259
+578:2260
+578:2261
+578:2262
+578:2263
+578:2264
+578:2272
+578:2273
+578:2274
+578:2275
+578:2276
+578:2370
+578:2371
+579:2278
+579:2279
+579:2280
+579:2281
+579:2282
+579:2283
+579:2284
+579:2285
+579:2286
+579:2287
+579:2288
+579:2289
+579:2291
+579:2292
+580:2293
+580:2294
+580:2295
+580:2296
+580:2298
+580:2299
+580:2300
+580:2301
+580:2302
+580:2303
+580:2304
+580:2311
+580:2312
+580:2313
+580:2314
+580:2315
+580:2345
+580:2346
+581:2318
+581:2319
+581:2320
+581:2321
+581:2322
+581:2323
+581:2324
+581:2325
+581:2326
+581:2327
+581:2328
+581:2329
+581:2330
+581:2331
+581:2332
+581:2333
+581:2334
+581:2335
+581:2336
+581:2337
+581:2339
+581:2340
+581:2341
+582:2342
+582:2343
+582:2344
+584:2348
+584:2349
+584:2350
+584:2351
+584:2352
+584:2353
+584:2354
+584:2361
+584:2362
+584:2363
+584:2364
+584:2365
+585:2367
+585:2368
+585:2369
+589:2373
+589:2374
+589:2375
+591:2040
+591:2041
+591:2042
+591:2056
+591:2057
+591:2058
+591:2180
+591:2181
+591:2182
+591:2183
+591:2184
+591:2185
+591:2186
+591:2187
+591:2188
+591:2189
+591:2190
+591:2191
+592:2051
+593:2052
+594:2053
+597:2045
+597:2059
+597:2060
+597:2061
+597:2062
+597:2063
+599:2065
+600:2066
+601:2067
+602:2068
+603:2069
+604:2070
+605:2071
+606:2072
+607:2073
+608:2074
+609:2075
+610:2076
+611:2077
+613:2046
+613:2081
+613:2082
+613:2083
+613:2084
+613:2085
+615:2087
+616:2088
+617:2089
+618:2090
+619:2091
+620:2092
+621:2093
+622:2094
+623:2095
+624:2096
+625:2097
+626:2098
+627:2099
+628:2100
+629:2101
+630:2102
+631:2103
+632:2104
+633:2105
+634:2106
+635:2107
+636:2108
+637:2109
+640:2047
+640:2113
+640:2114
+640:2115
+640:2116
+640:2117
+642:2119
+643:2120
+644:2121
+645:2122
+646:2123
+647:2124
+648:2125
+649:2126
+651:2048
+651:2049
+651:2130
+651:2131
+651:2132
+651:2133
+651:2134
+651:2135
+651:2136
+651:2137
+651:2138
+651:2139
+651:2140
+651:2141
+651:2142
+651:2143
+653:2145
+654:2146
+655:2147
+656:2148
+657:2149
+660:2153
+660:2154
+660:2155
+660:2156
+662:2158
+663:2159
+666:2163
+666:2164
+666:2165
+666:2166
+666:2167
+666:2168
+666:2169
+666:2176
+666:2177
+667:2170
+667:2171
+669:2173
+670:2174
+679:2380
+679:2384
+679:2392
+679:2457
+679:2461
+679:2462
+680:2381
+680:2388
+681:2382
+681:2389
+682:2383
+684:2393
+684:2394
+684:2395
+684:2396
+684:2453
+684:2454
+685:2398
+685:2399
+685:2400
+685:2401
+685:2402
+685:2403
+685:2404
+685:2405
+685:2406
+685:2408
+685:2409
+685:2410
+685:2411
+685:2412
+685:2413
+685:2414
+685:2426
+685:2427
+685:2428
+685:2429
+685:2430
+685:2432
+685:2433
+685:2434
+685:2435
+685:2436
+685:2437
+685:2438
+685:2439
+685:2442
+685:2443
+685:2444
+685:2445
+685:2447
+685:2448
+685:2449
+686:2440
+686:2441
+688:2450
+688:2451
+688:2452
+690:2455
+690:2456
+693:2458
+693:2459
+693:2460
+695:2556
+695:2561
+695:2738
+695:2742
+695:2743
+696:2559
+696:2562
+696:2563
+696:2564
+696:2565
+696:2566
+696:2567
+696:2568
+696:2570
+696:2571
+696:2572
+696:2573
+696:2574
+696:2575
+696:2576
+696:2577
+696:2578
+696:2585
+696:2586
+696:2587
+696:2588
+696:2589
+696:2591
+696:2592
+696:2593
+696:2594
+696:2595
+696:2596
+696:2598
+696:2599
+696:2600
+696:2601
+696:2602
+696:2603
+696:2604
+696:2613
+696:2614
+696:2615
+696:2616
+696:2617
+696:2620
+696:2621
+696:2622
+696:2623
+696:2624
+696:2625
+696:2633
+696:2634
+696:2635
+696:2636
+696:2637
+696:2736
+696:2737
+697:2639
+697:2640
+697:2641
+697:2642
+697:2643
+697:2644
+697:2645
+697:2646
+697:2647
+697:2648
+697:2649
+697:2650
+697:2652
+697:2653
+698:2654
+698:2655
+698:2656
+698:2657
+698:2711
+698:2712
+699:2659
+699:2660
+699:2661
+699:2662
+699:2663
+699:2664
+699:2665
+699:2666
+699:2667
+699:2669
+699:2670
+699:2671
+699:2672
+699:2673
+699:2674
+699:2675
+699:2687
+699:2688
+699:2689
+699:2690
+699:2691
+699:2693
+699:2694
+699:2695
+699:2696
+699:2697
+699:2698
+699:2699
+699:2700
+699:2701
+699:2702
+699:2703
+699:2705
+699:2706
+699:2707
+700:2708
+700:2709
+700:2710
+702:2714
+702:2715
+702:2716
+702:2717
+702:2718
+702:2719
+702:2720
+702:2727
+702:2728
+702:2729
+702:2730
+702:2731
+703:2733
+703:2734
+703:2735
+707:2739
+707:2740
+707:2741
+710:2464
+710:2465
+710:2477
+710:2478
+710:2479
+710:2538
+710:2544
+710:2545
+710:2546
+710:2547
+710:2548
+710:2549
+710:2553
+710:2554
+711:2474
+714:2468
+714:2480
+714:2481
+714:2482
+714:2483
+714:2484
+715:2469
+715:2487
+715:2488
+715:2489
+715:2490
+715:2491
+717:2493
+718:2494
+719:2495
+720:2496
+721:2497
+722:2498
+723:2499
+724:2500
+726:2470
+726:2539
+726:2540
+726:2541
+726:2542
+726:2543
+727:2471
+727:2472
+727:2504
+727:2505
+727:2506
+727:2507
+727:2508
+727:2509
+727:2510
+727:2511
+727:2512
+727:2513
+727:2514
+727:2515
+727:2516
+727:2517
+728:2520
+728:2521
+728:2522
+728:2523
+729:2526
+729:2527
+729:2528
+729:2529
+729:2530
+729:2531
+729:2532
+729:2533
+729:2534
+729:2535
+732:2550
+732:2551
+732:2552
+*E
diff --git a/src/org/antlr/tool/TreeToNFAConverterTokenTypes.java b/src/org/antlr/tool/TreeToNFAConverterTokenTypes.java
new file mode 100644
index 0000000..893f235
--- /dev/null
+++ b/src/org/antlr/tool/TreeToNFAConverterTokenTypes.java
@@ -0,0 +1,131 @@
+// $ANTLR 2.7.7 (2006-01-29): "buildnfa.g" -> "TreeToNFAConverter.java"$
+
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+
+public interface TreeToNFAConverterTokenTypes {
+	int EOF = 1;
+	int NULL_TREE_LOOKAHEAD = 3;
+	int OPTIONS = 4;
+	int TOKENS = 5;
+	int PARSER = 6;
+	int LEXER = 7;
+	int RULE = 8;
+	int BLOCK = 9;
+	int OPTIONAL = 10;
+	int CLOSURE = 11;
+	int POSITIVE_CLOSURE = 12;
+	int SYNPRED = 13;
+	int RANGE = 14;
+	int CHAR_RANGE = 15;
+	int EPSILON = 16;
+	int ALT = 17;
+	int EOR = 18;
+	int EOB = 19;
+	int EOA = 20;
+	int ID = 21;
+	int ARG = 22;
+	int ARGLIST = 23;
+	int RET = 24;
+	int LEXER_GRAMMAR = 25;
+	int PARSER_GRAMMAR = 26;
+	int TREE_GRAMMAR = 27;
+	int COMBINED_GRAMMAR = 28;
+	int INITACTION = 29;
+	int LABEL = 30;
+	int TEMPLATE = 31;
+	int SCOPE = 32;
+	int GATED_SEMPRED = 33;
+	int SYN_SEMPRED = 34;
+	int BACKTRACK_SEMPRED = 35;
+	int FRAGMENT = 36;
+	int ACTION = 37;
+	int DOC_COMMENT = 38;
+	int SEMI = 39;
+	int LITERAL_lexer = 40;
+	int LITERAL_tree = 41;
+	int LITERAL_grammar = 42;
+	int AMPERSAND = 43;
+	int COLON = 44;
+	int RCURLY = 45;
+	int ASSIGN = 46;
+	int STRING_LITERAL = 47;
+	int CHAR_LITERAL = 48;
+	int INT = 49;
+	int STAR = 50;
+	int TOKEN_REF = 51;
+	int LITERAL_protected = 52;
+	int LITERAL_public = 53;
+	int LITERAL_private = 54;
+	int BANG = 55;
+	int ARG_ACTION = 56;
+	int LITERAL_returns = 57;
+	int LITERAL_throws = 58;
+	int COMMA = 59;
+	int LPAREN = 60;
+	int OR = 61;
+	int RPAREN = 62;
+	int LITERAL_catch = 63;
+	int LITERAL_finally = 64;
+	int PLUS_ASSIGN = 65;
+	int SEMPRED = 66;
+	int IMPLIES = 67;
+	int ROOT = 68;
+	int RULE_REF = 69;
+	int NOT = 70;
+	int TREE_BEGIN = 71;
+	int QUESTION = 72;
+	int PLUS = 73;
+	int WILDCARD = 74;
+	int REWRITE = 75;
+	int DOLLAR = 76;
+	int DOUBLE_QUOTE_STRING_LITERAL = 77;
+	int DOUBLE_ANGLE_STRING_LITERAL = 78;
+	int WS = 79;
+	int COMMENT = 80;
+	int SL_COMMENT = 81;
+	int ML_COMMENT = 82;
+	int OPEN_ELEMENT_OPTION = 83;
+	int CLOSE_ELEMENT_OPTION = 84;
+	int ESC = 85;
+	int DIGIT = 86;
+	int XDIGIT = 87;
+	int NESTED_ARG_ACTION = 88;
+	int NESTED_ACTION = 89;
+	int ACTION_CHAR_LITERAL = 90;
+	int ACTION_STRING_LITERAL = 91;
+	int ACTION_ESC = 92;
+	int WS_LOOP = 93;
+	int INTERNAL_RULE_REF = 94;
+	int WS_OPT = 95;
+	int SRC = 96;
+}
diff --git a/src/org/antlr/tool/TreeToNFAConverterTokenTypes.txt b/src/org/antlr/tool/TreeToNFAConverterTokenTypes.txt
new file mode 100644
index 0000000..f799624
--- /dev/null
+++ b/src/org/antlr/tool/TreeToNFAConverterTokenTypes.txt
@@ -0,0 +1,95 @@
+// $ANTLR 2.7.7 (2006-01-29): buildnfa.g -> TreeToNFAConverterTokenTypes.txt$
+TreeToNFAConverter    // output token vocab name
+OPTIONS="options"=4
+TOKENS="tokens"=5
+PARSER="parser"=6
+LEXER=7
+RULE=8
+BLOCK=9
+OPTIONAL=10
+CLOSURE=11
+POSITIVE_CLOSURE=12
+SYNPRED=13
+RANGE=14
+CHAR_RANGE=15
+EPSILON=16
+ALT=17
+EOR=18
+EOB=19
+EOA=20
+ID=21
+ARG=22
+ARGLIST=23
+RET=24
+LEXER_GRAMMAR=25
+PARSER_GRAMMAR=26
+TREE_GRAMMAR=27
+COMBINED_GRAMMAR=28
+INITACTION=29
+LABEL=30
+TEMPLATE=31
+SCOPE="scope"=32
+GATED_SEMPRED=33
+SYN_SEMPRED=34
+BACKTRACK_SEMPRED=35
+FRAGMENT="fragment"=36
+ACTION=37
+DOC_COMMENT=38
+SEMI=39
+LITERAL_lexer="lexer"=40
+LITERAL_tree="tree"=41
+LITERAL_grammar="grammar"=42
+AMPERSAND=43
+COLON=44
+RCURLY=45
+ASSIGN=46
+STRING_LITERAL=47
+CHAR_LITERAL=48
+INT=49
+STAR=50
+TOKEN_REF=51
+LITERAL_protected="protected"=52
+LITERAL_public="public"=53
+LITERAL_private="private"=54
+BANG=55
+ARG_ACTION=56
+LITERAL_returns="returns"=57
+LITERAL_throws="throws"=58
+COMMA=59
+LPAREN=60
+OR=61
+RPAREN=62
+LITERAL_catch="catch"=63
+LITERAL_finally="finally"=64
+PLUS_ASSIGN=65
+SEMPRED=66
+IMPLIES=67
+ROOT=68
+RULE_REF=69
+NOT=70
+TREE_BEGIN=71
+QUESTION=72
+PLUS=73
+WILDCARD=74
+REWRITE=75
+DOLLAR=76
+DOUBLE_QUOTE_STRING_LITERAL=77
+DOUBLE_ANGLE_STRING_LITERAL=78
+WS=79
+COMMENT=80
+SL_COMMENT=81
+ML_COMMENT=82
+OPEN_ELEMENT_OPTION=83
+CLOSE_ELEMENT_OPTION=84
+ESC=85
+DIGIT=86
+XDIGIT=87
+NESTED_ARG_ACTION=88
+NESTED_ACTION=89
+ACTION_CHAR_LITERAL=90
+ACTION_STRING_LITERAL=91
+ACTION_ESC=92
+WS_LOOP=93
+INTERNAL_RULE_REF=94
+WS_OPT=95
+SRC=96
diff --git a/src/org/antlr/tool/antlr.g b/src/org/antlr/tool/antlr.g
new file mode 100644
index 0000000..7180759
--- /dev/null
+++ b/src/org/antlr/tool/antlr.g
@@ -0,0 +1,1222 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import java.io.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import antlr.*;
+}
+
+/** Read in an ANTLR grammar and build an AST.  Try not to do
+ *  any actions, just build the tree.
+ *
+ *  The phases are:
+ *
+ *		antlr.g (this file)
+ *		assign.types.g
+ *		define.g
+ *		buildnfa.g
+ *		antlr.print.g (optional)
+ *		codegen.g
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  2005
+ */
+class ANTLRParser extends Parser;
+options {
+    buildAST = true;
+	exportVocab=ANTLR;
+    ASTLabelType="GrammarAST";
+	k=2;
+}
+
+tokens {
+	OPTIONS="options";
+	TOKENS="tokens";
+	PARSER="parser";
+	
+    LEXER;
+    RULE;
+    BLOCK;
+    OPTIONAL;
+    CLOSURE;
+    POSITIVE_CLOSURE;
+    SYNPRED;
+    RANGE;
+    CHAR_RANGE;
+    EPSILON;
+    ALT;
+    EOR;
+    EOB;
+    EOA; // end of alt
+    ID;
+    ARG;
+    ARGLIST;
+    RET;
+    LEXER_GRAMMAR;
+    PARSER_GRAMMAR;
+    TREE_GRAMMAR;
+    COMBINED_GRAMMAR;
+    INITACTION;
+    LABEL; // $x used in rewrite rules
+    TEMPLATE;
+    SCOPE="scope";
+    GATED_SEMPRED; // {p}? =>
+    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
+    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
+    FRAGMENT="fragment";
+}
+
+{
+	Grammar grammar = null;
+	protected int gtype = 0;
+	protected String currentRuleName = null;
+	protected GrammarAST currentBlockAST = null;
+
+	/* this next stuff supports construction of the Tokens artificial rule.
+	   I hate having some partial functionality here, I like doing everything
+	   in future tree passes, but the Tokens rule is sensitive to filter mode.
+	   And if it adds syn preds, future tree passes will need to process the
+	   fragments defined in Tokens; a cyclic dependency.
+	   As of 1-17-06 then, Tokens is created for lexer grammars in the
+	   antlr grammar parser itself.
+
+	   This grammar is also sensitive to the backtrack grammar option that
+	   tells ANTLR to automatically backtrack when it can't compute a DFA.
+
+	   7-2-06 I moved all option processing to antlr.g from define.g as I
+	   need backtrack option etc... for blocks.  Got messy.
+	*/
+	protected List lexerRuleNames = new ArrayList();
+	public List getLexerRuleNames() { return lexerRuleNames; }
+
+	protected GrammarAST setToBlockWithSet(GrammarAST b) {
+		GrammarAST alt = #(#[ALT,"ALT"],#b,#[EOA,"<end-of-alt>"]);
+		prefixWithSynPred(alt);
+		return #(#[BLOCK,"BLOCK"],
+		           alt,
+		           #[EOB,"<end-of-block>"]
+		        );
+	}
+
+	/** Create a copy of the alt and make it into a BLOCK; all actions,
+	 *  labels, tree operators, rewrites are removed.
+	 */
+	protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {
+		//GrammarAST nalt = (GrammarAST)astFactory.dupTree(alt);
+		GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);
+		GrammarAST blk = #(#[BLOCK,"BLOCK"],
+						   nalt,
+						   #[EOB,"<end-of-block>"]
+						  );
+		return blk;
+	}
+
+	/** Rewrite alt to have a synpred as first element;
+	 *  (xxx)=>xxx
+	 *  but only if they didn't specify one manually.
+	 */
+	protected void prefixWithSynPred(GrammarAST alt) {
+		// if they want backtracking and it's not a lexer rule in combined grammar
+		String autoBacktrack = (String)currentBlockAST.getOption("backtrack");
+		if ( autoBacktrack==null ) {
+			autoBacktrack = (String)grammar.getOption("backtrack");
+		}
+		if ( autoBacktrack!=null&&autoBacktrack.equals("true") &&
+			 !(gtype==COMBINED_GRAMMAR &&
+			 Character.isUpperCase(currentRuleName.charAt(0))) &&
+			 alt.getFirstChild().getType()!=SYN_SEMPRED )
+		{
+			// duplicate alt and make a synpred block around that dup'd alt
+			GrammarAST synpredBlockAST = createBlockFromDupAlt(alt);
+
+			// Create a BACKTRACK_SEMPRED node as if user had typed this in
+			// Effectively we replace (xxx)=>xxx with {synpredxxx}? xxx
+			GrammarAST synpredAST = createSynSemPredFromBlock(synpredBlockAST,
+															  BACKTRACK_SEMPRED);
+
+			// insert BACKTRACK_SEMPRED as first element of alt
+			synpredAST.getLastSibling().setNextSibling(alt.getFirstChild());
+			alt.setFirstChild(synpredAST);
+		}
+	}
+
+	protected GrammarAST createSynSemPredFromBlock(GrammarAST synpredBlockAST,
+												   int synpredTokenType)
+	{
+		// add grammar fragment to a list so we can make fake rules for them
+		// later.
+		String predName = grammar.defineSyntacticPredicate(synpredBlockAST,currentRuleName);
+		// convert (alpha)=> into {synpredN}? where N is some pred count
+		// during code gen we convert to function call with templates
+		String synpredinvoke = predName;
+		GrammarAST p = #[synpredTokenType,synpredinvoke];
+		p.setEnclosingRule(currentRuleName);
+		// track how many decisions have synpreds
+		grammar.blocksWithSynPreds.add(currentBlockAST);
+		return p;
+	}
+
+	public GrammarAST createSimpleRuleAST(String name,
+										  GrammarAST block,
+										  boolean fragment)
+   {
+   		GrammarAST modifier = null;
+   		if ( fragment ) {
+   			modifier = #[FRAGMENT,"fragment"];
+   		}
+   		GrammarAST EORAST = #[EOR,"<end-of-rule>"];
+   		GrammarAST EOBAST = block.getLastChild();
+		EORAST.setLine(EOBAST.getLine());
+		EORAST.setColumn(EOBAST.getColumn());
+		GrammarAST ruleAST =
+		   #([RULE,"rule"],
+                 [ID,name],modifier,[ARG,"ARG"],[RET,"RET"],
+				 [SCOPE,"scope"],block,EORAST);
+		ruleAST.setLine(block.getLine());
+		ruleAST.setColumn(block.getColumn());
+		return ruleAST;
+	}
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		try {
+			token = LT(1);
+		}
+		catch (TokenStreamException tse) {
+			ErrorManager.internalError("can't get token???", tse);
+		}
+		ErrorManager.syntaxError(
+			ErrorManager.MSG_SYNTAX_ERROR,
+			grammar,
+			token,
+			"antlr: "+ex.toString(),
+			ex);
+    }
+
+    public void cleanup(GrammarAST root) {
+		if ( gtype==LEXER_GRAMMAR ) {
+			String filter = (String)grammar.getOption("filter");
+			GrammarAST tokensRuleAST =
+			    grammar.addArtificialMatchTokensRule(
+			    	root,
+			    	lexerRuleNames,
+			    	filter!=null&&filter.equals("true"));
+		}
+    }
+}
+
+grammar![Grammar g]
+{
+	this.grammar = g;
+	GrammarAST opt=null;
+	Token optionsStartToken = null;
+	Map opts;
+}
+   :    //hdr:headerSpec
+        ( ACTION )?
+	    ( cmt:DOC_COMMENT  )?
+        gr:grammarType gid:id SEMI
+			( {optionsStartToken=LT(1);}
+			  opts=optionsSpec {grammar.setOptions(opts, optionsStartToken);}
+			  {opt=(GrammarAST)returnAST;}
+			)?
+		    (ts:tokensSpec!)?
+        	scopes:attrScopes
+		    (a:actions)?
+	        r:rules
+        EOF
+        {
+        #grammar = #(null, #(#gr, #gid, #cmt, opt, #ts, #scopes, #a, #r));
+        cleanup(#grammar);
+        }
+	;
+
+grammarType
+    :   (	"lexer"!  {gtype=LEXER_GRAMMAR;}    // pure lexer
+    	|   "parser"! {gtype=PARSER_GRAMMAR;}   // pure parser
+    	|   "tree"!   {gtype=TREE_GRAMMAR;}     // a tree parser
+    	|			  {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
+    	)
+    	gr:"grammar" {#gr.setType(gtype);}
+    ;
+
+actions
+	:	(action)+
+	;
+
+/** Match stuff like @parser::members {int i;} */
+action
+	:	AMPERSAND^ (actionScopeName COLON! COLON!)? id ACTION
+	;
+
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+actionScopeName
+	:	id
+	|	l:"lexer"	{#l.setType(ID);}
+    |   p:"parser"	{#p.setType(ID);}
+	;
+
+/*
+optionsSpec returns [Map opts=new HashMap()]
+    :   #( OPTIONS (option[opts])+ )
+    ;
+
+option[Map opts]
+{
+    String key=null;
+    Object value=null;
+}
+    :   #( ASSIGN id:ID {key=#id.getText();} value=optionValue )
+        {opts.put(key,value);}
+    ;
+*/
+
+optionsSpec returns [Map opts=new HashMap()]
+	:	OPTIONS^ (option[opts] SEMI!)+ RCURLY!
+	;
+
+option[Map opts]
+{
+    Object value=null;
+}
+    :   o:id ASSIGN^ value=optionValue
+    	{
+    	opts.put(#o.getText(), value);
+    	}
+    	/*
+    	{
+    	if ( #o.getText().equals("filter") && #v.getText().equals("true") ) {
+    		isFilterMode = true;
+    	}
+    	else if ( #o.getText().equals("backtrack") && #v.getText().equals("true") ) {
+    		if ( currentRuleName==null ) { // must grammar level
+    			isAutoBacktrackMode = true;
+    		}
+    		else {
+    			blockAutoBacktrackMode = true;
+    		}
+    	}
+    	}
+    	*/
+    ;
+
+optionValue returns [Object value=null]
+    :   x:id			 {value = #x.getText();}
+    |   s:STRING_LITERAL {String vs = #s.getText();
+                          value=vs.substring(1,vs.length()-1);}
+    |   c:CHAR_LITERAL   {String vs = #c.getText();
+                          value=vs.substring(1,vs.length()-1);}
+    |   i:INT            {value = new Integer(#i.getText());}
+    |	ss:STAR			 {#ss.setType(STRING_LITERAL); value = "*";} // used for k=*
+//  |   cs:charSet       {value = #cs;} // return set AST in this case
+    ;
+
+/*
+optionValue
+	:	id
+	|   STRING_LITERAL
+	|	CHAR_LITERAL
+	|	INT
+//	|   cs:charSet       {value = #cs;} // return set AST in this case
+	;
+*/
+
+tokensSpec
+	:	TOKENS^
+			( tokenSpec	)+
+		RCURLY!
+	;
+
+tokenSpec
+	:	TOKEN_REF ( ASSIGN^ (STRING_LITERAL|CHAR_LITERAL) )? SEMI!
+	;
+
+attrScopes
+	:	(attrScope)*
+	;
+
+attrScope
+	:	"scope"^ id ACTION
+	;
+
+rules
+    :   (
+			options {
+				// limitation of appox LL(k) says ambig upon
+				// DOC_COMMENT TOKEN_REF, but that's an impossible sequence
+				warnWhenFollowAmbig=false;
+			}
+		:	//{g.type==PARSER}? (aliasLexerRule)=>aliasLexerRule |
+			rule
+		)+
+    ;
+
+rule!
+{
+GrammarAST modifier=null, blk=null, blkRoot=null, eob=null;
+int start = ((TokenWithIndex)LT(1)).getIndex();
+int startLine = LT(1).getLine();
+GrammarAST opt = null;
+Map opts = null;
+}
+	:
+	(	d:DOC_COMMENT	
+	)?
+	(	p1:"protected"	{modifier=#p1;}
+	|	p2:"public"		{modifier=#p2;}
+	|	p3:"private"    {modifier=#p3;}
+	|	p4:"fragment"	{modifier=#p4;}
+	)?
+	ruleName:id
+	{currentRuleName=#ruleName.getText();
+     if ( gtype==LEXER_GRAMMAR && #p4==null ) {
+         lexerRuleNames.add(currentRuleName);
+	 }
+	}
+	( BANG )?
+	( aa:ARG_ACTION )?
+	( "returns" rt:ARG_ACTION  )?
+	( throwsSpec )?
+    ( opts=optionsSpec {opt=(GrammarAST)returnAST;} )?
+	scopes:ruleScopeSpec
+	(a:ruleActions)?
+	colon:COLON
+	{
+	blkRoot = #[BLOCK,"BLOCK"];
+	blkRoot.options = opts;
+	blkRoot.setLine(colon.getLine());
+	blkRoot.setColumn(colon.getColumn());
+	eob = #[EOB,"<end-of-block>"];
+    }
+    /*
+	(	{!currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME)}?
+		(setNoParens SEMI) => s:setNoParens // try to collapse sets
+		{
+		blk = #(blkRoot,#(#[ALT,"ALT"],#s,#[EOA,"<end-of-alt>"]),eob);
+		}
+	|	b:altList[opts] {blk = #b;}
+	)
+	*/
+	b:altList[opts] {blk = #b;}
+	semi:SEMI
+	( ex:exceptionGroup )?
+    {
+    int stop = ((TokenWithIndex)LT(1)).getIndex()-1; // point at the semi or exception thingie
+	eob.setLine(semi.getLine());
+	eob.setColumn(semi.getColumn());
+    GrammarAST eor = #[EOR,"<end-of-rule>"];
+   	eor.setEnclosingRule(#ruleName.getText());
+	eor.setLine(semi.getLine());
+	eor.setColumn(semi.getColumn());
+	GrammarAST root = #[RULE,"rule"];
+	root.ruleStartTokenIndex = start;
+	root.ruleStopTokenIndex = stop;
+	root.setLine(startLine);
+	root.options = opts;
+    #rule = #(root,
+              #ruleName,modifier,#(#[ARG,"ARG"],#aa),#(#[RET,"RET"],#rt),
+              opt,#scopes,#a,blk,ex,eor);
+	currentRuleName=null;
+    }
+	;
+
+ruleActions
+	:	(ruleAction)+
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	AMPERSAND^ id ACTION
+	;
+
+throwsSpec
+	:	"throws" id ( COMMA id )*
+		
+	;
+
+ruleScopeSpec
+{
+int line = LT(1).getLine();
+int column = LT(1).getColumn();
+}
+	:!	( options {warnWhenFollowAmbig=false;} : "scope" a:ACTION )?
+		( "scope" ids:idList SEMI! )*
+		{
+		GrammarAST scopeRoot = (GrammarAST)#[SCOPE,"scope"];
+		scopeRoot.setLine(line);
+		scopeRoot.setColumn(column);
+		#ruleScopeSpec = #(scopeRoot, #a, #ids);
+		}
+	;
+
+/** Build #(BLOCK ( #(ALT ...) EOB )+ ) */
+block
+{
+GrammarAST save = currentBlockAST;
+Map opts=null;
+}
+    :   /*
+        (set) => s:set  // special block like ('a'|'b'|'0'..'9')
+
+    |	*/
+
+    	lp:LPAREN^ {#lp.setType(BLOCK); #lp.setText("BLOCK");}
+		(
+			// 2nd alt and optional branch ambig due to
+			// linear approx LL(2) issue.  COLON ACTION
+			// matched correctly in 2nd alt.
+			options {
+				warnWhenFollowAmbig = false;
+			}
+		:
+            (opts=optionsSpec {#block.setOptions(grammar,opts);})?
+            ( ruleActions )?
+            COLON!
+		|	ACTION COLON!
+		)?
+
+		{currentBlockAST = #lp;}
+
+		a1:alternative rewrite
+		{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a1);}
+		( OR! a2:alternative rewrite
+		  {if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a2);}
+		)*
+
+        rp:RPAREN!
+        {
+		currentBlockAST = save;
+        GrammarAST eob = #[EOB,"<end-of-block>"];
+        eob.setLine(rp.getLine());
+        eob.setColumn(rp.getColumn());
+        #block.addChild(eob);
+        }
+    ;
+
+altList[Map opts]
+{
+	GrammarAST blkRoot = #[BLOCK,"BLOCK"];
+	blkRoot.options = opts;
+	blkRoot.setLine(LT(0).getLine()); // set to : or (
+	blkRoot.setColumn(LT(0).getColumn());
+	GrammarAST save = currentBlockAST;
+	currentBlockAST = #blkRoot;
+}
+    :   a1:alternative rewrite
+		{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a1);}
+    	( OR! a2:alternative rewrite
+    	  {if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a2);} )*
+        {
+        #altList = #(blkRoot,#altList,#[EOB,"<end-of-block>"]);
+        currentBlockAST = save;
+        }
+    ;
+
+alternative
+{
+    GrammarAST eoa = #[EOA, "<end-of-alt>"];
+    GrammarAST altRoot = #[ALT,"ALT"];
+    altRoot.setLine(LT(1).getLine());
+    altRoot.setColumn(LT(1).getColumn());
+}
+    :   ( el:element )+
+        {
+            if ( #alternative==null ) {
+                #alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);
+            }
+            else {
+            	// we have a real list of stuff
+               	#alternative = #(altRoot, #alternative, eoa);
+            }
+        }
+    |   {
+    	GrammarAST eps = #[EPSILON,"epsilon"];
+		eps.setLine(LT(0).getLine()); // get line/col of '|' or ':' (prev token)
+		eps.setColumn(LT(0).getColumn());
+    	#alternative = #(altRoot,eps,eoa);
+    	}
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ ( finallyClause )?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    "catch"^ ARG_ACTION ACTION
+    ;
+
+finallyClause
+    :    "finally"^ ACTION
+    ;
+
+element
+	:	elementNoOptionSpec
+	;
+
+elementNoOptionSpec
+{
+    IntSet elements=null;
+    GrammarAST sub, sub2;
+}
+	:	id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
+        ( sub=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub;} )?
+    |   atom
+        ( sub2=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub2;} )?
+    |	ebnf
+	|   ACTION
+	|   p:SEMPRED ( IMPLIES! {#p.setType(GATED_SEMPRED);} )?
+		{
+		#p.setEnclosingRule(currentRuleName);
+		grammar.blocksWithSemPreds.add(currentBlockAST);
+		}
+	|   t3:tree
+	;
+
+atom:   range (ROOT^|BANG^)?
+    |   terminal
+    |	notSet (ROOT^|BANG^)?
+    |   rr:RULE_REF^
+		( ARG_ACTION )?
+		(ROOT^|BANG^)?
+    ;
+
+notSet
+{
+    int line = LT(1).getLine();
+    int col = LT(1).getColumn();
+    GrammarAST subrule=null;
+}
+	:	n:NOT^
+		(	notTerminal
+        |   block
+		)
+        {#notSet.setLine(line); #notSet.setColumn(col);}
+	;
+
+tree :
+	TREE_BEGIN^
+        element ( element )+
+    RPAREN!
+	;
+
+/** matches ENBF blocks (and sets via block rule) */
+ebnf!
+{
+    int line = LT(1).getLine();
+    int col = LT(1).getColumn();
+}
+	:	b:block
+		(	QUESTION    {#ebnf=#([OPTIONAL,"?"],#b);}
+		|	STAR	    {#ebnf=#([CLOSURE,"*"],#b);}
+		|	PLUS	    {#ebnf=#([POSITIVE_CLOSURE,"+"],#b);}
+		|   IMPLIES! // syntactic predicate
+			{
+			if ( gtype==COMBINED_GRAMMAR &&
+			     Character.isUpperCase(currentRuleName.charAt(0)) )
+		    {
+                // ignore for lexer rules in combined
+		    	#ebnf = #(#[SYNPRED,"=>"],#b); 
+		    }
+		    else {
+		    	// create manually specified (...)=> predicate;
+                // convert to sempred
+		    	#ebnf = createSynSemPredFromBlock(#b, SYN_SEMPRED);
+			}
+			}
+		|   ROOT {#ebnf = #(#ROOT, #b);}
+		|   BANG {#ebnf = #(#BANG, #b);}
+        |   {#ebnf = #b;}
+		)
+		{#ebnf.setLine(line); #ebnf.setColumn(col);}
+	;
+
+range!
+{
+GrammarAST subrule=null, root=null;
+}
+	:	c1:CHAR_LITERAL RANGE c2:CHAR_LITERAL
+		{
+		GrammarAST r = #[CHAR_RANGE,".."];
+		r.setLine(c1.getLine());
+		r.setColumn(c1.getColumn());
+		#range = #(r, #c1, #c2);
+		root = #range;
+		}
+//    	(subrule=ebnfSuffix[root,false] {#range=subrule;})?
+	;
+
+terminal
+{
+GrammarAST ebnfRoot=null, subrule=null;
+}
+    :   cl:CHAR_LITERAL^ (ROOT^|BANG^)?
+
+	|   tr:TOKEN_REF^
+			( ARG_ACTION )? // Args are only valid for lexer rules
+            (ROOT^|BANG^)?
+
+	|   sl:STRING_LITERAL (ROOT^|BANG^)?
+
+	|   wi:WILDCARD (ROOT^|BANG^)?
+	;
+
+ebnfSuffix[GrammarAST elemAST, boolean inRewrite] returns [GrammarAST subrule=null]
+{
+GrammarAST ebnfRoot=null;
+}
+	:!	(	QUESTION {ebnfRoot = #[OPTIONAL,"?"];}
+   		|	STAR     {ebnfRoot = #[CLOSURE,"*"];}
+   		|	PLUS     {ebnfRoot = #[POSITIVE_CLOSURE,"+"];}
+   		)
+    	{
+		GrammarAST save = currentBlockAST;
+       	ebnfRoot.setLine(elemAST.getLine());
+       	ebnfRoot.setColumn(elemAST.getColumn());
+    	GrammarAST blkRoot = #[BLOCK,"BLOCK"];
+    	currentBlockAST = blkRoot;
+       	GrammarAST eob = #[EOB,"<end-of-block>"];
+		eob.setLine(elemAST.getLine());
+		eob.setColumn(elemAST.getColumn());
+		GrammarAST alt = #(#[ALT,"ALT"],elemAST,#[EOA,"<end-of-alt>"]);
+    	if ( !inRewrite ) {
+    		prefixWithSynPred(alt);
+    	}
+  		subrule =
+  		     #(ebnfRoot,
+  		       #(blkRoot,alt,eob)
+  		      );
+  		currentBlockAST = save;
+   		}
+    ;
+
+notTerminal
+	:   cl:CHAR_LITERAL
+	|	tr:TOKEN_REF
+	|	STRING_LITERAL
+	;
+
+idList
+	:	(id)+
+	;
+
+id	:	TOKEN_REF {#id.setType(ID);}
+	|	RULE_REF  {#id.setType(ID);}
+	;
+
+/** Match anything that looks like an ID and return tree as token type ID */
+idToken
+    :	TOKEN_REF {#idToken.setType(ID);}
+	|	RULE_REF  {#idToken.setType(ID);}
+	;
+
+// R E W R I T E  S Y N T A X
+
+rewrite
+{
+    GrammarAST root = new GrammarAST();
+}
+	:!
+		( options { warnWhenFollowAmbig=false;}
+		: rew:REWRITE pred:SEMPRED alt:rewrite_alternative
+	      {root.addChild( #(#rew, #pred, #alt) );}
+		  {
+          #pred.setEnclosingRule(currentRuleName);
+          #rew.setEnclosingRule(currentRuleName);
+          }
+	    )*
+		rew2:REWRITE alt2:rewrite_alternative
+        {
+        root.addChild( #(#rew2, #alt2) );
+        #rewrite = (GrammarAST)root.getFirstChild();
+        }
+	|
+	;
+
+rewrite_block
+    :   lp:LPAREN^ {#lp.setType(BLOCK); #lp.setText("BLOCK");}
+		rewrite_alternative
+        RPAREN!
+        {
+        GrammarAST eob = #[EOB,"<end-of-block>"];
+        eob.setLine(lp.getLine());
+        eob.setColumn(lp.getColumn());
+        #rewrite_block.addChild(eob);
+        }
+    ;
+
+rewrite_alternative
+{
+    GrammarAST eoa = #[EOA, "<end-of-alt>"];
+    GrammarAST altRoot = #[ALT,"ALT"];
+    altRoot.setLine(LT(1).getLine());
+    altRoot.setColumn(LT(1).getColumn());
+}
+    :	{grammar.buildTemplate()}? rewrite_template
+
+    |	{grammar.buildAST()}? ( rewrite_element )+
+        {
+            if ( #rewrite_alternative==null ) {
+                #rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);
+            }
+            else {
+                #rewrite_alternative = #(altRoot, #rewrite_alternative,eoa);
+            }
+        }
+
+   	|   {#rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);}
+    ;
+
+rewrite_element
+{
+GrammarAST subrule=null;
+}
+	:	t:rewrite_atom
+    	( subrule=ebnfSuffix[#t,true] {#rewrite_element=subrule;} )?
+	|   rewrite_ebnf
+	|   tr:rewrite_tree
+    	( subrule=ebnfSuffix[#tr,true] {#rewrite_element=subrule;} )?
+	;
+
+rewrite_atom
+{
+GrammarAST subrule=null;
+}
+    :   cl:CHAR_LITERAL
+	|   tr:TOKEN_REF^ (ARG_ACTION)? // for imaginary nodes
+    |   rr:RULE_REF
+	|   sl:STRING_LITERAL
+	|!  d:DOLLAR i:id // reference to a label in a rewrite rule
+		{
+		#rewrite_atom = #[LABEL,i_AST.getText()];
+		#rewrite_atom.setLine(#d.getLine());
+		#rewrite_atom.setColumn(#d.getColumn());
+        #rewrite_atom.setEnclosingRule(currentRuleName);
+		}
+	|	ACTION
+	;
+
+rewrite_ebnf!
+{
+    int line = LT(1).getLine();
+    int col = LT(1).getColumn();
+}
+	:	b:rewrite_block
+		(	QUESTION    {#rewrite_ebnf=#([OPTIONAL,"?"],#b);}
+		|	STAR	    {#rewrite_ebnf=#([CLOSURE,"*"],#b);}
+		|	PLUS	    {#rewrite_ebnf=#([POSITIVE_CLOSURE,"+"],#b);}
+		)
+		{#rewrite_ebnf.setLine(line); #rewrite_ebnf.setColumn(col);}
+	;
+
+rewrite_tree :
+	TREE_BEGIN^
+        rewrite_atom ( rewrite_element )*
+    RPAREN!
+	;
+
+/** Build a tree for a template rewrite:
+      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+    where ARGLIST is always there even if no args exist.
+    ID can be "template" keyword.  If first child is ACTION then it's
+    an indirect template ref
+
+    -> foo(a={...}, b={...})
+    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
+    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+rewrite_template
+{Token st=null;}
+	:   // -> template(a={...},...) "..."
+		{LT(1).getText().equals("template")}? // inline
+		rewrite_template_head {st=LT(1);}
+		( DOUBLE_QUOTE_STRING_LITERAL! | DOUBLE_ANGLE_STRING_LITERAL! )
+		{#rewrite_template.addChild(#[st]);}
+
+	|	// -> foo(a={...}, ...)
+		rewrite_template_head
+
+	|	// -> ({expr})(a={...}, ...)
+		rewrite_indirect_template_head
+
+	|	// -> {...}
+		ACTION
+	;
+
+/** -> foo(a={...}, ...) */
+rewrite_template_head
+	:	id lp:LPAREN^ {#lp.setType(TEMPLATE); #lp.setText("TEMPLATE");}
+		rewrite_template_args
+		RPAREN!
+	;
+
+/** -> ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	lp:LPAREN^ {#lp.setType(TEMPLATE); #lp.setText("TEMPLATE");}
+		ACTION
+		RPAREN!
+		LPAREN! rewrite_template_args RPAREN!
+	;
+
+rewrite_template_args
+	:	rewrite_template_arg (COMMA! rewrite_template_arg)*
+		{#rewrite_template_args = #(#[ARGLIST,"ARGLIST"], rewrite_template_args);}
+	|	{#rewrite_template_args = #[ARGLIST,"ARGLIST"];}
+	;
+
+rewrite_template_arg
+	:   id a:ASSIGN^ {#a.setType(ARG); #a.setText("ARG");} ACTION
+	;
+
+class ANTLRLexer extends Lexer;
+options {
+	k=2;
+	exportVocab=ANTLR;
+	testLiterals=false;
+	interactive=true;
+	charVocabulary='\003'..'\377';
+}
+
+{
+    /** advance the current column number by one; don't do tabs.
+     *  we want char position in line to be sent to AntlrWorks.
+     */
+    public void tab() {
+		setColumn( getColumn()+1 );
+    }
+}
+
+WS	:	(	' '
+		|	'\t'
+		|	('\r')? '\n' {newline();}
+		)
+	;
+
+COMMENT :
+	( SL_COMMENT | t:ML_COMMENT {$setType(t.getType());} )
+	;
+
+protected
+SL_COMMENT
+ 	:	"//"
+ 	 	(	(" $ANTLR")=> " $ANTLR " SRC ('\r')? '\n' // src directive
+ 		|	( options {greedy=false;} : . )* ('\r')? '\n'
+		)
+		{ newline(); }
+	;
+
+protected
+ML_COMMENT :
+	"/*"
+	(	{ LA(2)!='/' }? '*' {$setType(DOC_COMMENT);}
+	|
+	)
+	(
+		options {
+			greedy=false;  // make it exit upon "*/"
+		}
+	:	'\r' '\n'	{newline();}
+	|	'\n'		{newline();}
+	|	~('\n'|'\r')
+	)*
+	"*/"
+	;
+
+OPEN_ELEMENT_OPTION
+	:	'<'
+	;
+
+CLOSE_ELEMENT_OPTION
+	:	'>'
+	;
+
+AMPERSAND : '@';
+
+COMMA : ',';
+
+QUESTION :	'?' ;
+
+TREE_BEGIN : "^(" ;
+
+LPAREN:	'(' ;
+
+RPAREN:	')' ;
+
+COLON :	':' ;
+
+STAR:	'*' ;
+
+PLUS:	'+' ;
+
+ASSIGN : '=' ;
+
+PLUS_ASSIGN : "+=" ;
+
+IMPLIES : "=>" ;
+
+REWRITE : "->" ;
+
+SEMI:	';' ;
+
+ROOT : '^' ;
+
+BANG : '!' ;
+
+OR	:	'|' ;
+
+WILDCARD : '.' ;
+
+RANGE : ".." ;
+
+NOT :	'~' ;
+
+RCURLY:	'}'	;
+
+DOLLAR : '$' ;
+
+CHAR_LITERAL
+	:	'\'' (ESC|'\n'{newline();}|~'\'')* '\''
+		{
+		StringBuffer s = Grammar.getUnescapedStringFromGrammarStringLiteral($getText);
+		if ( s.length()>1 ) {
+			$setType(STRING_LITERAL);
+		}
+		}
+	;
+
+DOUBLE_QUOTE_STRING_LITERAL
+	:	'"' ('\\'! '"'|'\n'{newline();}|~'"')* '"'
+	;
+
+DOUBLE_ANGLE_STRING_LITERAL
+	:	"<<" (options {greedy=false;}:'\n'{newline();}|.)* ">>"
+	;
+
+protected
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	('0'..'3')
+			(
+				options {
+					warnWhenFollowAmbig = false;
+				}
+			:
+			('0'..'9')
+				(
+					options {
+						warnWhenFollowAmbig = false;
+					}
+				:
+				'0'..'9'
+				)?
+			)?
+		|	('4'..'7')
+			(
+				options {
+					warnWhenFollowAmbig = false;
+				}
+			:
+			('0'..'9')
+			)?
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+
+protected
+DIGIT
+	:	'0'..'9'
+	;
+
+protected
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+INT	:	('0'..'9')+
+	;
+
+ARG_ACTION
+   :
+	NESTED_ARG_ACTION
+	;
+
+protected
+NESTED_ARG_ACTION :
+	'['!
+	(
+		NESTED_ARG_ACTION
+	|	'\r' '\n'	{newline();}
+	|	'\n'		{newline();}
+	|	ACTION_STRING_LITERAL
+	|	~']'
+	)*
+	']'!
+	;
+
+ACTION
+{int actionLine=getLine(); int actionColumn = getColumn(); }
+	:	NESTED_ACTION
+		(	'?'! {_ttype = SEMPRED;} )?
+		{
+			Token t = makeToken(_ttype);
+			String action = $getText;
+			action = action.substring(1,action.length()-1);
+			t.setText(action);
+			t.setLine(actionLine);			// set action line to start
+			t.setColumn(actionColumn);
+			$setToken(t);
+		}
+	;
+
+protected
+NESTED_ACTION :
+	'{'
+	(
+		options {
+			greedy = false; // exit upon '}'
+		}
+	:
+		(
+			'\r' '\n'	{newline();}
+		|	'\n'		{newline();}
+		)
+	|	NESTED_ACTION
+	|	ACTION_CHAR_LITERAL
+	|	COMMENT
+	|	ACTION_STRING_LITERAL
+	|	.
+	)*
+	'}'
+   ;
+
+protected
+ACTION_CHAR_LITERAL
+	:	'\'' (ACTION_ESC|'\n'{newline();}|~'\'')* '\''
+	;
+
+protected
+ACTION_STRING_LITERAL
+	:	'"' (ACTION_ESC|'\n'{newline();}|~'"')* '"'
+	;
+
+protected
+ACTION_ESC
+	:	"\\'"
+	|	"\\\""
+	|	'\\' ~('\''|'"')
+	;
+
+TOKEN_REF
+options { testLiterals = true; }
+	:	'A'..'Z'
+		(	// scarf as many letters/numbers as you can
+			options {
+				warnWhenFollowAmbig=false;
+			}
+		:
+			'a'..'z'|'A'..'Z'|'_'|'0'..'9'
+		)*
+	;
+
+// we get a warning here when looking for options '{', but it works right
+RULE_REF
+{
+	int t=0;
+}
+	:	t=INTERNAL_RULE_REF {_ttype=t;}
+		(	{t==OPTIONS}? WS_LOOP ('{' {_ttype = OPTIONS;})?
+		|	{t==TOKENS}? WS_LOOP ('{' {_ttype = TOKENS;})?
+		|
+		)
+	;
+
+protected
+WS_LOOP
+	:	(	// grab as much WS as you can
+			options {
+				greedy=true;
+			}
+		:
+			WS
+		|	COMMENT
+		)*
+	;
+
+protected
+INTERNAL_RULE_REF returns [int t]
+{
+	t = RULE_REF;
+}
+	:	'a'..'z'
+		(	// scarf as many letters/numbers as you can
+			options {
+				warnWhenFollowAmbig=false;
+			}
+		:
+			'a'..'z'|'A'..'Z'|'_'|'0'..'9'
+		)*
+		{t = testLiteralsTable(t);}
+	;
+
+protected
+WS_OPT
+	:	(WS)?
+	;
+
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ */
+protected
+SRC	:	"src" ' ' file:ACTION_STRING_LITERAL ' ' line:INT
+		{
+		newline();
+		setFilename(file.getText().substring(1,file.getText().length()-1));
+		setLine(Integer.parseInt(line.getText())-1);  // -1 because SL_COMMENT will increment the line no. KR
+		$setType(Token.SKIP); // don't let this go to the parser
+		}
+	;
+
diff --git a/src/org/antlr/tool/antlr.print.g b/src/org/antlr/tool/antlr.print.g
new file mode 100644
index 0000000..db55383
--- /dev/null
+++ b/src/org/antlr/tool/antlr.print.g
@@ -0,0 +1,362 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+}
+
+/** Print out a grammar (no pretty printing).
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  August 19, 2003
+ */
+class ANTLRTreePrinter extends TreeParser;
+
+options {
+	importVocab = ANTLR;
+	ASTLabelType = "GrammarAST";
+    codeGenBitsetTestThreshold=999;
+}
+
+{
+	protected Grammar grammar;
+	protected boolean showActions;
+    protected StringBuffer buf = new StringBuffer(300);
+
+    public void out(String s) {
+        buf.append(s);
+    }
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "antlr.print: "+ex.toString(),
+            ex);
+    }
+
+	/** Normalize a grammar print out by removing all double spaces
+	 *  and trailing/beginning stuff.  FOr example, convert
+	 *
+	 *  ( A  |  B  |  C )*
+	 *
+	 *  to
+	 *
+	 *  ( A | B | C )*
+	 */
+	public static String normalize(String g) {
+	    StringTokenizer st = new StringTokenizer(g, " ", false);
+		StringBuffer buf = new StringBuffer();
+		while ( st.hasMoreTokens() ) {
+			String w = st.nextToken();
+			buf.append(w);
+			buf.append(" ");
+		}
+		return buf.toString().trim();
+	}
+}
+
+/** Call this to figure out how to print */
+toString[Grammar g, boolean showActions] returns [String s=null]
+{
+grammar = g;
+this.showActions = showActions;
+}
+    :   (   grammar
+        |   rule
+        |   alternative
+        |   element
+        |	single_rewrite
+        |   EOR {s="EOR";}
+        )
+        {return normalize(buf.toString());}
+    ;
+
+// --------------
+
+grammar
+    :   ( #( LEXER_GRAMMAR grammarSpec["lexer " ] )
+	    | #( PARSER_GRAMMAR grammarSpec["parser "] )
+	    | #( TREE_GRAMMAR grammarSpec["tree "] )
+	    | #( COMBINED_GRAMMAR grammarSpec[""] )
+	    )
+     ;
+
+attrScope
+	:	#( "scope" ID ACTION )
+	;
+
+grammarSpec[String gtype]
+	:	 id:ID {out(gtype+"grammar "+#id.getText());}
+        (cmt:DOC_COMMENT {out(#cmt.getText()+"\n");} )?
+        (optionsSpec)? {out(";\n");}
+        (tokensSpec)?
+        (attrScope)*
+        (actions)?
+        rules
+    ;
+
+actions
+	:	( action )+
+	;
+
+action
+{
+String scope=null, name=null;
+String action=null;
+}
+	:	#(AMPERSAND id1:ID
+			( id2:ID a1:ACTION
+			  {scope=#id1.getText(); name=#a1.getText(); action=#a1.getText();}
+			| a2:ACTION
+			  {scope=null; name=#id1.getText(); action=#a2.getText();}
+			)
+		 )
+		 {
+		 if ( showActions ) {
+		 	out("@"+(scope!=null?scope+"::":"")+name+action);
+		 }
+		 }
+	;
+
+optionsSpec
+    :   #( OPTIONS {out(" options {");}
+    	   (option {out("; ");})+
+    	   {out("} ");}
+    	 )
+    ;
+
+option
+    :   #( ASSIGN id:ID {out(#id.getText()+"=");} optionValue )
+    ;
+
+optionValue
+	:	id:ID            {out(#id.getText());}
+	|   s:STRING_LITERAL {out(#s.getText());}
+	|	c:CHAR_LITERAL   {out(#c.getText());}
+	|	i:INT            {out(#i.getText());}
+//	|   charSet
+	;
+
+/*
+charSet
+	:   #( CHARSET charSetElement )
+	;
+
+charSetElement
+	:   c:CHAR_LITERAL {out(#c.getText());}
+	|   #( OR c1:CHAR_LITERAL c2:CHAR_LITERAL )
+	|   #( RANGE c3:CHAR_LITERAL c4:CHAR_LITERAL )
+	;
+*/
+
+tokensSpec
+	:	#( TOKENS ( tokenSpec )+ )
+	;
+
+tokenSpec
+	:	TOKEN_REF
+	|	#( ASSIGN TOKEN_REF (STRING_LITERAL|CHAR_LITERAL) )
+	;
+
+rules
+    :   ( rule )+
+    ;
+
+rule
+    :   #( RULE id:ID
+           (modifier)?
+           {out(#id.getText());}
+           #(ARG (arg:ARG_ACTION {out("["+#arg.getText()+"]");} )? )
+           #(RET (ret:ARG_ACTION {out(" returns ["+#ret.getText()+"]");} )? )
+           (optionsSpec)?
+           (ruleScopeSpec)?
+		   (ruleAction)*
+           {out(" : ");}
+           b:block[false]
+           (exceptionGroup)?
+           EOR {out(";\n");}
+         )
+    ;
+
+ruleAction
+	:	#(AMPERSAND id:ID a:ACTION )
+		{if ( showActions ) out("@"+#id.getText()+"{"+#a.getText()+"}");}
+	;
+
+modifier
+{out(#modifier.getText()); out(" ");}
+	:	"protected"
+	|	"public"
+	|	"private"
+	|	"fragment"
+	;
+
+ruleScopeSpec
+ 	:	#( "scope" (ACTION)? ( ID )* )
+ 	;
+
+block[boolean forceParens]
+{
+int numAlts = countAltsForBlock(#block);
+}
+    :   #(  BLOCK {if ( forceParens||numAlts>1 ) out(" (");}
+            (optionsSpec {out(" : ");} )?
+            alternative rewrite ( {out(" | ");} alternative rewrite )*
+            EOB   {if ( forceParens||numAlts>1 ) out(")");}
+         )
+    ;
+
+countAltsForBlock returns [int n=0]
+    :   #( BLOCK (OPTIONS)? (ALT (REWRITE)* {n++;})+ EOB )
+	;
+
+alternative
+    :   #( ALT (element)+ EOA )
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    #("catch" ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    #("finally" ACTION)
+    ;
+
+single_rewrite
+	:	#( REWRITE {out(" ->");} (SEMPRED {out(" {"+#SEMPRED.getText()+"}?");})?
+	       ( alternative | rewrite_template | ACTION {out(" {"+#ACTION.getText()+"}");})
+	     )
+	;
+
+rewrite_template
+	:	#( TEMPLATE
+		   (id:ID {out(" "+#id.getText());}|ind:ACTION {out(" ({"+#ind.getText()+"})");})
+	       #( ARGLIST
+              {out("(");}
+ 	       	  ( #( ARG arg:ID {out(#arg.getText()+"=");}
+	               a:ACTION   {out(#a.getText());}
+	             )
+	          )*
+              {out(")");}
+	        )
+		   ( DOUBLE_QUOTE_STRING_LITERAL {out(" "+#DOUBLE_QUOTE_STRING_LITERAL.getText());}
+		   | DOUBLE_ANGLE_STRING_LITERAL {out(" "+#DOUBLE_ANGLE_STRING_LITERAL.getText());}
+		   )?
+	     )
+	;
+
+rewrite
+	:	(single_rewrite)*
+	;
+
+element
+    :   #(ROOT element)
+    |   #(BANG element)
+    |   atom
+    |   #(NOT {out("~");} element)
+    |   #(RANGE atom {out("..");} atom)
+    |   #(CHAR_RANGE atom {out("..");} atom)
+    |	#(ASSIGN id:ID {out(#id.getText()+"=");} element)
+    |	#(PLUS_ASSIGN id2:ID {out(#id2.getText()+"+=");} element)
+    |   ebnf
+    |   tree
+    |   #( SYNPRED block[true] ) {out("=>");}
+    |   a:ACTION  {if ( showActions ) {out("{"); out(a.getText()); out("}");}}
+    |   pred:SEMPRED
+    	{
+    	if ( showActions ) {out("{"); out(pred.getText()); out("}?");}
+    	else {out("{...}?");}
+    	}
+    |   spred:SYN_SEMPRED
+    	{
+    	  String name = spred.getText();
+    	  GrammarAST predAST=grammar.getSyntacticPredicate(name);
+    	  block(predAST, true);
+    	  out("=>");
+    	}
+    |   BACKTRACK_SEMPRED // don't print anything (auto backtrack stuff)
+    |   gpred:GATED_SEMPRED
+    	{
+    	if ( showActions ) {out("{"); out(gpred.getText()); out("}? =>");}
+    	else {out("{...}? =>");}
+    	}
+    |   EPSILON
+    ;
+
+ebnf:   block[true] {out(" ");}
+    |   #( OPTIONAL block[true] ) {out("? ");}
+    |   #( CLOSURE block[true] )  {out("* ");}
+    |   #( POSITIVE_CLOSURE block[true] ) {out("+ ");}
+    ;
+
+tree:   #(TREE_BEGIN {out(" ^(");} element (element)* {out(") ");} )
+    ;
+
+atom
+{out(" ");}
+    :   (	#( RULE_REF		{out(#atom.toString());}
+			   (rarg:ARG_ACTION	{out("["+#rarg.toString()+"]");})?
+			   (ast_suffix)?
+             )
+		|   #( TOKEN_REF		{out(#atom.toString());} 
+			   (targ:ARG_ACTION	{out("["+#targ.toString()+"]");} )?
+			   (ast_suffix)?
+             )
+		|   #( CHAR_LITERAL	{out(#atom.toString());}
+			   (ast_suffix)?
+             )
+		|   #( STRING_LITERAL	{out(#atom.toString());}
+			   (ast_suffix)?
+             )
+		|   #( WILDCARD		{out(#atom.toString());}
+			   (ast_suffix)?
+             )
+		)
+		{out(" ");}
+    |	LABEL {out(" $"+#LABEL.getText());} // used in -> rewrites
+    ;
+
+ast_suffix
+	:	ROOT {out("^");}
+	|	BANG  {out("!");}
+	;
diff --git a/src/org/antlr/tool/assign.types.g b/src/org/antlr/tool/assign.types.g
new file mode 100644
index 0000000..4c773d1
--- /dev/null
+++ b/src/org/antlr/tool/assign.types.g
@@ -0,0 +1,472 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.analysis.*;
+	import org.antlr.misc.*;
+	import java.io.*;
+}
+
+/** [Warning: TJP says that this is probably out of date as of 11/19/2005,
+ *   but since it's probably still useful, I'll leave in.  Don't have energy
+ *   to update at the moment.]
+ *
+ *  Compute the token types for all literals and rules etc..  There are
+ *  a few different cases to consider for grammar types and a few situations
+ *  within.
+ *
+ *  CASE 1 : pure parser grammar
+ *	a) Any reference to a token gets a token type.
+ *  b) The tokens section may alias a token name to a string or char
+ *
+ *  CASE 2 : pure lexer grammar
+ *  a) Import token vocabulary if available. Set token types for any new tokens
+ *     to values above last imported token type
+ *  b) token rule definitions get token types if not already defined
+ *  c) literals do NOT get token types
+ *
+ *  CASE 3 : merged parser / lexer grammar
+ *	a) Any char or string literal gets a token type in a parser rule
+ *  b) Any reference to a token gets a token type if not referencing
+ *     a fragment lexer rule
+ *  c) The tokens section may alias a token name to a string or char
+ *     which must add a rule to the lexer
+ *  d) token rule definitions get token types if not already defined
+ *  e) token rule definitions may also alias a token name to a literal.
+ *     E.g., Rule 'FOR : "for";' will alias FOR to "for" in the sense that
+ *     references to either in the parser grammar will yield the token type
+ *
+ *  What this pass does:
+ *
+ *  0. Collects basic info about the grammar like grammar name and type;
+ *     Oh, I have go get the options in case they affect the token types.
+ *     E.g., tokenVocab option.
+ *     Imports any token vocab name/type pairs into a local hashtable.
+ *  1. Finds a list of all literals and token names.
+ *  2. Finds a list of all token name rule definitions;
+ *     no token rules implies pure parser.
+ *  3. Finds a list of all simple token rule defs of form "<NAME> : <literal>;"
+ *     and aliases them.
+ *  4. Walks token names table and assign types to any unassigned
+ *  5. Walks aliases and assign types to referenced literals
+ *  6. Walks literals, assigning types if untyped
+ *  4. Informs the Grammar object of the type definitions such as:
+ *     g.defineToken(<charliteral>, ttype);
+ *     g.defineToken(<stringliteral>, ttype);
+ *     g.defineToken(<tokenID>, ttype);
+ *     where some of the ttype values will be the same for aliases tokens.
+ */
+class AssignTokenTypesWalker extends TreeParser;
+
+options {
+	importVocab = ANTLR;
+	ASTLabelType = "GrammarAST";
+    codeGenBitsetTestThreshold=999;
+}
+
+{
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "assign.types: "+ex.toString(),
+            ex);
+    }
+
+protected GrammarAST stringAlias;
+protected GrammarAST charAlias;
+protected GrammarAST stringAlias2;
+protected GrammarAST charAlias2;
+
+protected Grammar grammar;
+protected Map stringLiterals = new LinkedHashMap(); // Map<literal,Integer>
+protected Map tokens = new LinkedHashMap();         // Map<name,Integer>
+/** Track actual lexer rule defs so we don't get repeated token defs in 
+ *  generated lexer.
+ */
+protected Set tokenRuleDefs = new HashSet();        // Set<name>
+protected Map aliases = new LinkedHashMap();        // Map<name,literal>
+protected String currentRuleName;
+protected static final Integer UNASSIGNED = Utils.integer(-1);
+protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
+
+/** Track string literals in any non-lexer rule (could be in tokens{} section) */
+protected void trackString(GrammarAST t) {
+	// if lexer, don't allow aliasing in tokens section
+	if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
+		ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
+								  grammar,
+								  t.token,
+								  t.getText());
+		return;
+	}
+	// in a plain parser grammar rule, cannot reference literals
+	// (unless defined previously via tokenVocab option)
+	if ( grammar.type==Grammar.PARSER &&
+	     grammar.getTokenType(t.getText())==Label.INVALID )
+    {
+		ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
+								  grammar,
+								  t.token,
+								  t.getText());
+	}
+	// otherwise add literal to token types if referenced from parser rule
+	// or in the tokens{} section
+	if ( (currentRuleName==null ||
+         Character.isLowerCase(currentRuleName.charAt(0))) &&
+         grammar.getTokenType(t.getText())==Label.INVALID )
+	{
+		stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
+	}
+}
+
+protected void trackToken(GrammarAST t) {
+	// imported token names might exist, only add if new
+	if ( grammar.getTokenType(t.getText())==Label.INVALID ) {
+		tokens.put(t.getText(), UNASSIGNED);
+	}
+}
+
+protected void trackTokenRule(GrammarAST t,
+							  GrammarAST modifier,
+							  GrammarAST block)
+{
+	// imported token names might exist, only add if new
+	if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
+		if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
+			return;
+		}
+		int existing = grammar.getTokenType(t.getText());
+		if ( existing==Label.INVALID ) {
+			tokens.put(t.getText(), UNASSIGNED);
+		}
+		// look for "<TOKEN> : <literal> ;" pattern
+        // (can have optional action last)
+		if ( block.hasSameTreeStructure(charAlias) ||
+             block.hasSameTreeStructure(stringAlias) ||
+             block.hasSameTreeStructure(charAlias2) ||
+             block.hasSameTreeStructure(stringAlias2) )
+        {
+			alias(t, (GrammarAST)block.getFirstChild().getFirstChild());
+			tokenRuleDefs.add(t.getText());
+		}
+	}
+	// else error
+}
+
+protected void alias(GrammarAST t, GrammarAST s) {
+	aliases.put(t.getText(), s.getText());
+}
+
+protected void assignTypes() {
+	/*
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+	*/
+
+	assignTokenIDTypes();
+
+	aliasTokenIDsAndLiterals();
+
+	assignStringTypes();
+
+	/*
+	System.out.println("AFTER:");
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+	*/
+
+	notifyGrammarObject();
+}
+
+	protected void assignStringTypes() {
+		// walk string literals assigning types to unassigned ones
+		Set s = stringLiterals.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String lit = (String) it.next();
+			Integer oldTypeI = (Integer)stringLiterals.get(lit);
+			int oldType = oldTypeI.intValue();
+			if ( oldType<Label.MIN_TOKEN_TYPE ) {
+				Integer typeI = Utils.integer(grammar.getNewTokenType());
+				stringLiterals.put(lit, typeI);
+				// if string referenced in combined grammar parser rule,
+				// automatically define in the generated lexer
+				grammar.defineLexerRuleForStringLiteral(lit, typeI.intValue());
+			}
+		}
+	}
+
+	protected void aliasTokenIDsAndLiterals() {
+		if ( grammar.type==Grammar.LEXER ) {
+			return; // strings/chars are never token types in LEXER
+		}
+		// walk aliases if any and assign types to aliased literals if literal
+		// was referenced
+		Set s = aliases.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			String literal = (String)aliases.get(tokenID);
+			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
+				stringLiterals.put(literal, tokens.get(tokenID));
+				// an alias still means you need a lexer rule for it
+				Integer typeI = (Integer)tokens.get(tokenID);
+				if ( !tokenRuleDefs.contains(tokenID) ) {
+					grammar.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI.intValue());
+				}
+			}
+		}
+	}
+
+	protected void assignTokenIDTypes() {
+		// walk token names, assigning values if unassigned
+		Set s = tokens.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			if ( tokens.get(tokenID)==UNASSIGNED ) {
+				tokens.put(tokenID, Utils.integer(grammar.getNewTokenType()));
+			}
+		}
+	}
+
+	protected void notifyGrammarObject() {
+		Set s = tokens.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			int ttype = ((Integer)tokens.get(tokenID)).intValue();
+			grammar.defineToken(tokenID, ttype);
+		}
+		s = stringLiterals.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String lit = (String) it.next();
+			int ttype = ((Integer)stringLiterals.get(lit)).intValue();
+			grammar.defineToken(lit, ttype);
+		}
+	}
+
+	protected void init(Grammar g) {
+		this.grammar = g;
+        stringAlias = 
+            #(#[BLOCK], #(#[ALT], #[STRING_LITERAL], #[EOA]), #[EOB]);
+        charAlias =
+            #(#[BLOCK], #(#[ALT], #[CHAR_LITERAL], #[EOA]), #[EOB]);
+        stringAlias2 =
+            #(#[BLOCK], #(#[ALT], #[STRING_LITERAL], #[ACTION], #[EOA]),#[EOB]);
+        charAlias2 = 
+            #(#[BLOCK], #(#[ALT], #[CHAR_LITERAL], #[ACTION], #[EOA]), #[EOB]);
+	}
+}
+
+grammar[Grammar g]
+{
+	init(g);
+}
+    :   ( #( LEXER_GRAMMAR 	  {grammar.type = Grammar.LEXER;} 	  	grammarSpec )
+	    | #( PARSER_GRAMMAR   {grammar.type = Grammar.PARSER;}      grammarSpec )
+	    | #( TREE_GRAMMAR     {grammar.type = Grammar.TREE_PARSER;} grammarSpec )
+	    | #( COMBINED_GRAMMAR {grammar.type = Grammar.COMBINED;}    grammarSpec )
+	    )
+        {assignTypes();}
+    ;
+
+grammarSpec
+{Map opts=null;}
+	:	id:ID {grammar.setName(#id.getText());}
+		(cmt:DOC_COMMENT)?
+		(optionsSpec)?
+        (tokensSpec)?
+        (attrScope)*
+        (AMPERSAND)* // skip actions
+        rules
+	;
+
+attrScope
+	:	#( "scope" ID ACTION )
+	;
+
+optionsSpec returns [Map opts=new HashMap()]
+    :   #( OPTIONS (option[opts])+ )
+    ;
+
+option[Map opts]
+{
+    String key=null;
+    Object value=null;
+}
+    :   #( ASSIGN id:ID {key=#id.getText();} value=optionValue )
+        {
+        opts.put(key,value);
+        // check for grammar-level option to import vocabulary
+        if ( currentRuleName==null && key.equals("tokenVocab") ) {
+            grammar.importTokenVocabulary((String)value);
+        }
+        }
+    ;
+
+optionValue returns [Object value=null]
+    :   id:ID			 {value = #id.getText();}
+    |   s:STRING_LITERAL {value = #s.getText();}
+    |   c:CHAR_LITERAL   {value = #c.getText();}
+    |   i:INT            {value = new Integer(#i.getText());}
+//  |   cs:charSet       {value = #cs;} // return set AST in this case
+    ;
+
+charSet
+	:   #( CHARSET charSetElement )
+	;
+
+charSetElement
+	:   c:CHAR_LITERAL
+	|   #( OR c1:CHAR_LITERAL c2:CHAR_LITERAL )
+	|   #( RANGE c3:CHAR_LITERAL c4:CHAR_LITERAL )
+	;
+
+tokensSpec
+	:	#( TOKENS ( tokenSpec )+ )
+	;
+
+tokenSpec
+	:	t:TOKEN_REF           {trackToken(t);}
+	|	#( ASSIGN
+		   t2:TOKEN_REF       {trackToken(t2);}
+		   ( s:STRING_LITERAL {trackString(s); alias(t2,s);}
+		   | c:CHAR_LITERAL   {trackString(c); alias(t2,c);}
+		   )
+		 )
+	;
+
+rules
+    :   ( rule )+
+    ;
+
+rule
+    :   #( RULE id:ID {currentRuleName=#id.getText();}
+           (m:modifier)?
+           (ARG (ARG_ACTION)?)
+           (RET (ARG_ACTION)?)
+           (optionsSpec)?
+           (ruleScopeSpec)?
+       	   (AMPERSAND)*
+           b:block
+           (exceptionGroup)?
+           EOR
+           {trackTokenRule(#id,#m,#b);}
+         )
+    ;
+
+modifier
+	:	"protected"
+	|	"public"
+	|	"private"
+	|	"fragment"
+	;
+
+ruleScopeSpec
+ 	:	#( "scope" (ACTION)? ( ID )* )
+ 	;
+
+block
+    :   #(  BLOCK
+            (optionsSpec)?
+            ( alternative rewrite )+
+            EOB   
+         )
+    ;
+
+alternative
+    :   #( ALT (element)+ EOA )
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    #("catch" ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    #("finally" ACTION)
+    ;
+
+rewrite
+	:	( #( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION) ) )*
+	;
+
+element
+    :   #(ROOT element)
+    |   #(BANG element)
+    |   atom
+    |   #(NOT element)
+    |   #(RANGE atom atom)
+    |   #(CHAR_RANGE atom atom)
+    |	#(ASSIGN ID element)
+    |	#(PLUS_ASSIGN ID element)
+    |   ebnf
+    |   tree
+    |   #( SYNPRED block ) 
+    |   ACTION
+    |   SEMPRED
+    |   SYN_SEMPRED
+    |   BACKTRACK_SEMPRED
+    |   GATED_SEMPRED
+    |   EPSILON 
+    ;
+
+ebnf:   block
+    |   #( OPTIONAL block ) 
+    |   #( CLOSURE block )  
+    |   #( POSITIVE_CLOSURE block ) 
+    ;
+
+tree:   #(TREE_BEGIN  element (element)*  )
+    ;
+
+atom
+    :   RULE_REF
+    |   t:TOKEN_REF      {trackToken(t);}
+    |   c:CHAR_LITERAL   {trackString(c);}
+    |   s:STRING_LITERAL {trackString(s);}
+    |   WILDCARD
+    ;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
diff --git a/src/org/antlr/tool/buildnfa.g b/src/org/antlr/tool/buildnfa.g
new file mode 100644
index 0000000..9793680
--- /dev/null
+++ b/src/org/antlr/tool/buildnfa.g
@@ -0,0 +1,732 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+import java.util.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+}
+
+/** Build an NFA from a tree representing an ANTLR grammar. */
+class TreeToNFAConverter extends TreeParser;
+
+options {
+	importVocab = ANTLR;
+	ASTLabelType = "GrammarAST";
+}
+
+{
+/** Factory used to create nodes and submachines */
+protected NFAFactory factory = null;
+
+/** Which NFA object are we filling in? */
+protected NFA nfa = null;
+
+/** Which grammar are we converting an NFA for? */
+protected Grammar grammar = null;
+
+protected String currentRuleName = null;
+
+protected int outerAltNum = 0;
+protected int blockLevel = 0;
+
+public TreeToNFAConverter(Grammar g, NFA nfa, NFAFactory factory) {
+	this();
+	this.grammar = g;
+	this.nfa = nfa;
+	this.factory = factory;
+}
+
+protected void init() {
+    // define all the rule begin/end NFAStates to solve forward reference issues
+    Collection rules = grammar.getRules();
+    for (Iterator itr = rules.iterator(); itr.hasNext();) {
+		Rule r = (Rule) itr.next();
+        String ruleName = r.name;
+        NFAState ruleBeginState = factory.newState();
+        ruleBeginState.setDescription("rule "+ruleName+" start");
+		ruleBeginState.setEnclosingRuleName(ruleName);
+        grammar.setRuleStartState(ruleName, ruleBeginState);
+        NFAState ruleEndState = factory.newState();
+        ruleEndState.setDescription("rule "+ruleName+" end");
+        ruleEndState.setAcceptState(true);
+		ruleEndState.setEnclosingRuleName(ruleName);
+        grammar.setRuleStopState(ruleName, ruleEndState);
+    }
+}
+
+protected void addFollowTransition(String ruleName, NFAState following) {
+     //System.out.println("adding follow link to rule "+ruleName);
+     // find last link in FOLLOW chain emanating from rule
+     NFAState end = grammar.getRuleStopState(ruleName);
+     while ( end.transition(1)!=null ) {
+         end = (NFAState)end.transition(1).target;
+     }
+     if ( end.transition(0)!=null ) {
+         // already points to a following node
+         // gotta add another node to keep edges to a max of 2
+         NFAState n = factory.newState();
+         Transition e = new Transition(Label.EPSILON, n);
+         end.addTransition(e);
+         end = n;
+     }
+     Transition followEdge = new Transition(Label.EPSILON, following);
+     end.addTransition(followEdge);
+}
+
+protected void finish() {
+    List rules = new LinkedList();
+    rules.addAll(grammar.getRules());
+    int numEntryPoints = factory.build_EOFStates(rules);
+    if ( numEntryPoints==0 ) {
+        ErrorManager.grammarWarning(ErrorManager.MSG_NO_GRAMMAR_START_RULE,
+                                   grammar,
+                                   null,
+                                   grammar.name);
+    }
+}
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "buildnfa: "+ex.toString(),
+            ex);
+    }
+}
+
+grammar
+    :   {init();}
+        ( #( LEXER_GRAMMAR grammarSpec )
+	    | #( PARSER_GRAMMAR grammarSpec )
+	    | #( TREE_GRAMMAR grammarSpec )
+	    | #( COMBINED_GRAMMAR grammarSpec )
+	    )
+        {finish();}
+    ;
+
+attrScope
+	:	#( "scope" ID ACTION )
+	;
+
+grammarSpec
+	:	ID
+		(cmt:DOC_COMMENT)?
+        ( #(OPTIONS .) )?
+        ( #(TOKENS .) )?
+        (attrScope)*
+        (AMPERSAND)* // skip actions
+        rules
+	;
+
+rules
+    :   ( rule )+
+    ;
+
+rule
+{
+    StateCluster g=null;
+    StateCluster b = null;
+    String r=null;
+}
+    :   #( RULE id:ID {r=#id.getText();}
+		{currentRuleName = r; factory.currentRuleName = r;}
+		(modifier)?
+        (ARG (ARG_ACTION)?)
+        (RET (ARG_ACTION)?)
+		( OPTIONS )?
+		( ruleScopeSpec )?
+		   (AMPERSAND)*
+		   {GrammarAST blk = (GrammarAST)_t;}
+		   b=block
+           (exceptionGroup)?
+           EOR
+           {
+                if ( blk.setValue!=null ) {
+                    // if block comes back as a set not BLOCK, make it
+                    // a single ALT block
+                    b = factory.build_AlternativeBlockFromSet(b);
+                }
+				if ( Character.isLowerCase(r.charAt(0)) ||
+					 grammar.type==Grammar.LEXER )
+				{
+					// attach start node to block for this rule
+					NFAState start = grammar.getRuleStartState(r);
+					start.setAssociatedASTNode(#id);
+					start.addTransition(new Transition(Label.EPSILON, b.left));
+
+					// track decision if > 1 alts
+					if ( grammar.getNumberOfAltsForDecisionNFA(b.left)>1 ) {
+						b.left.setDescription(grammar.grammarTreeToString(#rule,false));
+						b.left.setDecisionASTNode(blk);
+						int d = grammar.assignDecisionNumber( b.left );
+						grammar.setDecisionNFA( d, b.left );
+                    	grammar.setDecisionBlockAST(d, blk);
+					}
+
+					// hook to end of rule node
+					NFAState end = grammar.getRuleStopState(r);
+					b.right.addTransition(new Transition(Label.EPSILON,end));
+				}
+           }
+         )
+    ;
+
+modifier
+	:	"protected"
+	|	"public"
+	|	"private"
+	|	"fragment"
+	;
+
+ruleScopeSpec
+ 	:	#( "scope" (ACTION)? ( ID )* )
+ 	;
+
+block returns [StateCluster g = null]
+{
+    StateCluster a = null;
+    List alts = new LinkedList();
+    this.blockLevel++;
+    if ( this.blockLevel==1 ) {this.outerAltNum=1;}
+}
+    :   {grammar.isValidSet(this,#block) &&
+		 !currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME)}?
+		g=set
+        {this.blockLevel--;}
+
+    |	#( BLOCK ( OPTIONS )?
+           ( a=alternative rewrite
+             {
+             alts.add(a);
+             if ( this.blockLevel==1 ) {this.outerAltNum++;}
+             }
+           )+ 
+           EOB
+        )
+        {g = factory.build_AlternativeBlock(alts);}
+        {this.blockLevel--;}
+    ;
+
+alternative returns [StateCluster g=null]
+{
+    StateCluster e = null;
+}
+    :   #( ALT (e=element {g = factory.build_AB(g,e);} )+ )
+        {
+        if (g==null) { // if alt was a list of actions or whatever
+            g = factory.build_Epsilon();
+        }
+        else {
+        	factory.optimizeAlternative(g);
+        }
+        }
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    #("catch" ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    #("finally" ACTION)
+    ;
+
+rewrite
+	:	(
+			{
+			if ( grammar.getOption("output")==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+										  grammar, #rewrite.token, currentRuleName);
+			}
+			}
+			#( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION) )
+		)*
+	;
+
+element returns [StateCluster g=null]
+    :   #(ROOT g=element)
+    |   #(BANG g=element)
+    |	#(ASSIGN ID g=element)
+    |	#(PLUS_ASSIGN ID g=element)
+    |   #(RANGE a:atom b:atom)
+        {g = factory.build_Range(grammar.getTokenType(#a.getText()),
+                                 grammar.getTokenType(#b.getText()));}
+    |   #(CHAR_RANGE c1:CHAR_LITERAL c2:CHAR_LITERAL)
+        {
+        if ( grammar.type==Grammar.LEXER ) {
+        	g = factory.build_CharRange(#c1.getText(), #c2.getText());
+        }
+        }
+    |   g=atom_or_notatom
+    |   g=ebnf
+    |   g=tree
+    |   #( SYNPRED block )
+    |   ACTION
+    |   pred:SEMPRED {g = factory.build_SemanticPredicate(#pred);}
+    |   spred:SYN_SEMPRED {g = factory.build_SemanticPredicate(#spred);}
+    |   bpred:BACKTRACK_SEMPRED {g = factory.build_SemanticPredicate(#bpred);}
+    |   gpred:GATED_SEMPRED {g = factory.build_SemanticPredicate(#gpred);}
+    |   EPSILON {g = factory.build_Epsilon();}
+    ;
+
+ebnf returns [StateCluster g=null]
+{
+    StateCluster b = null;
+    GrammarAST blk = #ebnf;
+    if ( blk.getType()!=BLOCK ) {
+    	blk = (GrammarAST)blk.getFirstChild();
+    }
+    GrammarAST eob = blk.getLastChild();
+}
+    :   {grammar.isValidSet(this,#ebnf)}? g=set
+
+    |	b=block
+        {
+        // track decision if > 1 alts
+        if ( grammar.getNumberOfAltsForDecisionNFA(b.left)>1 ) {
+            b.left.setDescription(grammar.grammarTreeToString(blk,false));
+            b.left.setDecisionASTNode(blk);
+            int d = grammar.assignDecisionNumber( b.left );
+            grammar.setDecisionNFA( d, b.left );
+            grammar.setDecisionBlockAST(d, blk);
+        }
+        g = b;
+        }
+    |   #( OPTIONAL b=block )
+        {
+        if ( blk.setValue!=null ) {
+            // if block comes back SET not BLOCK, make it
+            // a single ALT block
+            b = factory.build_AlternativeBlockFromSet(b);
+        }
+        g = factory.build_Aoptional(b);
+    	g.left.setDescription(grammar.grammarTreeToString(#ebnf,false));
+        // there is always at least one alt even if block has just 1 alt
+        int d = grammar.assignDecisionNumber( g.left );
+		grammar.setDecisionNFA(d, g.left);
+        grammar.setDecisionBlockAST(d, blk);
+        g.left.setDecisionASTNode(#ebnf);
+    	}
+    |   #( CLOSURE b=block )
+        {
+        if ( blk.setValue!=null ) {
+            b = factory.build_AlternativeBlockFromSet(b);
+        }
+        g = factory.build_Astar(b);
+		// track the loop back / exit decision point
+    	b.right.setDescription("()* loopback of "+grammar.grammarTreeToString(#ebnf,false));
+        int d = grammar.assignDecisionNumber( b.right );
+		grammar.setDecisionNFA(d, b.right);
+        grammar.setDecisionBlockAST(d, blk);
+        b.right.setDecisionASTNode(eob);
+        // make block entry state also have same decision for interpreting grammar
+        NFAState altBlockState = (NFAState)g.left.transition(0).target;
+        altBlockState.setDecisionASTNode(#ebnf);
+        altBlockState.setDecisionNumber(d);
+        g.left.setDecisionNumber(d); // this is the bypass decision (2 alts)
+        g.left.setDecisionASTNode(#ebnf);
+    	}
+    |   #( POSITIVE_CLOSURE b=block )
+        {
+        if ( blk.setValue!=null ) {
+            b = factory.build_AlternativeBlockFromSet(b);
+        }
+        g = factory.build_Aplus(b);
+        // don't make a decision on left edge, can reuse loop end decision
+		// track the loop back / exit decision point
+    	b.right.setDescription("()+ loopback of "+grammar.grammarTreeToString(#ebnf,false));
+        int d = grammar.assignDecisionNumber( b.right );
+		grammar.setDecisionNFA(d, b.right);
+        grammar.setDecisionBlockAST(d, blk);
+        b.right.setDecisionASTNode(eob);
+        // make block entry state also have same decision for interpreting grammar
+        NFAState altBlockState = (NFAState)g.left.transition(0).target;
+        altBlockState.setDecisionASTNode(#ebnf);
+        altBlockState.setDecisionNumber(d);
+        }
+    ;
+
+tree returns [StateCluster g=null]
+{
+StateCluster e=null;
+GrammarAST el=null;
+StateCluster down=null, up=null;
+}
+	:   #( TREE_BEGIN
+		   {el=(GrammarAST)_t;}
+		   g=element
+		   {
+           down = factory.build_Atom(Label.DOWN);
+           // TODO set following states for imaginary nodes?
+           //el.followingNFAState = down.right;
+		   g = factory.build_AB(g,down);
+		   }
+		   ( {el=(GrammarAST)_t;} e=element {g = factory.build_AB(g,e);} )*
+		   {
+           up = factory.build_Atom(Label.UP);
+           //el.followingNFAState = up.right;
+		   g = factory.build_AB(g,up);
+		   // tree roots point at right edge of DOWN for LOOK computation later
+		   #tree.NFATreeDownState = down.left;
+		   }
+		 )
+    ;
+
+atom_or_notatom returns [StateCluster g=null]
+	:	g=atom
+	|	#(  n:NOT
+            (  c:CHAR_LITERAL (ast1:ast_suffix)?
+	           {
+	            int ttype=0;
+     			if ( grammar.type==Grammar.LEXER ) {
+        			ttype = Grammar.getCharValueFromGrammarCharLiteral(#c.getText());
+     			}
+     			else {
+        			ttype = grammar.getTokenType(#c.getText());
+        		}
+                IntSet notAtom = grammar.complement(ttype);
+                if ( notAtom.isNil() ) {
+                    ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
+					  			              grammar,
+								              #c.token,
+									          #c.getText());
+                }
+	            g=factory.build_Set(notAtom);
+	           }
+            |  t:TOKEN_REF (ast3:ast_suffix)?
+	           {
+	            int ttype=0;
+                IntSet notAtom = null;
+     			if ( grammar.type==Grammar.LEXER ) {
+        			notAtom = grammar.getSetFromRule(this,#t.getText());
+        	   		if ( notAtom==null ) {
+                  		ErrorManager.grammarError(ErrorManager.MSG_RULE_INVALID_SET,
+				  			              grammar,
+							              #t.token,
+								          #t.getText());
+        	   		}
+        	   		else {
+	            		notAtom = grammar.complement(notAtom);
+	            	}
+     			}
+     			else {
+        			ttype = grammar.getTokenType(#t.getText());
+	            	notAtom = grammar.complement(ttype);
+        		}
+               if ( notAtom==null || notAtom.isNil() ) {
+                  ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
+				  			              grammar,
+							              #t.token,
+								          #t.getText());
+               }
+	           g=factory.build_Set(notAtom);
+	           }
+            |  g=set
+	           {
+	           GrammarAST stNode = (GrammarAST)n.getFirstChild();
+               //IntSet notSet = grammar.complement(stNode.getSetValue());
+               // let code generator complement the sets
+               IntSet s = stNode.getSetValue();
+               stNode.setSetValue(s);
+               // let code gen do the complement again; here we compute
+               // for NFA construction
+               s = grammar.complement(s);
+               if ( s.isNil() ) {
+                  ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
+				  			              grammar,
+							              #n.token);
+               }
+	           g=factory.build_Set(s);
+	           }
+            )
+        	{#n.followingNFAState = g.right;}
+         )
+	;
+
+atom returns [StateCluster g=null]
+    :   #( r:RULE_REF (rarg:ARG_ACTION)? (as1:ast_suffix)? )
+        {
+        NFAState start = grammar.getRuleStartState(r.getText());
+        if ( start!=null ) {
+            int ruleIndex = grammar.getRuleIndex(r.getText());
+            g = factory.build_RuleRef(ruleIndex, start);
+            r.followingNFAState = g.right;
+            if ( g.left.transition(0) instanceof RuleClosureTransition
+            	 && grammar.type!=Grammar.LEXER )
+            {
+                addFollowTransition(r.getText(), g.right);
+            }
+            // else rule ref got inlined to a set
+        }
+        }
+
+    |   #( t:TOKEN_REF (targ:ARG_ACTION)? (as2:ast_suffix)? )
+        {
+        if ( grammar.type==Grammar.LEXER ) {
+            NFAState start = grammar.getRuleStartState(t.getText());
+            if ( start!=null ) {
+                int ruleIndex = grammar.getRuleIndex(t.getText());
+                g = factory.build_RuleRef(ruleIndex, start);
+                // don't add FOLLOW transitions in the lexer;
+                // only exact context should be used.
+            }
+        }
+        else {
+            int tokenType = grammar.getTokenType(t.getText());
+            g = factory.build_Atom(tokenType);
+            t.followingNFAState = g.right;
+        }
+        }
+
+    |   #( c:CHAR_LITERAL (as3:ast_suffix)? )
+    	{
+    	if ( grammar.type==Grammar.LEXER ) {
+    		g = factory.build_CharLiteralAtom(c.getText());
+    	}
+    	else {
+            int tokenType = grammar.getTokenType(c.getText());
+            g = factory.build_Atom(tokenType);
+            c.followingNFAState = g.right;
+    	}
+    	}
+
+    |   #( s:STRING_LITERAL (as4:ast_suffix)? )
+    	{
+     	if ( grammar.type==Grammar.LEXER ) {
+     		g = factory.build_StringLiteralAtom(s.getText());
+     	}
+     	else {
+             int tokenType = grammar.getTokenType(s.getText());
+             g = factory.build_Atom(tokenType);
+             s.followingNFAState = g.right;
+     	}
+     	}
+
+    |   #( w:WILDCARD (as5:ast_suffix)? )    {g = factory.build_Wildcard();}
+
+	//|	g=set
+	;
+
+ast_suffix
+{
+if ( grammar.getOption("output")==null ) {
+	ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+							  grammar, #ast_suffix.token, currentRuleName);
+}
+}
+	:	ROOT
+	|	BANG
+	;
+
+set returns [StateCluster g=null]
+{
+IntSet elements=new IntervalSet();
+#set.setSetValue(elements); // track set for use by code gen
+}
+	:	#( b:BLOCK
+           (#(ALT (BACKTRACK_SEMPRED)? setElement[elements] EOA))+
+           EOB
+         )
+        {
+        g = factory.build_Set(elements);
+        #b.followingNFAState = g.right;
+        #b.setValue = elements; // track set value of this block
+        }
+		//{System.out.println("set elements="+elements.toString(grammar));}
+	;
+
+setRule returns [IntSet elements=new IntervalSet()]
+{IntSet s=null;}
+	:	#( RULE id:ID (modifier)? ARG RET ( OPTIONS )? ( ruleScopeSpec )?
+		   	(AMPERSAND)*
+           	#( BLOCK ( OPTIONS )?
+           	   ( #(ALT setElement[elements] EOA) )+
+           	   EOB
+           	 )
+           	(exceptionGroup)?
+           	EOR
+         )
+    ;
+    exception
+    	catch[RecognitionException re] {throw re;}
+
+setElement[IntSet elements]
+{
+    int ttype;
+    IntSet ns=null;
+    StateCluster gset;
+}
+    :   c:CHAR_LITERAL
+        {
+     	if ( grammar.type==Grammar.LEXER ) {
+        	ttype = Grammar.getCharValueFromGrammarCharLiteral(c.getText());
+     	}
+     	else {
+        	ttype = grammar.getTokenType(c.getText());
+        }
+        if ( elements.member(ttype) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+									  grammar,
+									  #c.token,
+									  #c.getText());
+        }
+        elements.add(ttype);
+        }
+    |   t:TOKEN_REF
+        {
+		if ( grammar.type==Grammar.LEXER ) {
+			// recursively will invoke this rule to match elements in target rule ref
+			IntSet ruleSet = grammar.getSetFromRule(this,#t.getText());
+			if ( ruleSet==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_RULE_INVALID_SET,
+								  grammar,
+								  #t.token,
+								  #t.getText());
+			}
+			else {
+				elements.addAll(ruleSet);
+			}
+		}
+		else {
+			ttype = grammar.getTokenType(t.getText());
+			if ( elements.member(ttype) ) {
+				ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+										  grammar,
+										  #t.token,
+										  #t.getText());
+			}
+			elements.add(ttype);
+			}
+        }
+
+    |   s:STRING_LITERAL
+        {
+        ttype = grammar.getTokenType(s.getText());
+        if ( elements.member(ttype) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+									  grammar,
+									  #s.token,
+									  #s.getText());
+        }
+        elements.add(ttype);
+        }
+    |	#(CHAR_RANGE c1:CHAR_LITERAL c2:CHAR_LITERAL)
+    	{
+     	if ( grammar.type==Grammar.LEXER ) {
+	        int a = Grammar.getCharValueFromGrammarCharLiteral(c1.getText());
+    	    int b = Grammar.getCharValueFromGrammarCharLiteral(c2.getText());
+    		elements.addAll(IntervalSet.of(a,b));
+     	}
+    	}
+
+	|   gset=set
+        {
+		Transition setTrans = gset.left.transition(0);
+        elements.addAll(setTrans.label.getSet());
+        }
+
+    |   #(  NOT {ns=new IntervalSet();}
+            setElement[ns]
+            {
+                IntSet not = grammar.complement(ns);
+                elements.addAll(not);
+            }
+        )
+    ;
+
+/** Check to see if this block can be a set.  Can't have actions
+ *  etc...  Also can't be in a rule with a rewrite as we need
+ *  to track what's inside set for use in rewrite.
+ */
+testBlockAsSet
+{
+    int nAlts=0;
+    Rule r = grammar.getRule(currentRuleName);
+}
+	:   #( BLOCK
+           (   #(ALT (BACKTRACK_SEMPRED)? testSetElement {nAlts++;} EOA)
+                {!r.hasRewrite(outerAltNum)}?
+           )+
+           EOB
+        )
+        {nAlts>1}? // set of 1 element is not good
+	;
+    exception
+    	catch[RecognitionException re] {throw re;}
+
+testSetRule
+	:	#( RULE id:ID (modifier)? ARG RET ( OPTIONS )? ( ruleScopeSpec )?
+		   	(AMPERSAND)*
+            #( BLOCK
+                ( #(ALT (BACKTRACK_SEMPRED)? testSetElement EOA) )+
+                EOB
+            )
+           	(exceptionGroup)?
+           	EOR
+         )
+    ;
+    exception
+    	catch[RecognitionException re] {throw re;}
+
+/** Match just an element; no ast suffix etc.. */
+testSetElement
+{
+AST r = _t;
+}
+    :   c:CHAR_LITERAL
+    |   t:TOKEN_REF
+        {
+		if ( grammar.type==Grammar.LEXER ) {
+	        Rule rule = grammar.getRule(#t.getText());
+	        if ( rule==null ) {
+	        	throw new RecognitionException("invalid rule");
+	        }
+			// recursively will invoke this rule to match elements in target rule ref
+	        testSetRule(rule.tree);
+		}
+        }
+    |   {grammar.type!=Grammar.LEXER}? s:STRING_LITERAL 
+    |	#(CHAR_RANGE c1:CHAR_LITERAL c2:CHAR_LITERAL)
+	|   testBlockAsSet
+    |   #( NOT testSetElement )
+    ;
+    exception
+     	catch[RecognitionException re] {throw re;}
diff --git a/src/org/antlr/tool/define.g b/src/org/antlr/tool/define.g
new file mode 100644
index 0000000..92b2bc0
--- /dev/null
+++ b/src/org/antlr/tool/define.g
@@ -0,0 +1,615 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.misc.*;
+}
+
+class DefineGrammarItemsWalker extends TreeParser;
+
+options {
+	importVocab = ANTLR;
+	ASTLabelType = "GrammarAST";
+    codeGenBitsetTestThreshold=999;
+}
+
+{
+protected Grammar grammar;
+protected GrammarAST root;
+protected String currentRuleName;
+protected GrammarAST currentRewriteBlock;
+protected GrammarAST currentRewriteRule;
+protected int outerAltNum = 0;
+protected int blockLevel = 0;
+
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "define: "+ex.toString(),
+            ex);
+    }
+
+	protected void finish() {
+		trimGrammar();
+	}
+
+	/** Remove any lexer rules from a COMBINED; already passed to lexer */
+	protected void trimGrammar() {
+		if ( grammar.type!=Grammar.COMBINED ) {
+			return;
+		}
+		// form is (header ... ) ( grammar ID (scope ...) ... ( rule ... ) ( rule ... ) ... )
+		GrammarAST p = root;
+		// find the grammar spec
+		while ( !p.getText().equals("grammar") ) {
+			p = (GrammarAST)p.getNextSibling();
+		}
+		p = (GrammarAST)p.getFirstChild(); // jump down to first child of grammar
+		// look for first RULE def
+		GrammarAST prev = p; // points to the ID (grammar name)
+		while ( p.getType()!=RULE ) {
+			prev = p;
+			p = (GrammarAST)p.getNextSibling();
+		}
+		// prev points at last node before first rule subtree at this point
+		while ( p!=null ) {
+			String ruleName = p.getFirstChild().getText();
+			//System.out.println("rule "+ruleName+" prev="+prev.getText());
+			if ( Character.isUpperCase(ruleName.charAt(0)) ) {
+				// remove lexer rule
+				prev.setNextSibling(p.getNextSibling());
+			}
+			else {
+				prev = p; // non-lexer rule; move on
+			}
+			p = (GrammarAST)p.getNextSibling();
+		}
+		//System.out.println("root after removal is: "+root.toStringList());
+	}
+
+    protected void trackInlineAction(GrammarAST actionAST) {
+		Rule r = grammar.getRule(currentRuleName);
+        if ( r!=null ) {
+            r.trackInlineAction(actionAST);
+        }
+    }
+
+}
+
+grammar[Grammar g]
+{
+grammar = g;
+root = #grammar;
+}
+    :   ( #( LEXER_GRAMMAR 	  {grammar.type = Grammar.LEXER;} 	    grammarSpec )
+	    | #( PARSER_GRAMMAR   {grammar.type = Grammar.PARSER;}      grammarSpec )
+	    | #( TREE_GRAMMAR     {grammar.type = Grammar.TREE_PARSER;} grammarSpec )
+	    | #( COMBINED_GRAMMAR {grammar.type = Grammar.COMBINED;}    grammarSpec )
+	    )
+	    {finish();}
+    ;
+
+attrScope
+	:	#( "scope" name:ID attrs:ACTION )
+		{
+		AttributeScope scope = grammar.defineGlobalScope(name.getText(),#attrs.token);
+		scope.isDynamicGlobalScope = true;
+		scope.addAttributes(attrs.getText(), ";");
+		}
+	;
+
+grammarSpec
+{
+Map opts=null;
+Token optionsStartToken=null;
+}
+	:	id:ID
+		(cmt:DOC_COMMENT)?
+        //(#(OPTIONS .))? // already parsed these in assign.types.g
+        ( {optionsStartToken=((GrammarAST)_t).getToken();}
+          optionsSpec
+        )?
+        (tokensSpec)?
+        (attrScope)*
+        (actions)?
+        rules
+	;
+
+actions
+	:	( action )+
+	;
+
+action
+{
+String scope=null;
+GrammarAST nameAST=null, actionAST=null;
+}
+	:	#(amp:AMPERSAND id1:ID
+			( id2:ID a1:ACTION
+			  {scope=#id1.getText(); nameAST=#id2; actionAST=#a1;}
+			| a2:ACTION
+			  {scope=null; nameAST=#id1; actionAST=#a2;}
+			)
+		 )
+		 {
+		 grammar.defineNamedAction(#amp,scope,nameAST,actionAST);
+		 }
+	;
+
+optionsSpec
+	:	OPTIONS
+	;
+
+tokensSpec
+	:	#( TOKENS ( tokenSpec )+ )
+	;
+
+tokenSpec
+	:	t:TOKEN_REF
+	|	#( ASSIGN
+		   t2:TOKEN_REF
+		   ( s:STRING_LITERAL
+		   | c:CHAR_LITERAL
+		   )
+		 )
+	;
+
+rules
+    :   ( rule )+
+    ;
+
+rule
+{
+String mod=null;
+String name=null;
+Map opts=null;
+Rule r = null;
+}
+    :   #( RULE id:ID {opts = #RULE.options;}
+           (mod=modifier)?
+           #( ARG (args:ARG_ACTION)? )
+           #( RET (ret:ARG_ACTION)? )
+           (optionsSpec)?
+			{
+			name = #id.getText();
+			currentRuleName = name;
+			if ( Character.isUpperCase(name.charAt(0)) &&
+				 grammar.type==Grammar.COMBINED )
+			{
+				// a merged grammar spec, track lexer rules and send to another grammar
+				grammar.defineLexerRuleFoundInParser(#id.getToken(), #rule);
+			}
+			else {
+				int numAlts = countAltsForRule(#rule);
+				grammar.defineRule(#id.getToken(), mod, opts, #rule, #args, numAlts);
+				r = grammar.getRule(name);
+				if ( #args!=null ) {
+					r.parameterScope = grammar.createParameterScope(name,#args.token);
+					r.parameterScope.addAttributes(#args.getText(), ",");
+				}
+				if ( #ret!=null ) {
+					r.returnScope = grammar.createReturnScope(name,#ret.token);
+					r.returnScope.addAttributes(#ret.getText(), ",");
+				}
+			}
+			}
+           (ruleScopeSpec[r])?
+		   (ruleAction[r])*
+           {this.blockLevel=0;}
+           b:block
+           (exceptionGroup)?
+           EOR
+           {
+           // copy rule options into the block AST, which is where
+           // the analysis will look for k option etc...
+           #b.options = opts;
+           }
+         )
+    ;
+
+countAltsForRule returns [int n=0]
+    :   #( RULE id:ID (modifier)? ARG RET (OPTIONS)? ("scope")? (AMPERSAND)*
+           #(  BLOCK (OPTIONS)? (ALT (REWRITE)* {n++;})+ EOB )
+           (exceptionGroup)?
+           EOR
+         )
+	;
+
+ruleAction[Rule r]
+	:	#(amp:AMPERSAND id:ID a:ACTION ) {if (r!=null) r.defineNamedAction(#amp,#id,#a);}
+	;
+
+modifier returns [String mod]
+{
+mod = #modifier.getText();
+}
+	:	"protected"
+	|	"public"
+	|	"private"
+	|	"fragment"
+	;
+
+ruleScopeSpec[Rule r]
+ 	:	#( "scope"
+ 	       ( attrs:ACTION
+ 	         {
+ 	         r.ruleScope = grammar.createRuleScope(r.name,#attrs.token);
+			 r.ruleScope.isDynamicRuleScope = true;
+			 r.ruleScope.addAttributes(#attrs.getText(), ";");
+			 }
+		   )?
+ 	       ( uses:ID
+ 	         {
+ 	         if ( grammar.getGlobalScope(#uses.getText())==null ) {
+				 ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
+										   grammar,
+										   #uses.token,
+										   #uses.getText());
+	         }
+ 	         else {
+ 	         	if ( r.useScopes==null ) {r.useScopes=new ArrayList();}
+ 	         	r.useScopes.add(#uses.getText());
+ 	         }
+ 	         }
+ 	       )*
+ 	     )
+ 	;
+
+block
+{
+this.blockLevel++;
+if ( this.blockLevel==1 ) {this.outerAltNum=1;}
+}
+    :   #(  BLOCK
+            (optionsSpec)?
+            (blockAction)*
+            ( alternative rewrite
+              {if ( this.blockLevel==1 ) {this.outerAltNum++;}}
+            )+
+            EOB
+         )
+         {this.blockLevel--;}
+    ;
+
+// TODO: this does nothing now! subrules cannot have init actions. :(
+blockAction
+	:	#(amp:AMPERSAND id:ID a:ACTION ) // {r.defineAction(#amp,#id,#a);}
+	;
+
+alternative
+{
+if ( grammar.type!=Grammar.LEXER && grammar.getOption("output")!=null && blockLevel==1 ) {
+	GrammarAST aRewriteNode = #alternative.findFirstType(REWRITE);
+	if ( aRewriteNode!=null||
+		 (#alternative.getNextSibling()!=null &&
+		  #alternative.getNextSibling().getType()==REWRITE) )
+	{
+		Rule r = grammar.getRule(currentRuleName);
+		r.trackAltsWithRewrites(#alternative,this.outerAltNum);
+	}
+}
+}
+    :   #( ALT (element)+ EOA )
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :   #("catch" ARG_ACTION ACTION) {trackInlineAction(#ACTION);}
+    ;
+
+finallyClause
+    :    #("finally" ACTION) {trackInlineAction(#ACTION);}
+    ;
+
+element
+    :   #(ROOT element)
+    |   #(BANG element)
+    |   atom
+    |   #(NOT element)
+    |   #(RANGE atom atom)
+    |   #(CHAR_RANGE atom atom)
+    |	#(ASSIGN id:ID el:element)
+    	{
+		if ( #el.getType()==ANTLRParser.ROOT ||
+             #el.getType()==ANTLRParser.BANG )
+		{
+            #el = (GrammarAST)#el.getFirstChild();
+        }
+    	if ( #el.getType()==RULE_REF) {
+    		grammar.defineRuleRefLabel(currentRuleName,#id.getToken(),#el);
+    	}
+    	else {
+    		grammar.defineTokenRefLabel(currentRuleName,#id.getToken(),#el);
+    	}
+    	}
+    |	#(	PLUS_ASSIGN id2:ID a2:element
+    	    {
+            if ( #a2.getType()==ANTLRParser.ROOT ||
+                 #a2.getType()==ANTLRParser.BANG )
+            {
+                #a2 = (GrammarAST)#a2.getFirstChild();
+            }
+    	    if ( #a2.getType()==RULE_REF ) {
+    	    	grammar.defineRuleListLabel(currentRuleName,#id2.getToken(),#a2);
+    	    }
+    	    else {
+    	    	grammar.defineTokenListLabel(currentRuleName,#id2.getToken(),#a2);
+    	    }
+    	    }
+         )
+    |   ebnf
+    |   tree
+    |   #( SYNPRED block )
+    |   act:ACTION
+        {
+        #act.outerAltNum = this.outerAltNum;
+		trackInlineAction(#act);
+        }
+    |   SEMPRED
+        {
+        #SEMPRED.outerAltNum = this.outerAltNum;
+        trackInlineAction(#SEMPRED);
+        }
+    |   SYN_SEMPRED
+    |   BACKTRACK_SEMPRED
+    |   GATED_SEMPRED
+        {
+        #GATED_SEMPRED.outerAltNum = this.outerAltNum;
+        trackInlineAction(#GATED_SEMPRED);
+        }
+    |   EPSILON 
+    ;
+
+ebnf:   (dotLoop)=> dotLoop // .* or .+
+    |   block
+    |   #( OPTIONAL block )
+    |   #( CLOSURE block )
+    |   #( POSITIVE_CLOSURE block )
+    ;
+
+/** Track the .* and .+ idioms and make them nongreedy by default.
+ */
+dotLoop
+{
+    GrammarAST block = (GrammarAST)#dotLoop.getFirstChild();
+}
+    :   (   #( CLOSURE dotBlock )           
+        |   #( POSITIVE_CLOSURE dotBlock )
+        )
+        {
+        Map opts=new HashMap();
+        opts.put("greedy", "false");
+        if ( grammar.type!=Grammar.LEXER ) {
+            // parser grammars assume k=1 for .* loops
+            // otherwise they (analysis?) look til EOF!
+            opts.put("k", Utils.integer(1));
+        }
+        block.setOptions(grammar,opts);
+        }
+    ;
+
+dotBlock
+    :   #( BLOCK #( ALT WILDCARD EOA ) EOB )
+    ;
+
+tree:   #(TREE_BEGIN element (element)*)
+    ;
+
+atom
+    :   #( rr:RULE_REF (rarg:ARG_ACTION)? )
+    	{
+        grammar.altReferencesRule(currentRuleName, #rr, this.outerAltNum);
+		if ( #rarg!=null ) {
+            #rarg.outerAltNum = this.outerAltNum;
+            trackInlineAction(#rarg);
+        }
+        }
+    |   #( t:TOKEN_REF (targ:ARG_ACTION )? )
+    	{
+		if ( #targ!=null ) {
+            #targ.outerAltNum = this.outerAltNum;
+            trackInlineAction(#targ);
+        }
+    	if ( grammar.type==Grammar.LEXER ) {
+    		grammar.altReferencesRule(currentRuleName, #t, this.outerAltNum);
+    	}
+    	else {
+    		grammar.altReferencesTokenID(currentRuleName, #t, this.outerAltNum);
+    	}
+    	}
+    |   c:CHAR_LITERAL
+    	{
+    	if ( grammar.type!=Grammar.LEXER ) {
+    		Rule rule = grammar.getRule(currentRuleName);
+			if ( rule!=null ) {
+				rule.trackTokenReferenceInAlt(#c, outerAltNum);
+    		}
+    	}
+    	}
+    |   s:STRING_LITERAL
+    	{
+    	if ( grammar.type!=Grammar.LEXER ) {
+    		Rule rule = grammar.getRule(currentRuleName);
+			if ( rule!=null ) {
+				rule.trackTokenReferenceInAlt(#s, outerAltNum);
+    		}
+    	}
+    	}
+    |   WILDCARD
+    ;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
+
+rewrite
+{
+currentRewriteRule = #rewrite; // has to execute during guessing
+if ( grammar.buildAST() ) {
+    #rewrite.rewriteRefsDeep = new HashSet<GrammarAST>();
+}
+}
+	:	(
+            #( REWRITE (pred:SEMPRED)? rewrite_alternative )
+            {
+            if ( #pred!=null ) {
+                #pred.outerAltNum = this.outerAltNum;
+                trackInlineAction(#pred);
+            }
+            }
+        )*
+        //{System.out.println("-> refs = "+#rewrite.rewriteRefs);}
+	;
+
+rewrite_block
+{
+GrammarAST enclosingBlock = currentRewriteBlock;
+if ( inputState.guessing==0 ) {  // don't do if guessing
+    currentRewriteBlock=#rewrite_block; // pts to BLOCK node
+    currentRewriteBlock.rewriteRefsShallow = new HashSet<GrammarAST>();
+    currentRewriteBlock.rewriteRefsDeep = new HashSet<GrammarAST>();
+}
+}
+    :   #( BLOCK rewrite_alternative EOB )
+        //{System.out.println("atoms="+currentRewriteBlock.rewriteRefs);}
+        {
+        // copy the element refs in this block to the surrounding block
+        if ( enclosingBlock!=null ) {
+            enclosingBlock.rewriteRefsDeep
+                .addAll(currentRewriteBlock.rewriteRefsShallow);
+        }
+        currentRewriteBlock = enclosingBlock; // restore old BLOCK ptr
+        }
+    ;
+
+rewrite_alternative
+    :   {grammar.buildAST()}?
+    	#( a:ALT ( ( rewrite_element )+ | EPSILON ) EOA )
+    |	{grammar.buildTemplate()}? rewrite_template
+    ;
+
+rewrite_element
+    :   rewrite_atom
+    |   rewrite_ebnf
+    |   rewrite_tree
+    ;
+
+rewrite_ebnf
+    :   #( OPTIONAL rewrite_block )
+    |   #( CLOSURE rewrite_block )
+    |   #( POSITIVE_CLOSURE rewrite_block )
+    ;
+
+rewrite_tree
+	:   #(	TREE_BEGIN rewrite_atom ( rewrite_element )* )
+    ;
+
+rewrite_atom
+{
+Rule r = grammar.getRule(currentRuleName);
+Set tokenRefsInAlt = r.getTokenRefsInAlt(outerAltNum);
+boolean imaginary =
+    #rewrite_atom.getType()==TOKEN_REF &&
+    !tokenRefsInAlt.contains(#rewrite_atom.getText());
+if ( !imaginary && grammar.buildAST() &&
+     (#rewrite_atom.getType()==RULE_REF ||
+      #rewrite_atom.getType()==LABEL ||
+      #rewrite_atom.getType()==TOKEN_REF ||
+      #rewrite_atom.getType()==CHAR_LITERAL ||
+      #rewrite_atom.getType()==STRING_LITERAL) )
+{
+    // track per block and for entire rewrite rule
+    if ( currentRewriteBlock!=null ) {
+        currentRewriteBlock.rewriteRefsShallow.add(#rewrite_atom);
+        currentRewriteBlock.rewriteRefsDeep.add(#rewrite_atom);
+    }
+    currentRewriteRule.rewriteRefsDeep.add(#rewrite_atom);
+}
+}
+    :   RULE_REF 
+    |   ( #(TOKEN_REF (arg:ARG_ACTION)?) | CHAR_LITERAL | STRING_LITERAL )
+        {
+        if ( #arg!=null ) {
+            #arg.outerAltNum = this.outerAltNum;
+            trackInlineAction(#arg);
+        }
+        }
+
+    |	LABEL
+
+    |	ACTION
+        {
+            #ACTION.outerAltNum = this.outerAltNum;
+            trackInlineAction(#ACTION);
+        }
+    ;
+
+rewrite_template
+    :	#( ALT EPSILON EOA ) 
+   	|	#( TEMPLATE (id:ID|ind:ACTION)
+	       #( ARGLIST
+                ( #( ARG arg:ID a:ACTION )
+                {
+                    #a.outerAltNum = this.outerAltNum;
+                    trackInlineAction(#a);
+                }
+                )*
+            )
+            {
+            if ( #ind!=null ) {
+                #ind.outerAltNum = this.outerAltNum;
+                trackInlineAction(#ind);
+            }
+            }
+
+		   ( DOUBLE_QUOTE_STRING_LITERAL
+		   | DOUBLE_ANGLE_STRING_LITERAL
+		   )?
+	     )
+
+	|	act:ACTION
+        {
+        #act.outerAltNum = this.outerAltNum;
+        trackInlineAction(#act);
+        }
+	;
diff --git a/src/org/antlr/tool/templates/depend.stg b/src/org/antlr/tool/templates/depend.stg
new file mode 100644
index 0000000..c093054
--- /dev/null
+++ b/src/org/antlr/tool/templates/depend.stg
@@ -0,0 +1,12 @@
+/** templates used to generate make-compatible dependencies */
+group depend;
+
+/** Generate "f : x, y, z" dependencies for input
+ *  dependencies and generated files. in and out
+ *  are File objects.  For example, you can say
+ *  <f.canonicalPath>
+ */
+dependencies(grammarFileName,in,out) ::= <<
+<if(in)><grammarFileName>: <in; separator=", "><endif>
+<out:{f | <f> : <grammarFileName>}; separator="\n">
+>>
diff --git a/src/org/antlr/tool/templates/dot/decision-rank.st b/src/org/antlr/tool/templates/dot/decision-rank.st
new file mode 100644
index 0000000..d5142f6
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/decision-rank.st
@@ -0,0 +1 @@
+{rank=same; rankdir=TB; <states; separator="; ">}
diff --git a/src/org/antlr/tool/templates/dot/dfa.st b/src/org/antlr/tool/templates/dot/dfa.st
new file mode 100644
index 0000000..5b81e70
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/dfa.st
@@ -0,0 +1,7 @@
+digraph NFA {
+<if(rankdir)>rankdir=<rankdir>;<endif>
+<decisionRanks; separator="\n">
+<states; separator="\n">
+<edges; separator="\n">
+}
+
diff --git a/src/org/antlr/tool/templates/dot/edge.st b/src/org/antlr/tool/templates/dot/edge.st
new file mode 100644
index 0000000..1bb8e96
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/edge.st
@@ -0,0 +1 @@
+<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
diff --git a/src/org/antlr/tool/templates/dot/epsilon-edge.st b/src/org/antlr/tool/templates/dot/epsilon-edge.st
new file mode 100644
index 0000000..2a49b2b
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/epsilon-edge.st
@@ -0,0 +1 @@
+<src> -> <target> [fontname="Times-Italic", label = "e"];
diff --git a/src/org/antlr/tool/templates/dot/nfa.st b/src/org/antlr/tool/templates/dot/nfa.st
new file mode 100644
index 0000000..280ced2
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/nfa.st
@@ -0,0 +1,6 @@
+digraph NFA {
+rankdir=LR;
+<decisionRanks; separator="\n">
+<states; separator="\n">
+<edges; separator="\n">
+}
diff --git a/src/org/antlr/tool/templates/dot/state.st b/src/org/antlr/tool/templates/dot/state.st
new file mode 100644
index 0000000..f68e3a8
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/state.st
@@ -0,0 +1 @@
+node [fontsize=11, shape = <if(useBox)>box<else>circle, fixedsize=true, width=.4<endif>]; <name>
diff --git a/src/org/antlr/tool/templates/dot/stopstate.st b/src/org/antlr/tool/templates/dot/stopstate.st
new file mode 100644
index 0000000..572d460
--- /dev/null
+++ b/src/org/antlr/tool/templates/dot/stopstate.st
@@ -0,0 +1 @@
+node [fontsize=11, shape = <if(useBox)>polygon,sides=4,peripheries=2<else>doublecircle, fixedsize=true, width=.6<endif>]; <name>
diff --git a/src/org/antlr/tool/templates/messages/formats/antlr.stg b/src/org/antlr/tool/templates/messages/formats/antlr.stg
new file mode 100644
index 0000000..cf9cbc9
--- /dev/null
+++ b/src/org/antlr/tool/templates/messages/formats/antlr.stg
@@ -0,0 +1,42 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ New style messages. This file contains the actual layout of the messages emitted by ANTLR.
+ The text itself is coming out of the languages/*stg files, according to the chosen locale.
+ This file contains the default format ANTLR uses.
+*/
+
+group antlr;
+
+location(file, line, column) ::= "<file>:<line>:<column>:"
+
+message(id, text) ::= "(<id>) <text>"
+
+report(location, message, type) ::= "<type>(<message.id>): <location> <message.text>"
+
+wantsSingleLineMessage() ::= "false"
\ No newline at end of file
diff --git a/src/org/antlr/tool/templates/messages/formats/gnu.stg b/src/org/antlr/tool/templates/messages/formats/gnu.stg
new file mode 100644
index 0000000..001ab54
--- /dev/null
+++ b/src/org/antlr/tool/templates/messages/formats/gnu.stg
@@ -0,0 +1,42 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ New style messages. This file contains the actual layout of the messages emitted by ANTLR.
+ The text itself is coming out of the languages/*stg files, according to the chosen locale.
+ This file contains the format that mimics GCC output.
+*/
+
+group gnu;
+
+location(file, line, column) ::= "<file>:<line>:"
+
+message(id, text) ::= "<text> (<id>)"
+
+report(location, message, type) ::= "<location> <type>: <message>"
+
+wantsSingleLineMessage() ::= "true"
\ No newline at end of file
diff --git a/src/org/antlr/tool/templates/messages/formats/vs2005.stg b/src/org/antlr/tool/templates/messages/formats/vs2005.stg
new file mode 100644
index 0000000..34c88e4
--- /dev/null
+++ b/src/org/antlr/tool/templates/messages/formats/vs2005.stg
@@ -0,0 +1,42 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ New style messages. This file contains the actual layout of the messages emitted by ANTLR.
+ The text itself is coming out of the languages/*stg files, according to the chosen locale.
+ This file contains the default format ANTLR uses.
+*/
+
+group antlr;
+
+location(file, line, column) ::= "<file>(<line>,<column>)"
+
+message(id, text) ::= "error <id> : <text>"
+
+report(location, message, type) ::= "<location> : <type> <message.id> : <message.text>"
+
+wantsSingleLineMessage() ::= "true"
diff --git a/src/org/antlr/tool/templates/messages/languages/en.stg b/src/org/antlr/tool/templates/messages/languages/en.stg
new file mode 100644
index 0000000..a02df50
--- /dev/null
+++ b/src/org/antlr/tool/templates/messages/languages/en.stg
@@ -0,0 +1,278 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ New style messages. This file only contains the messages in English, but no
+ information about which file, line, or column it occurred in.
+ The location and message ids are taken out of the formats directory.
+														Kay Roepke
+*/
+group en_US;
+
+// TOOL ERRORS
+// file errors
+CANNOT_WRITE_FILE(arg,exception,stackTrace) ::= <<
+cannot write file <arg>: <exception>
+<stackTrace; separator="\n">
+>>
+CANNOT_CLOSE_FILE(arg,exception,stackTrace) ::= "cannot close file <arg>: <exception>"
+CANNOT_FIND_TOKENS_FILE(arg) ::= "cannot find tokens file <arg>"
+ERROR_READING_TOKENS_FILE(arg,exception,stackTrace) ::= <<
+problem reading token vocabulary file <arg>: <exception>
+<stackTrace; separator="\n">
+>>
+DIR_NOT_FOUND(arg) ::= "directory not found: <arg>"
+OUTPUT_DIR_IS_FILE(arg) ::= "output directory is a file: <arg>"
+CANNOT_OPEN_FILE(arg) ::= "cannot find or open file: <arg>"
+
+INTERNAL_ERROR(arg,arg2,exception,stackTrace) ::= <<
+internal error: <arg> <arg2><if(exception)>: <exception><endif>
+<stackTrace; separator="\n">
+>>
+INTERNAL_WARNING(arg) ::= "internal warning: <arg>"
+ERROR_CREATING_ARTIFICIAL_RULE(arg,exception,stackTrace) ::= <<
+problems creating lexer rule listing all tokens: <exception>
+<stackTrace; separator="\n">
+>>
+TOKENS_FILE_SYNTAX_ERROR(arg,arg2) ::=
+	"problems parsing token vocabulary file <arg> on line <arg2>"
+CANNOT_GEN_DOT_FILE(arg,exception,stackTrace) ::=
+	"cannot write DFA DOT file <arg>: <exception>"
+BAD_ACTION_AST_STRUCTURE(exception,stackTrace) ::=
+	"bad internal tree structure for action '<arg>': <exception>"
+BAD_AST_STRUCTURE(arg,exception,stackTrace) ::= <<
+bad internal tree structure '<arg>': <exception>
+<stackTrace; separator="\n">
+>>
+FILE_AND_GRAMMAR_NAME_DIFFER(arg,arg2) ::=
+  "file <arg2> contains grammar <arg>; names must be identical"
+FILENAME_EXTENSION_ERROR(arg) ::=
+  "file <arg> must end in a file extension, normally .g"
+
+// code gen errors
+MISSING_CODE_GEN_TEMPLATES(arg) ::=
+	"cannot find code generation templates <arg>.stg"
+MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES() ::=
+	"cannot find code generation cyclic DFA templates for language <arg>"
+CODE_GEN_TEMPLATES_INCOMPLETE(arg) ::=
+	"at least one code generation template missing for language <arg>"
+CANNOT_CREATE_TARGET_GENERATOR(arg,exception,stackTrace) ::=
+	"cannot create target <arg> code generator: <exception>"
+CANNOT_COMPUTE_SAMPLE_INPUT_SEQ() ::=
+	"cannot generate a sample input sequence from lookahead DFA"
+
+// grammar interpretation errors
+/*
+NO_VIABLE_DFA_ALT(arg,arg2) ::=
+	"no viable transition from state <arg> on <arg2> while interpreting DFA"
+*/
+
+// GRAMMAR ERRORS
+SYNTAX_ERROR(arg) ::= "syntax error: <arg>"
+RULE_REDEFINITION(arg) ::=
+	"rule <arg> redefinition"
+LEXER_RULES_NOT_ALLOWED(arg) ::=
+	"lexer rule <arg> not allowed in parser"
+PARSER_RULES_NOT_ALLOWED(arg) ::=
+	"parser rule <arg> not allowed in lexer"
+CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL(arg) ::=
+	"cannot find an attribute name in attribute declaration"
+NO_TOKEN_DEFINITION(arg) ::=
+	"no lexer rule corresponding to token: <arg>"
+UNDEFINED_RULE_REF(arg) ::=
+	"reference to undefined rule: <arg>"
+LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(arg) ::=
+	"literal has no associated lexer rule: <arg>"
+CANNOT_ALIAS_TOKENS_IN_LEXER(arg) ::=
+	"literals are illegal in lexer tokens{} section: <arg>"
+ATTRIBUTE_REF_NOT_IN_RULE(arg,arg2) ::=
+	"reference to attribute outside of a rule: <arg><if(arg2)>.<arg2><endif>"
+UNKNOWN_ATTRIBUTE_IN_SCOPE(arg,arg2) ::=
+	"unknown attribute for <arg>: <arg2>"
+UNKNOWN_RULE_ATTRIBUTE(arg,arg2) ::=
+	"unknown attribute for rule <arg>: <arg2>"
+UNKNOWN_SIMPLE_ATTRIBUTE(arg,args2) ::=
+	"attribute is not a token, parameter, or return value: <arg>"
+ISOLATED_RULE_SCOPE(arg) ::=
+	"missing attribute access on rule scope: <arg>"
+INVALID_RULE_PARAMETER_REF(arg,arg2) ::=
+	"cannot access rule <arg>'s parameter: <arg2>"
+INVALID_RULE_SCOPE_ATTRIBUTE_REF(arg,arg2) ::=
+	"cannot access rule <arg>'s dynamically-scoped attribute: <arg2>"
+SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE(arg) ::=
+	"symbol <arg> conflicts with global dynamic scope with same name"
+WRITE_TO_READONLY_ATTR(arg,arg2,arg3) ::=
+	"cannot write to read only attribute: $<arg><if(arg2)>.<arg2><endif>"
+LABEL_CONFLICTS_WITH_RULE(arg) ::=
+	"label <arg> conflicts with rule with same name"
+LABEL_CONFLICTS_WITH_TOKEN(arg) ::=
+	"label <arg> conflicts with token with same name"
+LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE(arg,arg2) ::=
+	"label <arg> conflicts with rule <arg2>'s dynamically-scoped attribute with same name"
+LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
+	"label <arg> conflicts with rule <arg2>'s return value or parameter with same name"
+ATTRIBUTE_CONFLICTS_WITH_RULE(arg,arg2) ::=
+	"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with the rule name"
+ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
+	"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with <arg2>'s return value or parameter with same name"
+LABEL_TYPE_CONFLICT(arg,arg2) ::=
+	"label <arg> type mismatch with previous definition: <arg2>"
+ARG_RETVAL_CONFLICT(arg,arg2) ::=
+	"rule <arg2>'s argument <arg> conflicts with a return value with same name"
+NONUNIQUE_REF(arg) ::=
+	"<arg> is a non-unique reference"
+FORWARD_ELEMENT_REF(arg) ::=
+	"illegal forward reference: <arg>"
+MISSING_RULE_ARGS(arg) ::=
+	"missing parameter(s) on rule reference: <arg>"
+RULE_HAS_NO_ARGS(arg) ::=
+	"rule <arg> has no defined parameters"
+ARGS_ON_TOKEN_REF(arg) ::=
+	"token reference <arg> may not have parameters"
+/*
+NONCHAR_RANGE() ::=
+	"range operator can only be used in the lexer"
+*/
+ILLEGAL_OPTION(arg) ::=
+	"illegal option <arg>"
+LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT(arg) ::=
+	"rule '+=' list labels are not allowed w/o output option: <arg>"
+UNDEFINED_TOKEN_REF_IN_REWRITE(arg) ::=
+  "reference to undefined token in rewrite rule: <arg>"
+REWRITE_ELEMENT_NOT_PRESENT_ON_LHS(arg) ::=
+  "reference to rewrite element <arg> without reference on left of ->"
+UNDEFINED_LABEL_REF_IN_REWRITE(arg) ::=
+  "reference to undefined label in rewrite rule: $<arg>"
+NO_GRAMMAR_START_RULE (arg) ::=
+  "grammar <arg>: no start rule (no rule can obviously be followed by EOF)"
+EMPTY_COMPLEMENT(arg) ::= <<
+<if(arg)>
+set complement ~<arg> is empty
+<else>
+set complement is empty
+<endif>
+>>
+UNKNOWN_DYNAMIC_SCOPE(arg) ::=
+  "unknown dynamic scope: <arg>"
+UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE(arg,arg2) ::=
+  "unknown dynamically-scoped attribute for scope <arg>: <arg2>"
+AMBIGUOUS_RULE_SCOPE(arg) ::=
+  "reference $<arg> is ambiguous since rule <arg> is referenced in the production and rule <arg> also has a dynamic scope"
+ISOLATED_RULE_ATTRIBUTE(arg) ::=
+  "reference to locally-defined rule scope attribute without rule name: <arg>"
+INVALID_ACTION_SCOPE(arg,arg2) ::=
+  "unknown or invalid action scope for <arg2> grammar: <arg>"
+ACTION_REDEFINITION(arg) ::=
+  "redefinition of <arg> action"
+DOUBLE_QUOTES_ILLEGAL(arg) ::=
+  "string literals must use single quotes (such as \'begin\'): <arg>"
+INVALID_TEMPLATE_ACTION(arg) ::=
+  "invalid StringTemplate % shorthand syntax: '<arg>'"
+MISSING_ATTRIBUTE_NAME() ::=
+  "missing attribute name on $ reference"
+ARG_INIT_VALUES_ILLEGAL(arg) ::=
+  "rule parameters may not have init values: <arg>"
+REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION(arg) ::=
+  "rule <arg> uses rewrite syntax or operator with no output option or lexer rule uses !"
+NO_RULES(arg) ::= "grammar file <arg> has no rules"
+MISSING_AST_TYPE_IN_TREE_GRAMMAR(arg) ::=
+  "tree grammar <arg> has no ASTLabelType option"
+REWRITE_FOR_MULTI_ELEMENT_ALT(arg) ::=
+  "with rewrite=true, alt <arg> not simple node or obvious tree element; text attribute for rule not guaranteed to be correct"
+RULE_INVALID_SET(arg) ::= 
+  "Cannot complement rule <arg>; not a simple set or element"
+
+// GRAMMAR WARNINGS
+
+GRAMMAR_NONDETERMINISM(input,conflictingAlts,paths,disabled) ::=
+<<
+<if(paths)>
+Decision can match input such as "<input>" using multiple alternatives:
+<paths:{  alt <it.alt> via NFA path <it.states; separator=","><\n>}>
+<else>
+Decision can match input such as "<input>" using multiple alternatives: <conflictingAlts; separator=", "><\n>
+<endif>
+<if(disabled)>
+As a result, alternative(s) <disabled; separator=","> were disabled for that input
+<endif>
+>>
+
+DANGLING_STATE(danglingAlts) ::= <<
+the decision cannot distinguish between alternative(s) <danglingAlts; separator=","> for at least one input sequence
+>>
+
+UNREACHABLE_ALTS(alts) ::= <<
+The following alternatives are unreachable: <alts; separator=","><\n>
+>>
+
+INSUFFICIENT_PREDICATES(alts) ::= <<
+The following alternatives are insufficiently covered with predicates: <alts; separator=","><\n>
+>>
+
+DUPLICATE_SET_ENTRY(arg) ::=
+	"duplicate token type <arg> when collapsing subrule into set"
+
+ANALYSIS_ABORTED(enclosingRule) ::= <<
+ANTLR could not analyze this decision in rule <enclosingRule>; often this is because of recursive rule references visible from the left edge of alternatives.  ANTLR will re-analyze the decision with a fixed lookahead of k=1.  Consider using "options {k=1;}" for that decision and possibly adding a syntactic predicate.
+>>
+
+RECURSION_OVERLOW(alt,input,targetRules,callSiteStates) ::= <<
+Alternative <alt>: after matching input such as <input> decision cannot predict what comes next due to recursion overflow <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule>};separator=", ">}; separator=" and ">
+>>
+
+LEFT_RECURSION(targetRules,alt,callSiteStates) ::= <<
+Alternative <alt> discovers infinite left-recursion <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule>};separator=", ">}; separator=" and ">
+>>
+
+UNREACHABLE_TOKENS(tokens) ::= <<
+The following token definitions are unreachable: <tokens; separator=",">
+>>
+
+TOKEN_NONDETERMINISM(input,conflictingTokens,paths,disabled) ::=
+<<
+<if(paths)>
+Decision can match input such as "<input>" using multiple alternatives:
+<paths:{  alt <it.alt> via NFA path <it.states; separator=","><\n>}>
+<else>
+Multiple token rules can match input such as "<input>": <conflictingTokens; separator=", "><\n>
+<endif>
+<if(disabled)>
+As a result, token(s) <disabled; separator=","> were disabled for that input
+<endif>
+>>
+
+LEFT_RECURSION_CYCLES(listOfCycles) ::= <<
+The following sets of rules are mutually left-recursive <listOfCycles:{c| [<c:{r|<r>}; separator=", ">]}; separator=" and ">
+>>
+
+NONREGULAR_DECISION(ruleName,alts) ::= <<
+[fatal] rule <ruleName> has non-LL(*) decision due to recursive rule invocations reachable from alts <alts; separator=",">.  Resolve by left-factoring or using syntactic predicates or using backtrack=true option.
+>>
+
+/* l10n for message levels */
+warning() ::= "warning"
+error() ::= "error"

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-java/antlr3.git



More information about the pkg-java-commits mailing list