[antlr3] 02/07: Imported Upstream version 3.2

Tony Mancill tmancill at moszumanska.debian.org
Sat Jul 11 18:38:41 UTC 2015


This is an automated email from the git hooks/post-receive script.

tmancill pushed a commit to branch master
in repository antlr3.

commit b4ef709ef80832af012650d878e0a89f457be1b6
Author: tony mancill <tmancill at debian.org>
Date:   Sat Jul 11 10:52:32 2015 -0700

    Imported Upstream version 3.2
---
 BUILD.txt                                          |  478 +++
 antlr3-maven-plugin/pom.xml                        |  358 ++
 .../java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java |   90 +
 .../java/org/antlr/mojo/antlr3/Antlr3Mojo.java     |  506 +++
 .../src/site/apt/examples/import.apt               |    8 +
 .../src/site/apt/examples/libraries.apt            |   47 +
 .../src/site/apt/examples/simple.apt               |   40 +
 antlr3-maven-plugin/src/site/apt/index.apt         |   63 +
 antlr3-maven-plugin/src/site/apt/usage.apt.vm      |  193 +
 antlr3-maven-plugin/src/site/site.xml              |   33 +
 antlrjar.xml                                       |  106 +
 antlrsources.xml                                   |  318 ++
 build.properties                                   |    8 -
 build.xml                                          |  227 --
 gunit-maven-plugin/pom.xml                         |  242 ++
 .../org/antlr/mojo/antlr3/GUnitExecuteMojo.java    |  410 ++
 gunit/CHANGES.txt                                  |   87 +
 LICENSE.txt => gunit/LICENSE.txt                   |    2 +-
 gunit/README.txt                                   |   56 +
 gunit/antlr.config                                 |    0
 gunit/pom.xml                                      |  168 +
 gunit/src/main/antlr3/org/antlr/gunit/gUnit.g      |  352 ++
 .../org/antlr/gunit/swingui/parsers/ANTLRv3.g      |  619 +++
 .../org/antlr/gunit/swingui/parsers/StGUnit.g      |  213 +
 .../main/java/org/antlr/gunit/AbstractTest.java    |   83 +
 .../src/main/java/org/antlr/gunit/BooleanTest.java |   49 +-
 .../src/main/java/org/antlr/gunit/GrammarInfo.java |   96 +
 .../src/main/java/org/antlr/gunit/ITestCase.java   |   46 +-
 .../src/main/java/org/antlr/gunit/ITestSuite.java  |   29 +-
 gunit/src/main/java/org/antlr/gunit/Interp.java    |   91 +
 .../org/antlr/gunit/InvalidInputException.java     |   15 +-
 .../main/java/org/antlr/gunit/JUnitCodeGen.java    |  325 ++
 .../src/main/java/org/antlr/gunit/OutputTest.java  |   58 +-
 .../src/main/java/org/antlr/gunit/ReturnTest.java  |   53 +-
 .../main/java/org/antlr/gunit/gUnitBaseTest.java   |  457 +++
 .../main/java/org/antlr/gunit/gUnitExecutor.java   |  629 +++
 .../main/java/org/antlr/gunit/gUnitTestInput.java  |   26 +-
 .../main/java/org/antlr/gunit/gUnitTestResult.java |   78 +-
 .../main/java/org/antlr/gunit/gUnitTestSuite.java  |   80 +
 .../antlr/gunit/swingui/AbstractInputEditor.java   |   30 +-
 .../java/org/antlr/gunit/swingui/IController.java  |   14 +-
 .../java/org/antlr/gunit/swingui/ImageFactory.java |   83 +
 .../antlr/gunit/swingui/RuleListController.java    |  160 +
 .../org/antlr/gunit/swingui/RunnerController.java  |  240 ++
 .../antlr/gunit/swingui/StatusBarController.java   |   93 +
 .../gunit/swingui/TestCaseEditController.java      |  633 +++
 .../main/java/org/antlr/gunit/swingui/Tool.java    |   45 +-
 .../antlr/gunit/swingui/WorkSpaceController.java   |  288 ++
 .../org/antlr/gunit/swingui/WorkSpaceView.java     |  103 +
 .../antlr/gunit/swingui/model/ITestCaseInput.java  |   13 +-
 .../antlr/gunit/swingui/model/ITestCaseOutput.java |   21 +-
 .../java/org/antlr/gunit/swingui/model/Rule.java   |   66 +-
 .../org/antlr/gunit/swingui/model/TestCase.java    |   70 +-
 .../gunit/swingui/model/TestCaseInputFile.java     |   43 +-
 .../swingui/model/TestCaseInputMultiString.java    |   36 +-
 .../gunit/swingui/model/TestCaseInputString.java   |   38 +-
 .../gunit/swingui/model/TestCaseOutputAST.java     |   36 +-
 .../gunit/swingui/model/TestCaseOutputResult.java  |   59 +-
 .../gunit/swingui/model/TestCaseOutputReturn.java  |   32 +-
 .../gunit/swingui/model/TestCaseOutputStdOut.java  |   32 +-
 .../org/antlr/gunit/swingui/model/TestSuite.java   |  100 +
 .../gunit/swingui/model/TestSuiteFactory.java      |  193 +
 .../gunit/swingui/runner/NotifiedTestExecuter.java |   73 +
 .../antlr/gunit/swingui/runner/ParserLoader.java   |  124 +
 .../gunit/swingui/runner/TestSuiteAdapter.java     |  104 +
 .../antlr/gunit/swingui/runner/gUnitAdapter.java   |   80 +
 .../resources/org/antlr/gunit/gUnitTestResult.stg  |   93 +-
 gunit/src/main/resources/org/antlr/gunit/junit.stg |   83 +
 .../resources/org/antlr/gunit/swingui/gunit.stg    |   19 +
 .../org/antlr/gunit/swingui/images/accept.png      |  Bin 0 -> 1516 bytes
 .../org/antlr/gunit/swingui/images/add.png         |  Bin 0 -> 1510 bytes
 .../org/antlr/gunit/swingui/images/addfile24.png   |  Bin 0 -> 1653 bytes
 .../org/antlr/gunit/swingui/images/delete24.png    |  Bin 0 -> 1468 bytes
 .../org/antlr/gunit/swingui/images/edit16.png      |  Bin 0 -> 775 bytes
 .../org/antlr/gunit/swingui/images/favb16.png      |  Bin 0 -> 842 bytes
 .../org/antlr/gunit/swingui/images/favb24.png      |  Bin 0 -> 1417 bytes
 .../org/antlr/gunit/swingui/images/file16.png      |  Bin 0 -> 831 bytes
 .../antlr/gunit/swingui/images/filesearch24.png    |  Bin 0 -> 1653 bytes
 .../org/antlr/gunit/swingui/images/floppy24.png    |  Bin 0 -> 1586 bytes
 .../org/antlr/gunit/swingui/images/folder24.png    |  Bin 0 -> 1345 bytes
 .../org/antlr/gunit/swingui/images/help24.png      |  Bin 0 -> 1471 bytes
 .../org/antlr/gunit/swingui/images/next24.png      |  Bin 0 -> 1507 bytes
 .../org/antlr/gunit/swingui/images/redo24.png      |  Bin 0 -> 1305 bytes
 .../org/antlr/gunit/swingui/images/refresh24.png   |  Bin 0 -> 1546 bytes
 .../org/antlr/gunit/swingui/images/runfail.png     |  Bin 0 -> 869 bytes
 .../org/antlr/gunit/swingui/images/runpass.png     |  Bin 0 -> 904 bytes
 .../org/antlr/gunit/swingui/images/saveas24.png    |  Bin 0 -> 1622 bytes
 .../org/antlr/gunit/swingui/images/testgroup.png   |  Bin 0 -> 808 bytes
 .../org/antlr/gunit/swingui/images/testgroupx.png  |  Bin 0 -> 777 bytes
 .../org/antlr/gunit/swingui/images/testsuite.png   |  Bin 0 -> 936 bytes
 .../org/antlr/gunit/swingui/images/textfile16.png  |  Bin 0 -> 859 bytes
 .../org/antlr/gunit/swingui/images/textfile24.png  |  Bin 0 -> 1493 bytes
 .../org/antlr/gunit/swingui/images/undo24.png      |  Bin 0 -> 1323 bytes
 .../org/antlr/gunit/swingui/images/windowb16.png   |  Bin 0 -> 948 bytes
 gunit/src/test/java/org/antlr/gunit/GunitTest.java |   38 +
 pom.xml                                            |  250 ++
 runtime/Java/antlr.config                          |    0
 runtime/Java/doxyfile                              |    2 +-
 runtime/Java/pom.xml                               |  119 +
 .../java}/org/antlr/runtime/ANTLRFileStream.java   |    2 +-
 .../java/org/antlr/runtime/ANTLRInputStream.java}  |   60 +-
 .../java}/org/antlr/runtime/ANTLRReaderStream.java |   29 +-
 .../java}/org/antlr/runtime/ANTLRStringStream.java |    9 +-
 .../java}/org/antlr/runtime/BaseRecognizer.java    |  508 +--
 .../{ => main/java}/org/antlr/runtime/BitSet.java  |    3 +-
 .../java}/org/antlr/runtime/CharStream.java        |    2 +-
 .../java}/org/antlr/runtime/CharStreamState.java   |    2 +-
 .../java}/org/antlr/runtime/ClassicToken.java      |   34 +
 .../java}/org/antlr/runtime/CommonToken.java       |   15 +-
 .../java}/org/antlr/runtime/CommonTokenStream.java |   11 +-
 .../src/{ => main/java}/org/antlr/runtime/DFA.java |   50 +-
 .../org/antlr/runtime/EarlyExitException.java      |    2 +-
 .../antlr/runtime/FailedPredicateException.java    |    2 +-
 .../java}/org/antlr/runtime/IntStream.java         |    8 +-
 .../{ => main/java}/org/antlr/runtime/Lexer.java   |  212 +-
 .../antlr/runtime/MismatchedNotSetException.java   |    2 +-
 .../antlr/runtime/MismatchedRangeException.java    |    5 +-
 .../org/antlr/runtime/MismatchedSetException.java  |    2 +-
 .../antlr/runtime/MismatchedTokenException.java    |    9 +-
 .../runtime/MismatchedTreeNodeException.java}      |   15 +-
 .../org/antlr/runtime/MissingTokenException.java}  |   25 +-
 .../org/antlr/runtime/NoViableAltException.java    |    9 +-
 .../{ => main/java}/org/antlr/runtime/Parser.java  |   39 +-
 .../org/antlr/runtime/ParserRuleReturnScope.java   |    7 +-
 .../org/antlr/runtime/RecognitionException.java    |    2 +-
 .../org/antlr/runtime/RecognizerSharedState.java   |  144 +
 .../java/org/antlr/runtime/RuleReturnScope.java}   |   25 +-
 .../java/org/antlr/runtime/SerializedGrammar.java  |  170 +
 .../{ => main/java}/org/antlr/runtime/Token.java   |   33 +-
 .../org/antlr/runtime/TokenRewriteStream.java      |  370 +-
 .../java}/org/antlr/runtime/TokenSource.java       |    7 +-
 .../java}/org/antlr/runtime/TokenStream.java       |    2 +-
 .../org/antlr/runtime/UnwantedTokenException.java} |   22 +-
 .../runtime/debug/BlankDebugEventListener.java     |    8 +-
 .../org/antlr/runtime/debug/DebugEventHub.java     |   44 +-
 .../antlr/runtime/debug/DebugEventListener.java    |   19 +-
 .../antlr/runtime/debug/DebugEventRepeater.java    |   34 +-
 .../antlr/runtime/debug/DebugEventSocketProxy.java |  108 +-
 .../java}/org/antlr/runtime/debug/DebugParser.java |   34 +-
 .../org/antlr/runtime/debug/DebugTokenStream.java  |    6 +-
 .../org/antlr/runtime/debug/DebugTreeAdaptor.java  |  100 +-
 .../antlr/runtime/debug/DebugTreeNodeStream.java   |   14 +-
 .../org/antlr/runtime/debug/DebugTreeParser.java   |   50 +-
 .../org/antlr/runtime/debug/ParseTreeBuilder.java  |   37 +-
 .../java}/org/antlr/runtime/debug/Profiler.java    |    6 +-
 .../debug/RemoteDebugEventSocketListener.java      |   58 +-
 .../runtime/debug/TraceDebugEventListener.java     |   27 +
 .../java}/org/antlr/runtime/debug/Tracer.java      |    2 +-
 .../java/org/antlr/runtime/misc/FastQueue.java     |   93 +
 .../main/java/org/antlr/runtime/misc/IntArray.java |   95 +-
 .../org/antlr/runtime/misc/LookaheadStream.java    |  163 +
 .../java}/org/antlr/runtime/misc/Stats.java        |   27 +
 .../main/java/org/antlr/runtime/tree/BaseTree.java |  349 ++
 .../org/antlr/runtime/tree/BaseTreeAdaptor.java    |  112 +-
 .../runtime/tree/BufferedTreeNodeStream.java}      |  214 +-
 .../org/antlr/runtime/tree/CommonErrorNode.java    |  108 +
 .../java}/org/antlr/runtime/tree/CommonTree.java   |   70 +-
 .../org/antlr/runtime/tree/CommonTreeAdaptor.java  |   93 +-
 .../antlr/runtime/tree/CommonTreeNodeStream.java   |  167 +
 .../org/antlr/runtime/tree/DOTTreeGenerator.java   |   41 +-
 .../java}/org/antlr/runtime/tree/ParseTree.java    |   42 +-
 .../runtime/tree/RewriteCardinalityException.java  |    2 +-
 .../runtime/tree/RewriteEarlyExitException.java    |    2 +-
 .../runtime/tree/RewriteEmptyStreamException.java  |    2 +-
 .../runtime/tree/RewriteRuleElementStream.java     |    4 +-
 .../antlr/runtime/tree/RewriteRuleNodeStream.java} |   31 +-
 .../runtime/tree/RewriteRuleSubtreeStream.java     |    2 +-
 .../antlr/runtime/tree/RewriteRuleTokenStream.java |   17 +-
 .../src/main/java/org/antlr/runtime/tree/Tree.java |  127 +
 .../java}/org/antlr/runtime/tree/TreeAdaptor.java  |   63 +-
 .../java/org/antlr/runtime/tree/TreeFilter.java    |  135 +
 .../java/org/antlr/runtime/tree/TreeIterator.java  |  131 +
 .../org/antlr/runtime/tree/TreeNodeStream.java     |   23 +-
 .../java}/org/antlr/runtime/tree/TreeParser.java   |   69 +-
 .../org/antlr/runtime/tree/TreePatternLexer.java   |    2 +-
 .../org/antlr/runtime/tree/TreePatternParser.java  |    4 +-
 .../java/org/antlr/runtime/tree/TreeRewriter.java  |  120 +
 .../antlr/runtime/tree/TreeRuleReturnScope.java    |    3 +-
 .../java/org/antlr/runtime/tree/TreeVisitor.java   |   42 +
 .../org/antlr/runtime/tree/TreeVisitorAction.java  |   19 +
 .../java}/org/antlr/runtime/tree/TreeWizard.java   |  153 +-
 .../src/org/antlr/runtime/ANTLRInputStream.java    |   43 -
 .../antlr/runtime/MismatchedTreeNodeException.java |   22 -
 .../src/org/antlr/runtime/RuleReturnScope.java     |   15 -
 .../Java/src/org/antlr/runtime/tree/BaseTree.java  |  193 -
 runtime/Java/src/org/antlr/runtime/tree/Tree.java  |   64 -
 .../runtime/tree/UnBufferedTreeNodeStream.java     |  561 ---
 src/org/antlr/Tool.java                            |  551 ---
 src/org/antlr/analysis/NFAConversionThread.java    |   38 -
 src/org/antlr/codegen/ANTLRTokenTypes.txt          |   95 -
 src/org/antlr/codegen/ActionTranslator.tokens      |   35 -
 src/org/antlr/codegen/ActionTranslatorLexer.java   | 3640 -----------------
 src/org/antlr/codegen/CTarget.java                 |  238 --
 src/org/antlr/codegen/CodeGenTreeWalker.java       | 3132 ---------------
 src/org/antlr/codegen/CodeGenTreeWalker.smap       | 2419 ------------
 .../antlr/codegen/CodeGenTreeWalkerTokenTypes.java |  135 -
 .../antlr/codegen/CodeGenTreeWalkerTokenTypes.txt  |   95 -
 src/org/antlr/codegen/templates/C/Dbg.stg          |  184 -
 src/org/antlr/codegen/templates/Python/AST.stg     |  478 ---
 src/org/antlr/misc/Barrier.java                    |   35 -
 src/org/antlr/misc/Interval.java                   |  137 -
 src/org/antlr/misc/MutableInteger.java             |   15 -
 src/org/antlr/test/BaseTest.java                   |  542 ---
 src/org/antlr/test/DebugTestRewriteAST.java        |    6 -
 src/org/antlr/test/ErrorQueue.java                 |   41 -
 src/org/antlr/test/TestCommonTreeNodeStream.java   |  203 -
 src/org/antlr/test/TestMessages.java               |   46 -
 src/org/antlr/test/TestTokenRewriteStream.java     |  462 ---
 .../antlr/test/TestUnBufferedTreeNodeStream.java   |  111 -
 src/org/antlr/tool/ANTLRLexer.java                 | 1794 ---------
 src/org/antlr/tool/ANTLRLexer.smap                 | 1203 ------
 src/org/antlr/tool/ANTLRParser.java                | 4172 --------------------
 src/org/antlr/tool/ANTLRParser.smap                | 2758 -------------
 src/org/antlr/tool/ANTLRTokenTypes.java            |  133 -
 src/org/antlr/tool/ANTLRTokenTypes.txt             |   95 -
 src/org/antlr/tool/ANTLRTreePrinter.java           | 2295 -----------
 src/org/antlr/tool/ANTLRTreePrinter.smap           | 1670 --------
 src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java |  129 -
 src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt  |   95 -
 src/org/antlr/tool/ANTLRv3.g                       |  745 ----
 src/org/antlr/tool/ActionAnalysis.tokens           |    5 -
 src/org/antlr/tool/ActionAnalysisLexer.java        |  400 --
 src/org/antlr/tool/AssignTokenTypesWalker.java     | 1949 ---------
 src/org/antlr/tool/AssignTokenTypesWalker.smap     | 1403 -------
 .../tool/AssignTokenTypesWalkerTokenTypes.java     |  133 -
 .../tool/AssignTokenTypesWalkerTokenTypes.txt      |   96 -
 src/org/antlr/tool/BuildDependencyGenerator.java   |  193 -
 src/org/antlr/tool/DefineGrammarItemsWalker.java   | 2995 --------------
 src/org/antlr/tool/DefineGrammarItemsWalker.smap   | 2248 -----------
 .../tool/DefineGrammarItemsWalkerTokenTypes.java   |  130 -
 .../tool/DefineGrammarItemsWalkerTokenTypes.txt    |   95 -
 src/org/antlr/tool/RandomPhrase.java               |  180 -
 src/org/antlr/tool/TreeToNFAConverter.java         | 2852 -------------
 src/org/antlr/tool/TreeToNFAConverter.smap         | 2084 ----------
 .../antlr/tool/TreeToNFAConverterTokenTypes.java   |  131 -
 .../antlr/tool/TreeToNFAConverterTokenTypes.txt    |   95 -
 README.txt => tool/CHANGES.txt                     | 1385 ++++++-
 LICENSE.txt => tool/LICENSE.txt                    |    2 +-
 tool/README.txt                                    |  123 +
 tool/antlr.config                                  |    0
 tool/pom.xml                                       |  106 +
 .../src/main/antlr2/org/antlr/grammar/v2}/antlr.g  |  353 +-
 .../antlr2/org/antlr/grammar/v2}/antlr.print.g     |   18 +-
 .../antlr2/org/antlr/grammar/v2}/assign.types.g    |  238 +-
 .../main/antlr2/org/antlr/grammar/v2}/buildnfa.g   |  123 +-
 .../main/antlr2/org/antlr/grammar/v2}/codegen.g    |  226 +-
 .../src/main/antlr2/org/antlr/grammar/v2}/define.g |   73 +-
 .../src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g |  625 +++
 .../main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g |  261 ++
 .../antlr3/org/antlr/grammar/v3}/ActionAnalysis.g  |   16 +-
 .../org/antlr/grammar/v3}/ActionTranslator.g       |   73 +-
 tool/src/main/java/org/antlr/Tool.java             | 1382 +++++++
 .../main/java/org/antlr/analysis/ActionLabel.java  |   30 +-
 .../AnalysisRecursionOverflowException.java        |   23 +-
 .../antlr/analysis/AnalysisTimeoutException.java   |   13 +-
 .../src/main/java}/org/antlr/analysis/DFA.java     |  325 +-
 .../java}/org/antlr/analysis/DFAOptimizer.java     |    2 +
 .../main/java}/org/antlr/analysis/DFAState.java    |  351 +-
 .../java}/org/antlr/analysis/DecisionProbe.java    |  294 +-
 .../main/java/org/antlr/analysis/LL1Analyzer.java  |  446 +++
 tool/src/main/java/org/antlr/analysis/LL1DFA.java  |  179 +
 .../src/main/java}/org/antlr/analysis/Label.java   |   88 +-
 .../java}/org/antlr/analysis/LookaheadSet.java     |   51 +-
 .../src/main/java}/org/antlr/analysis/NFA.java     |   28 +-
 .../java}/org/antlr/analysis/NFAConfiguration.java |   17 +-
 .../main/java}/org/antlr/analysis/NFAContext.java  |   17 +-
 .../org/antlr/analysis/NFAConversionThread.java    |  111 +-
 .../main/java}/org/antlr/analysis/NFAState.java    |   51 +-
 .../org/antlr/analysis/NFAToDFAConverter.java      |  584 ++-
 .../antlr/analysis/NonLLStarDecisionException.java |   19 +-
 .../java/org/antlr/analysis/PredicateLabel.java    |   85 +-
 .../org/antlr/analysis/RuleClosureTransition.java  |   29 +-
 .../java}/org/antlr/analysis/SemanticContext.java  |   36 +-
 .../src/main/java}/org/antlr/analysis/State.java   |    0
 .../java}/org/antlr/analysis/StateCluster.java     |    0
 .../main/java}/org/antlr/analysis/Transition.java  |   10 +-
 .../org/antlr/codegen/ACyclicDFACodeGenerator.java |   16 +-
 .../java/org/antlr/codegen/ActionScriptTarget.java |  134 +
 .../main/java}/org/antlr/codegen/CPPTarget.java    |    0
 .../main/java/org/antlr/codegen/CSharp2Target.java |  103 +-
 .../main/java}/org/antlr/codegen/CSharpTarget.java |   11 +
 tool/src/main/java/org/antlr/codegen/CTarget.java  |  329 ++
 .../java}/org/antlr/codegen/CodeGenerator.java     |  309 +-
 .../main/java/org/antlr/codegen/DelphiTarget.java  |  147 +
 .../java/org/antlr/codegen/JavaScriptTarget.java   |   47 +
 .../main/java}/org/antlr/codegen/JavaTarget.java   |    0
 .../main/java}/org/antlr/codegen/ObjCTarget.java   |    0
 .../main/java/org/antlr/codegen/Perl5Target.java   |   92 +
 .../main/java}/org/antlr/codegen/PythonTarget.java |    4 +-
 .../main/java}/org/antlr/codegen/RubyTarget.java   |    0
 .../src/main/java}/org/antlr/codegen/Target.java   |   78 +-
 .../src/main/java/org/antlr/misc/Barrier.java      |   61 +-
 .../src/main/java}/org/antlr/misc/BitSet.java      |   16 +-
 tool/src/main/java/org/antlr/misc/Graph.java       |  107 +
 .../main/java}/org/antlr/misc/IntArrayList.java    |    0
 .../src/main/java}/org/antlr/misc/IntSet.java      |    0
 tool/src/main/java/org/antlr/misc/Interval.java    |  142 +
 .../src/main/java}/org/antlr/misc/IntervalSet.java |  131 +-
 .../src/main/java/org/antlr/misc/MultiMap.java     |   25 +-
 .../main/java/org/antlr/misc/MutableInteger.java   |   24 +-
 .../main/java}/org/antlr/misc/OrderedHashSet.java  |   24 +-
 .../src/main/java}/org/antlr/misc/Utils.java       |    0
 .../java}/org/antlr/tool/ANTLRErrorListener.java   |    2 +-
 .../org/antlr/tool/AssignTokenTypesBehavior.java   |  311 ++
 .../src/main/java}/org/antlr/tool/Attribute.java   |    2 +-
 .../main/java}/org/antlr/tool/AttributeScope.java  |   26 +-
 .../org/antlr/tool/BuildDependencyGenerator.java   |  265 ++
 .../main/java/org/antlr/tool/CompositeGrammar.java |  519 +++
 .../java/org/antlr/tool/CompositeGrammarTree.java  |  155 +
 .../main/java}/org/antlr/tool/DOTGenerator.java    |  114 +-
 .../main/java}/org/antlr/tool/ErrorManager.java    |   89 +-
 .../main/java}/org/antlr/tool/FASerializer.java    |   18 +-
 .../src/main/java}/org/antlr/tool/Grammar.java     | 2026 +++++++---
 .../src/main/java}/org/antlr/tool/GrammarAST.java  |  134 +-
 .../antlr/tool/GrammarAnalysisAbortedMessage.java  |   12 +-
 .../antlr/tool/GrammarDanglingStateMessage.java    |   23 +-
 .../tool/GrammarInsufficientPredicatesMessage.java |   44 +-
 .../antlr/tool/GrammarNonDeterminismMessage.java   |   10 +-
 .../main/java}/org/antlr/tool/GrammarReport.java   |   16 +-
 .../main/java}/org/antlr/tool/GrammarSanity.java   |  111 +-
 .../org/antlr/tool/GrammarSemanticsMessage.java    |    2 +-
 .../java/org/antlr/tool/GrammarSerializerFoo.java  |  191 +
 .../main/java/org/antlr/tool/GrammarSpelunker.java |  254 ++
 .../java}/org/antlr/tool/GrammarSyntaxMessage.java |    2 +-
 .../antlr/tool/GrammarUnreachableAltsMessage.java  |    6 +-
 .../src/main/java}/org/antlr/tool/Interp.java      |   46 +-
 .../src/main/java}/org/antlr/tool/Interpreter.java |   88 +-
 .../org/antlr/tool/LeftRecursionCyclesMessage.java |    2 +-
 .../src/main/java}/org/antlr/tool/Message.java     |    2 +-
 .../src/main/java}/org/antlr/tool/NFAFactory.java  |  181 +-
 .../java}/org/antlr/tool/NameSpaceChecker.java     |   56 +-
 .../org/antlr/tool/NonRegularDecisionMessage.java  |    8 +-
 .../src/main/java/org/antlr/tool/RandomPhrase.java |  222 ++
 .../org/antlr/tool/RecursionOverflowMessage.java   |    2 +-
 .../src/main/java}/org/antlr/tool/Rule.java        |   94 +-
 .../main/java}/org/antlr/tool/RuleLabelScope.java  |    5 +-
 tool/src/main/java/org/antlr/tool/Strip.java       |  239 ++
 .../src/main/java}/org/antlr/tool/ToolMessage.java |    2 +-
 tool/src/main/java/org/antlr/tool/serialize.g      |  238 ++
 tool/src/main/resources/org/antlr/antlr.properties |    7 +
 .../org/antlr/codegen/templates/ANTLRCore.sti      |   25 +-
 .../antlr/codegen/templates/ActionScript/AST.stg   |  404 ++
 .../codegen/templates/ActionScript/ASTParser.stg   |  190 +
 .../templates/ActionScript/ASTTreeParser.stg       |  296 ++
 .../templates/ActionScript/ActionScript.stg        |  664 ++--
 .../org/antlr/codegen/templates/C/AST.stg          |  396 +-
 .../org/antlr/codegen/templates/C/ASTDbg.stg       |   23 +-
 .../org/antlr/codegen/templates/C/ASTParser.stg    |  206 +
 .../antlr/codegen/templates/C/ASTTreeParser.stg    |  327 ++
 .../resources}/org/antlr/codegen/templates/C/C.stg | 1075 +++--
 .../org/antlr/codegen/templates/C/Dbg.stg          |  240 ++
 .../org/antlr/codegen/templates/CPP}/CPP.stg       |    4 +-
 .../org/antlr/codegen/templates/CSharp/AST.stg     |  880 ++---
 .../org/antlr/codegen/templates/CSharp/ASTDbg.stg  |  141 +-
 .../antlr/codegen/templates/CSharp/ASTParser.stg   |  220 ++
 .../codegen/templates/CSharp/ASTTreeParser.stg     |  315 ++
 .../org/antlr/codegen/templates/CSharp/CSharp.stg  | 2824 ++++++-------
 .../org/antlr/codegen/templates/CSharp/Dbg.stg     |  492 ++-
 .../org/antlr/codegen/templates/CSharp/ST.stg      |  342 +-
 .../org/antlr/codegen/templates/CSharp2}/AST.stg   |  880 ++---
 .../antlr/codegen/templates/CSharp2}/ASTDbg.stg    |  141 +-
 .../antlr/codegen/templates/CSharp2/ASTParser.stg  |  220 ++
 .../codegen/templates/CSharp2/ASTTreeParser.stg    |  315 ++
 .../antlr/codegen/templates/CSharp2/CSharp2.stg    | 2812 ++++++-------
 .../org/antlr/codegen/templates/CSharp2}/Dbg.stg   |  492 ++-
 .../org/antlr/codegen/templates/CSharp2}/ST.stg    |   94 +-
 .../org/antlr/codegen/templates/CSharp3/AST.stg    |  433 ++
 .../org/antlr/codegen/templates/CSharp3/ASTDbg.stg |  100 +
 .../antlr/codegen/templates/CSharp3/ASTParser.stg  |  194 +
 .../codegen/templates/CSharp3/ASTTreeParser.stg    |  300 ++
 .../antlr/codegen/templates/CSharp3/CSharp3.stg    | 1509 +++++++
 .../org/antlr/codegen/templates/CSharp3/Dbg.stg    |  299 ++
 .../org/antlr/codegen/templates/CSharp3/ST.stg     |  167 +
 .../org/antlr/codegen/templates/Delphi/AST.stg     |  445 +++
 .../antlr/codegen/templates/Delphi/ASTParser.stg   |  220 ++
 .../codegen/templates/Delphi/ASTTreeParser.stg     |  307 ++
 .../org/antlr/codegen/templates/Delphi/Delphi.stg  | 1805 +++++++++
 .../org/antlr/codegen/templates/Java/AST.stg       |  260 +-
 .../org/antlr/codegen/templates/Java/ASTDbg.stg    |   40 +-
 .../org/antlr/codegen/templates/Java/ASTParser.stg |  190 +
 .../antlr/codegen/templates/Java/ASTTreeParser.stg |  296 ++
 .../org/antlr/codegen/templates/Java/Dbg.stg       |  117 +-
 .../org/antlr/codegen/templates/Java/Java.stg      |  344 +-
 .../org/antlr/codegen/templates/Java/ST.stg        |    8 +-
 .../org/antlr/codegen/templates/JavaScript/AST.stg |  391 ++
 .../codegen/templates/JavaScript/ASTParser.stg     |  161 +
 .../codegen/templates/JavaScript/ASTTreeParser.stg |  253 ++
 .../codegen/templates/JavaScript/JavaScript.stg    | 1326 +++++++
 .../org/antlr/codegen/templates/ObjC/AST.stg       |  248 +-
 .../org/antlr/codegen/templates/ObjC/ASTDbg.stg    |    0
 .../org/antlr/codegen/templates/ObjC/ASTParser.stg |  189 +
 .../antlr/codegen/templates/ObjC/ASTTreeParser.stg |  129 +
 .../org/antlr/codegen/templates/ObjC/Dbg.stg       |    0
 .../org/antlr/codegen/templates/ObjC/ObjC.stg      |  168 +-
 .../codegen/templates/Perl5/ASTTreeParser.stg      |  258 ++
 .../org/antlr/codegen/templates/Perl5/Perl5.stg    |  728 ++--
 .../org/antlr/codegen/templates/Python/AST.stg     |  458 +++
 .../org/antlr/codegen/templates/Python}/ASTDbg.stg |   52 +-
 .../antlr/codegen/templates/Python/ASTParser.stg   |  198 +
 .../codegen/templates/Python/ASTTreeParser.stg     |  312 ++
 .../org/antlr/codegen/templates/Python/Dbg.stg     |  317 ++
 .../org/antlr/codegen/templates/Python/Python.stg  |  534 ++-
 .../org/antlr/codegen/templates/Python}/ST.stg     |  148 +-
 .../org/antlr/codegen/templates/Ruby/Ruby.stg      |   27 +-
 .../resources}/org/antlr/tool/templates/depend.stg |    0
 .../org/antlr/tool/templates/dot/action-edge.st    |    0
 .../org/antlr/tool/templates/dot/decision-rank.st  |    0
 .../resources}/org/antlr/tool/templates/dot/dfa.st |    0
 .../org/antlr/tool/templates/dot/edge.st           |    0
 .../org/antlr/tool/templates/dot/epsilon-edge.st   |    0
 .../resources}/org/antlr/tool/templates/dot/nfa.st |    0
 .../org/antlr/tool/templates/dot/state.st          |    0
 .../org/antlr/tool/templates/dot/stopstate.st      |    0
 .../tool/templates/messages/formats/antlr.stg      |    0
 .../antlr/tool/templates/messages/formats/gnu.stg  |    0
 .../tool/templates/messages/formats/vs2005.stg     |    0
 .../antlr/tool/templates/messages/languages/en.stg |   67 +-
 tool/src/test/java/org/antlr/test/BaseTest.java    |  890 +++++
 .../java}/org/antlr/test/DebugTestAutoAST.java     |    0
 .../org/antlr/test/DebugTestCompositeGrammars.java |   11 +-
 .../java/org/antlr/test/DebugTestRewriteAST.java   |   12 +-
 .../src/test/java/org/antlr/test/ErrorQueue.java   |   47 +-
 .../java}/org/antlr/test/TestASTConstruction.java  |   74 +-
 .../test/java}/org/antlr/test/TestAttributes.java  | 1419 ++++---
 .../src/test/java}/org/antlr/test/TestAutoAST.java |  362 +-
 .../org/antlr/test/TestBufferedTreeNodeStream.java |   71 +
 .../org/antlr/test/TestCharDFAConversion.java      |  177 +-
 .../java/org/antlr/test/TestCompositeGrammars.java |  895 +++++
 .../java}/org/antlr/test/TestDFAConversion.java    |  804 +++-
 .../test/java}/org/antlr/test/TestDFAMatching.java |   29 +-
 .../test/java/org/antlr/test/TestFastQueue.java    |  131 +
 .../test/java/org/antlr/test/TestHeteroAST.java    |  517 +++
 .../org/antlr/test/TestInterpretedLexing.java      |   17 +-
 .../org/antlr/test/TestInterpretedParsing.java     |   15 +-
 .../test/java}/org/antlr/test/TestIntervalSet.java |   76 +-
 .../org/antlr/test/TestJavaCodeGeneration.java     |   35 +-
 .../src/test/java}/org/antlr/test/TestLexer.java   |   92 +-
 .../src/test/java/org/antlr/test/TestMessages.java |   78 +
 .../java}/org/antlr/test/TestNFAConstruction.java  |  737 ++--
 .../test/java}/org/antlr/test/TestRewriteAST.java  |  404 +-
 .../java}/org/antlr/test/TestRewriteTemplates.java |   40 +-
 .../test/TestSemanticPredicateEvaluation.java      |   35 +-
 .../org/antlr/test/TestSemanticPredicates.java     |  433 +-
 .../src/test/java}/org/antlr/test/TestSets.java    |   80 +-
 .../org/antlr/test/TestSymbolDefinitions.java      |  288 +-
 .../test/TestSyntacticPredicateEvaluation.java     |  105 +-
 .../test/java}/org/antlr/test/TestTemplates.java   |   78 +-
 .../org/antlr/test/TestTokenRewriteStream.java     |  797 ++++
 .../java/org/antlr/test/TestTopologicalSort.java   |  113 +
 .../org/antlr/test/TestTreeGrammarRewriteAST.java  | 1102 ++++++
 .../test/java/org/antlr/test/TestTreeIterator.java |  131 +
 .../java}/org/antlr/test/TestTreeNodeStream.java   |  114 +-
 .../test/java}/org/antlr/test/TestTreeParsing.java |  118 +-
 .../test/java}/org/antlr/test/TestTreeWizard.java  |   80 +-
 tool/src/test/java/org/antlr/test/TestTrees.java   |  409 ++
 455 files changed, 54895 insertions(+), 56729 deletions(-)

diff --git a/BUILD.txt b/BUILD.txt
new file mode 100644
index 0000000..f66bf6a
--- /dev/null
+++ b/BUILD.txt
@@ -0,0 +1,478 @@
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+============================================================================
+
+This file contains the build instructions for the ANTLR toolset as
+of version 3.1.3 and beyond.
+
+The ANTLR toolset must be built using the Maven build system as
+this build system updates the version numbers and controls the
+whole build process. However, if you just want the latest build
+and do not care to learn anything about Maven, then visit:
+
+   http://antlr.org/hudson
+
+And download the current complete jar from the Tool_Daily
+Hudson project (just follow the links for last successful build).
+At the time of writing, the link for the last successful
+snapshot build is:
+
+   http://antlr.org/hudson/job/ANTLR_Tool_Daily/lastSuccessfulBuild/org.antlr$antlr/
+
+If you are looking for the latest released version of ANTLR, then
+visit the downloads page on the main antlr.org website.
+
+These instructions are mainly for the ANTLR development team,
+though you are free to build ANTLR yourself of course.
+
+Source code Structure
+-----------------------
+
+The main development branch of ANTLR is stored within the Perforce SCM at:
+
+   //depot/code/antlr/main/...
+
+release branches are stored in Perforce like so:
+
+   //depot/code/antlr/release-3.1.3/...
+
+In this top level directory, you will find a master build file for Maven called pom.xml and
+you will also note that there are a number of subdirectories:
+
+ tool                  - The ANTLR tool itself
+ runtime/Java          - The ANTLR Java runtime
+ runtime/X             - The runtime for language target X
+ gunit                 - The grammar test tool
+ antlr3-maven-plugin   - The plugin tool for Maven that allows Maven projects to process
+                         ANTLR grammars.
+
+Each of these sub-directories also contains a file pom.xml that controls the build of each
+sub-component (or module in Maven parlance).
+
+Build Parameters
+-----------------
+
+Alongside each pom.xml (other than for the antlr3-maven-plugin), you will see that there
+is a file called antlr.config. This file is called a filter and should contain a set of key/value
+pairs in the same manner as Java properties files:
+
+antlr.something="Some config thang!"
+
+When the build of any component happens, any values in the antlr.config for the master
+build file and any values in the antlr.config file for each component are made available
+to the build. This is mainly used by the resource processor, which will filter any file it
+finds under: src/main/resources/** and replace any references such as ${antlr.something}
+with the actual value at the time of the build.
+
+Building
+--------
+
+Building ANTLR is trivial, assuming that you have loaded Maven version 2.0.9 or
+better on to your build system and installed it as explained here:
+
+http://maven.apache.org/download.html
+
+If you are unfamiliar with Maven (and even if you are), the best resource for learning
+about it is The Definitive Guide:
+
+http://www.sonatype.com/books/maven-book/reference/public-book.html
+
+The instructions here assume that Maven is installed and working correctly.
+
+If this is the first time you have built the ANTLR toolset, you will possibly
+need to install the master pom in your local repository (however the build
+may be able to locate this in the ANTLR snapshot or release repository). If you try
+to build sub-modules on their own (as in run the mvn command in the sub directory
+for that tool, such as runtime/Java), and you receive a message that
+maven cannot find the master pom, then execute this in the main (or release) 
+directory:
+
+mvn -N install
+
+This command will install the master build pom in your local maven repository
+(it's ~/.m2 on UNIX) and individual builds of sub-modules will now work correctly.
+
+To build then, simply cd into the master build directory (e.g. $P4ROOT//code/antlr/main)
+and type:
+
+mvn -Dmaven.test.skip=true
+
+Assuming that everything is correctly installed and synchronized, then ANTLR will build
+and skip any unit tests in the modules (the ANTLR tool tests can take a long time).
+
+This command will build each of the tools in the correct order and will create the jar
+artifacts of all the components in your local development Maven repository (which
+takes precedence over remote repositories by default). At the end of the build you
+should see:
+
+[INFO] ------------------------------------------------------------------------
+[INFO] Reactor Summary:
+[INFO] ------------------------------------------------------------------------
+[INFO] ANTLR Master build control POM ........................ SUCCESS [1.373s]
+[INFO] Antlr 3 Runtime ....................................... SUCCESS [0.879s]
+[INFO] ANTLR Grammar Tool .................................... SUCCESS [5.431s]
+[INFO] Maven plugin for ANTLR V3 ............................. SUCCESS [1.277s]
+[INFO] ANTLR gUnit ........................................... SUCCESS [1.566s]
+[INFO] Maven plugin for gUnit ANTLR V3 ....................... SUCCESS [0.079s]
+[INFO] ------------------------------------------------------------------------
+[INFO] ------------------------------------------------------------------------
+[INFO] BUILD SUCCESSFUL
+[INFO] ------------------------------------------------------------------------
+[INFO] Total time: 11 seconds
+
+However, unless you are using Maven exclusively in your projects, you will most
+likely want to build the ANTLR Uber Jar, which is an executable jar containing
+all the components that ANTLR needs to build and run parsers (note that at
+runtime, you need only the runtime components you use, such as the Java
+runtime and say stringtemplate).
+
+Because the Uber jar is not something we want to deploy to Maven repositories
+it is built with a special invocation of Maven:
+
+mvn -Dmaven.test.skip=true package assembly:assembly
+
+Note that Maven will appear to build everything twice, which is a quirk of how
+it calculates the dependencies and makes sure it has everything packaged up
+so it can build the uber-jar assembly.
+
+Somewhere in the build output (towards the end), you will find a line like this:
+
+[INFO] Building jar: /home/jimi/antlrsrc/code/antlr/main/target/antlr-master-3.1.3-SNAPSHOT-completejar.jar
+
+This is the executable jar that you need and you can either copy it somewhere or,
+like me, you can create this script (assuming UNIX) somewhere in your PATH:
+
+#! /bin/bash
+java -jar ~/antlrsrc/code/antlr/main/target/antlr-master-3.1.3-SNAPSHOT-completejar.jar $*
+
+Version Numbering
+-------------------
+
+The first and Golden rule is that any pom files stored under the main branch of the toolset
+should never be modified to contain a release version number. They should always contain
+a.b.c-SNAPSHOT (e.g. 3.1.3-SNAPSHOT). Only release branches should have their
+pom version numbers set to a release version. You can release as many SNAPSHOTS
+as you like, but only one release version. However, release versions may be updated
+with a patch level: 3.1.3-1, 3.1.3-2 and so on.
+
+Fortunately, Maven helps us with the version numbering in a number of ways. Firstly,
+the pom.xml files for the various modules do not specify a version of the
+artifacts themselves. They pick up their version number from the master build pom.
+However, there is a catch, because they need to know what version of the parent pom
+they inherit from and so they DO mention the version number. However, this does
+prevent accidentally releasing different versions of sub-modules than the master pom
+describes.
+
+Fortunately once again, Maven has a neat way of helping us with change the version.
+All you need do is check out all the pom.xml files from perforce, then modify the
+<version>a.b.c-SNAPSHOT</version> in the master pom. When the version number
+is correct in the master pom, you make sure your working directory is the location
+of the master pom and type:
+
+mvn versions:update-child-modules
+
+This command will then update the child pom.xml files to reflect the version number
+defined in the master pom.xml.
+
+There is unfortunately one last catch here though and that is that the antlr3-maven-plugin
+and the gunit-maven-plugin are not able to use the parent pom. The reason for 
+this is subtle but makes sense as doing so would create a circular dependency 
+between the ANTLR tool (which uses the plugin to build its own grammar files), 
+and the plugins (which uses the tool to build grammar files and gunit to test).
+
+This catch-22 situation means that the pom.xml file in the antlr3-maven-plugin directory
+and the one in the gunit-maven-plugin directory MUST be updated manually (or we 
+must write a script to do this).
+
+Finally, we need to remember that because the tool is dependent on the antlr3-maven-plugin
+and the plugin is itself dependent on the tool, that we must manually update
+the versions of each that they reference. So, when we bump the version of the toolset
+to say 3.1.4-SNAPSHOT, we need to change the antlr3-maven-plugin pom.xml and the 
+gunit-maven-plugin pom.xml to reference that version of the antlr tool. The tool 
+itself is always built with the prior released version of the plugin, so when we 
+release we must change the main branch of the tool to use the newly released 
+version of the plugin. This is covered in the release checklist.
+
+Deploying
+----------
+Deploying the tools at the current version is relatively easy, but to deploy to the
+ANTLR repositories (snapshot or release) you must have been granted access
+to the antlr.org server and supplied an ssh key. Few people will have this access of
+course.
+
+Assuming that you have ssh access to antlr.org, then you will need to do the following
+before deployment will authorize and work correctly (UNIX assumed here):
+
+$ eval `ssh-agent`
+Agent PID nnnnn
+$ ssh-add
+Enter passphrase for /home/you/.ssh/id_rsa:
+Identity added....
+
+Next, because we do not publish access information for antlr.org, you will need
+to configure the repository server names locally. You do this by creating (or
+adding to) the file:
+
+~/.m2/settings.xml
+
+Which should look like this:
+
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    User-specific configuration for maven. Includes things that should not
+    be distributed with the pom.xml file, such as developer identity, along with
+    local settings, like proxy information. The default location for the
+    settings file is ~/.m2/settings.xml
+-->
+<settings xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+    <servers>
+        <server>
+            <id>antlr-snapshot</id>
+            <username>mavensync</username>
+            <passphrase>passphrase for your private key</passphrase>
+            <privateKey>/home/youruserlogin/.ssh/id_rsa</privateKey>
+        </server>
+        <server>
+            <id>antlr-repo</id>
+            <username>mavensync</username>
+            <passphrase>passphrase for your private key</passphrase>
+            <privateKey>/home/youruserlogin/.ssh/id_rsa</privateKey>
+        </server>
+    </servers>
+</settings>
+
+When this configuration is in place, you will be able to deploy the components,
+either individually or from the master directory:
+
+mvn -Dmaven.test.skip=true deploy
+
+You will then see lots of information about checking existing version information
+and so on, and the components will be deployed.
+
+Note that so long as the artifacts are versioned with a.b.c-SNAPSHOT then
+deployment will always be to the development snapshot directory. When the
+artifacts are versioned with a release version then deployment will be to the
+antlr.org release repository, which will then be mirrored around the world. It
+is important not to deploy a release until you have built and tested it to your
+satisfaction.
+
+Release Checklist
+------------------
+
+Here is the procedure to use to make a release of ANTLR. Note that we should
+really use the mvn release:release command, but the perforce plugin for Maven is
+not commercial quality and I want to rewrite it.
+
+For this checklist, let's assume that the current development version of ANTLR
+is 3.1.3-SNAPSHOT. This means that it will probably (but not necessarily)
+become release version 3.1.3 and that the development version will bump
+to 3.1.4-SNAPSHOT.
+
+0) Run a build of the main branch and check that it is builds and passes as many
+   tests as you want it to.
+
+1) First make a branch from main into the target release directory. Then submit
+   this to perforce. You could change versions numbers before submitting, but
+   doing that in separate stages will keep things sane;
+
+--- Use main development branch from here ---
+
+2) Before we deploy the release, we want to update the versions of the development
+   branch, so we don't deploy what is now the new release as an older snapshot (this
+   is not super important, but procedure is good right?).
+
+   Check out all the pom.xml files (and if you are using any antlr.config parameters
+   that must change, then do that too).
+
+3) Edit the master pom.xml in the main directory and change the version from
+   3.1.3-SNAPSHOT to 3.1.4-SNAPSHOT.
+
+4) Edit the pom.xml file for antlr3-maven-plugin under the main directory and
+   change the version from 3.1.3-SNAPSHOT to 3.1.4-SNAPSHOT. Do the same for the
+   pom.xml in the gunit-maven-plugin directory.
+
+5) Now (from the main directory), run the command:
+
+         mvn versions:update-child-modules
+
+      You should see:
+
+         [INFO] [versions:update-child-modules]
+         [INFO] Module: gunit
+         [INFO]   Parent is org.antlr:antlr-master:3.1.4-SNAPSHOT
+         [INFO] Module: runtime/Java
+         [INFO]   Parent is org.antlr:antlr-master:3.1.4-SNAPSHOT
+         [INFO] Module: tool
+         [INFO]   Parent is org.antlr:antlr-master:3.1.4-SNAPSHOT
+
+6) Run a build of the main branch:
+
+         mvn -Dmaven.test.skip=true
+
+       All should be good.
+
+7) Submit the pom changes of the main branch to perforce.
+
+8) Deploy the new snapshot as a placeholder for the next release. It
+   will go to the snapshot repository of course:
+
+	  mvn -N deploy
+          mvn -Dmaven.test.skip=true deploy
+
+9) You are now finished with the main development branch and should change
+   working directories to the release branch you made earlier.
+
+--- Use release branch from here ---
+
+10) Check out all the pom.xml files in the release branch (and if you are
+    using any antlr.config parameters that must change, then do that too).
+
+11) Edit the master pom.xml in the release-3.1.3 directory and change the version from
+    3.1.3-SNAPSHOT to 3.1.3.
+
+12) Edit the pom.xml file for antlr3-maven-plugin under the release-3.1.3 directory and
+    change the version from 3.1.3-SNAPSHOT to 3.1.3. Also change the version of
+    the tool that this pom.xml references from 3.1.3-SNAPSHOT to 3.1.3 as we
+    are now releasing the plugin of course and it needs to reference the version
+    we are about to release. You will find this reference in the dependencies
+    section of the antlr3-maven-plugin pom.xml. Also change the version references
+    in the pom for gunit-maven-plugin.
+
+13)  Now (from the release-3.1.3 directory), run the command:
+
+           mvn versions:update-child-modules
+
+        You should see:
+
+	[INFO] [versions:update-child-modules]
+	[INFO] Module: gunit
+	[INFO]   Parent was org.antlr:antlr-master:3.1.3-SNAPSHOT,
+	       now org.antlr:antlr-master:3.1.3
+	[INFO] Module: runtime/Java
+	[INFO]   Parent was org.antlr:antlr-master:3.1.3-SNAPSHOT,
+	       now org.antlr:antlr-master:3.1.3
+	[INFO] Module: tool
+	[INFO]   Parent was org.antlr:antlr-master:3.1.3-SNAPSHOT,
+	       now org.antlr:antlr-master:3.1.3
+
+14)  Run a build of the release-3.1.3 branch:
+
+           mvn   # Note I am letting unit tests run here!
+
+        All should be good, or as good as it gets ;-)
+
+15)  Submit the pom changes of the release-3.1.3 branch to perforce.
+
+16)  Deploy the new release (this is it guys, make sure you are happy):
+
+	  mvn -N deploy
+          mvn -Dmaven.test.skip=true deploy
+
+        Note that we must skip the tests as Maven will not let you deploy releases
+        that fail any junit tests.
+
+17)  The final step is that we must update the main branch pom.xml for the
+     tool to reference the newly release version of the antlr3-maven-plugin. This is
+     because each release of ANTLR is built with the prior release of ANTLR, and
+     we have just released a new version. Edit the pom.xml for the tool (main/tool/pom.xml)
+     under the main (that's the MAIN branch, not the release branch) and find
+     the dependency reference to the antlr plugin. If you just released say
+     3.1.3, then the tool should now reference version 3.1.3 of the plugin. Having done
+     this, you should probably rebuild the main branch and let it run the junit
+     tests. Later, I will automate this dependency update as mvn can do this
+     for us.
+
+18)  Having deployed the release to maven, you will want to create the
+     uber jar for the new release, to make it downloadable from the
+     antlr.org website. This is a repeat of the earlier described step
+     to build the uber jar:
+
+       mvn -Dmaven.test.skip=true package assembly:assembly
+
+     Maven will produce the uber jar in the target directory:
+
+	antlr-master-3.1.3-completejar.jar
+
+     And this is the complete jar that can be downloaded from the web site. You
+     may wish to produce an md5 checksum to go with the jar:
+
+     md5sum target/antlr-master-3.1.3-completejar.jar
+     xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  target/antlr-master-3.1.3-completejar.jar
+
+     The command you just ran will also produce a second jar:
+
+        antlr-master-3.1.3-src.jar
+
+     This is the source code for everything you just deployed and can be unjarred
+     and built from scratch using the very procedures described here, which means
+     you will now be reading this BUILD.txt file for ever.
+
+19)  Reward anyone around you with good beer.
+
+
+Miscellany
+-----------
+
+It was a little tricky to get all the interdependencies correct because ANTLR builds
+itself using itself and the maven plugin references the ANTLR Tool as well. Hence
+the maven tool is not a child project of the master pom.xml file, even though it is
+built by it.
+
+An observant person will note that when the assembly:assembly phase is run,
+it invokes the build of the ANTLR tool using the version of the Maven plugin that
+it has just built, and this results in the plugin using the version of ANTLR tool that
+it has just built. This is safe because everything will already be up to date and so
+we package up the version of the tool that we expect, but the Maven plugin we
+deploy will use the correct version of ANTLR, even though there is technically
+a circular dependency.
+
+The master pom.xml does give us a way to cause the build of the ANTLR tool
+to use itself to build itself. This is because in dependencyManagement in the
+master pom.xml, we can reference the current version of the Tool and the
+Maven plugin, even though in the pom.xml for the tool itself refers to the previous
+version of the plugin.
+
+What happens is that if we first cd into the tool and maven directories and build ANTLR, it will
+build itself with the prior version and this will deploy locally (.m2). We can then
+clean build from the master pom and when ANTLR asks for the prior version of the tool,
+the master pom.xml will override it and build with the interim versions we just built manually.
+
+However, strictly speaking, we need a third build where we rebuild the tool again with
+the version of the tool that was built with itself and not deploy the version that was
+built by the version of itself that was built by a prior version of itself. I decided that this
+was not particularly useful and complicates things too much. Building with a prior
+version of the tool is fine and if there was ever a need to, we could release twice
+in quick succession.
+
+I have occasionally seen the Maven reactor screw up (or perhaps it is the ANTLR tool) when
+building. If this happens you will see an ANTLR Panic - cannot find en.stg message. If this
+happens to you, then just rerun the build and it will eventually work.
+
+Jim Idle - March 2009
+
diff --git a/antlr3-maven-plugin/pom.xml b/antlr3-maven-plugin/pom.xml
new file mode 100644
index 0000000..e592246
--- /dev/null
+++ b/antlr3-maven-plugin/pom.xml
@@ -0,0 +1,358 @@
+<!--
+
+ [The "BSD licence"]
+
+ ANTLR        - Copyright (c) 2005-2008 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  -->
+
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <!-- Maven model we are inheriting from
+      -->
+    <modelVersion>4.0.0</modelVersion>
+
+    <!--
+
+     Now that the ANTLR project has adopted Maven with a vengeance,
+     all ANTLR tools will be grouped under org.antlr and will be
+     controlled by a project member.
+     -->
+    <groupId>org.antlr</groupId>
+
+
+    <!--
+
+     This is the ANTLR plugin for ANTLR version 3.1.3 and above. It might
+     have been best to change the name of the plugin as the 3.1.2 plugins
+     behave a little differently, however for the sake of one transitional
+     phase to a much better plugin, it was decided that the name should
+     remain the same.
+      -->
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <packaging>maven-plugin</packaging>
+
+    <!-- Note that as this plugin depends on the ANTLR tool itself
+         we cannot use the parent pom to control the version number
+         and MUST update <version> in this pom manually!
+         -->
+    <version>3.2</version>
+    <name>Maven plugin for ANTLR V3</name>
+    <prerequisites>
+        <maven>2.0</maven>
+    </prerequisites>
+
+    <!--
+     Where does our actual project live on the interwebs.
+      -->
+    <url>http://antlr.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <description>
+
+This is the brand new, re-written from scratch plugin for ANTLR v3.
+
+Previous valiant efforts all suffered from being unable to modify the ANTLR Tool
+itself to provide support not just for Maven oriented things but any other tool
+that might wish to invoke ANTLR without resorting to the command line interface.
+
+Rather than try to shoe-horn new code into the existing Mojo (in fact I think that
+by incorporating a patch supplied by someone I ended up with two versions of the
+Mojo), I elected to rewrite everything from scratch, including the documentation, so
+that we might end up with a perfect Mojo that can do everything that ANTLR v3 supports
+such as imported grammar processing, proper support for library directories and
+locating token files from generated sources, and so on.
+
+In the end I decided to also change the ANTLR Tool.java code so that it
+would be the provider of all the things that a build tool needs, rather than
+delegating things to 5 different tools. So, things like dependencies, dependency
+sorting, option tracking, generating sources and so on are all folded back
+in to ANTLR's Tool.java code, where they belong, and they now provide a
+public interface to anyone that might want to interface with them.
+
+One other goal of this rewrite was to completely document the whole thing
+to death. Hence even this pom has more comments than functional elements,
+in case I get run over by a bus or fall off a cliff while skiing.
+
+Jim Idle - March 2009
+
+    </description>
+
+    <developers>
+
+        <developer>
+            <name>Jim Idle</name>
+            <url>http://www.temporal-wave.com</url>
+            <roles>
+                <role>Originator, version 3.1.3</role>
+            </roles>
+        </developer>
+
+        <developer>
+            <name>Terence Parr</name>
+            <url>http://antlr.org/wiki/display/~admin/Home</url>
+            <roles>
+                <role>Project lead - ANTLR</role>
+            </roles>
+        </developer>
+
+        <developer>
+            <name>David Holroyd</name>
+            <url>http://david.holroyd.me.uk/</url>
+            <roles>
+                <role>Originator - prior version</role>
+            </roles>
+        </developer>
+
+        <developer>
+            <name>Kenny MacDermid</name>
+            <url>mailto:kenny "at" kmdconsulting.ca</url>
+            <roles>
+                <role>Contributor - prior versions</role>
+            </roles>
+        </developer>
+
+    </developers>
+
+    <!-- Where are the continuous integration details for this project then John?
+      -->
+    <ciManagement>
+        <system>hudson</system>
+        <url>http://antlr.org/hudson/job/Maven_Plugin/lastSuccessfulBuild/</url>
+        <notifiers>
+            <notifier>
+                <type>rss</type>
+                
+                <configuration>
+                    <url>http://antlr.org/hudson/job/Maven_Plugin/rssAll</url>
+                </configuration>
+            </notifier>
+        </notifiers>
+    </ciManagement>
+
+    <!-- Where do we track bugs for this project?
+      -->
+    <issueManagement>
+        <system>JIRA</system>
+        <url>http://antlr.org/jira/browse/ANTLR</url>
+    </issueManagement>
+
+    <!-- Location of the license description for this project
+      -->
+    <licenses>
+        <license>
+            <distribution>repo</distribution>
+            <name>The BSD License</name>
+            <url>http://www.antlr.org/LICENSE.txt</url>
+        </license>
+    </licenses>
+
+    <distributionManagement>
+
+        <repository>
+            <id>antlr-repo</id>
+            <name>ANTLR Testing repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-repo</url>
+        </repository>
+
+        <snapshotRepository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-snapshot</url>
+        </snapshotRepository>
+
+        <site>
+            <id>antlr-repo</id>
+            <name>ANTLR Maven Plugin Web Site</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-maven-webs/antlr3-maven-plugin</url>
+        </site>
+    </distributionManagement>
+
+    <!--
+
+    Inform Maven of the ANTLR snapshot repository, which it will
+    need to consult to get the latest snapshot build of the runtime and tool
+    if it was not built and installed locally.
+    -->
+    <repositories>
+
+      <!--
+        This is the ANTLR repository.
+        -->
+        <repository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>http://antlr.org/antlr-snapshot</url>
+            <snapshots>
+                <enabled>true</enabled>
+                <updatePolicy>always</updatePolicy>
+            </snapshots>
+
+            <releases>
+                <enabled>false</enabled>
+            </releases>
+            
+        </repository>
+
+    </repositories>
+    
+    <!-- Ancillary information for completeness
+      -->
+    <inceptionYear>2009</inceptionYear>
+
+    <mailingLists>
+        <mailingList>
+            <archive>http://antlr.markmail.org/</archive>
+            <otherArchives>
+                <otherArchive>http://www.antlr.org/pipermail/antlr-interest/</otherArchive>
+            </otherArchives>
+            <name>ANTLR Users</name>
+            <subscribe>http://www.antlr.org/mailman/listinfo/antlr-interest/</subscribe>
+            <unsubscribe>http://www.antlr.org/mailman/options/antlr-interest/</unsubscribe>
+            <post>antlr-interest at antlr.org</post>
+        </mailingList>
+    </mailingLists>
+
+    <organization>
+        <name>ANTLR.org</name>
+        <url>http://www.antlr.org</url>
+    </organization>
+    <!-- ============================================================================= -->
+
+    <!--
+
+     What are we dependent on for the Mojos to execute? We need the
+     plugin API itself and of course we need the ANTLR Tool and runtime
+     and any of their dependencies, which we inherit. The Tool itself provides
+     us with all the dependencies, so we need only name it here.
+      -->
+    <dependencies>
+
+        <!--
+          The things we need to build the target language recognizer
+          -->
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-plugin-api</artifactId>
+            <version>2.0</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-project</artifactId>
+            <version>2.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.codehaus.plexus</groupId>
+            <artifactId>plexus-compiler-api</artifactId>
+            <version>1.5.3</version>
+        </dependency>
+
+        <!--
+         The version of ANTLR tool that this version of the plugin controls.
+         We have decided that this should be in lockstep with ANTLR itself, other
+         than -1 -2 -3 etc patch releases.
+          -->
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>3.2</version>
+        </dependency>
+
+        <!--
+          Testing requirements...
+          -->
+        <dependency>
+
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.5</version>
+            <scope>test</scope>
+
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven.shared</groupId>
+            <artifactId>maven-plugin-testing-harness</artifactId>
+            <version>1.0</version>
+            <scope>test</scope>
+        </dependency>
+        
+    </dependencies>
+    
+    <build>
+
+        <defaultGoal>install</defaultGoal>
+
+        <extensions>
+            <extension>
+                <groupId>org.apache.maven.wagon</groupId>
+                <artifactId>wagon-ssh-external</artifactId>
+                <version>1.0-beta-2</version>
+            </extension>
+        </extensions>
+
+        <plugins>
+
+            <plugin>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.0.2</version>
+                <configuration>
+                    <source>1.5</source>
+                    <target>jsr14</target>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-site-plugin</artifactId>
+                <version>2.0</version>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-project-info-reports-plugin</artifactId>
+                <version>2.1.1</version>
+                <configuration>
+                    <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
+                </configuration>
+            </plugin>
+            
+        </plugins>
+
+    </build>
+
+</project>
diff --git a/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java
new file mode 100644
index 0000000..bf2c3c6
--- /dev/null
+++ b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java
@@ -0,0 +1,90 @@
+/**
+ [The "BSD licence"]
+
+ ANTLR        - Copyright (c) 2005-2008 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.mojo.antlr3;
+
+import org.antlr.tool.ANTLRErrorListener;
+import org.antlr.tool.Message;
+import org.antlr.tool.ToolMessage;
+import org.apache.maven.plugin.logging.Log;
+
+/**
+ * The Maven plexus container gives us a Log logging provider
+ * which we can use to install an error listener for the ANTLR
+ * tool to report errors by.
+ */
+public class Antlr3ErrorLog implements ANTLRErrorListener {
+
+    private Log log;
+
+    /**
+     * Instantiate an ANTLR ErrorListener that communicates any messages
+     * it receives to the Maven error sink.
+     *
+     * @param log The Maven Error Log
+     */
+    public Antlr3ErrorLog(Log log) {
+        this.log = log;
+    }
+
+    /**
+     * Sends an informational message to the Maven log sink.
+     * @param message The message to send to Maven
+     */
+    public void info(String message) {
+        log.info(message);
+    }
+
+    /**
+     * Sends an error message from ANTLR analysis to the Maven Log sink.
+     *
+     * @param message The message to send to Maven.
+     */
+    public void error(Message message) {
+        log.error(message.toString());
+    }
+
+    /**
+     * Sends a warning message to the Maven log sink.
+     *
+     * @param message
+     */
+    public void warning(Message message) {
+        log.warn(message.toString());
+    }
+
+    /**
+     * Sends an error message from the ANTLR tool to the Maven Log sink.
+     * @param toolMessage
+     */
+    public void error(ToolMessage toolMessage) {
+        log.error(toolMessage.toString());
+    }
+}
diff --git a/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java
new file mode 100644
index 0000000..08b45ae
--- /dev/null
+++ b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java
@@ -0,0 +1,506 @@
+/**
+[The "BSD licence"]
+
+ANTLR        - Copyright (c) 2005-2008 Terence Parr
+Maven Plugin - Copyright (c) 2009      Jim Idle
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* ========================================================================
+ * This is the definitive ANTLR3 Mojo set. All other sets are belong to us.
+ */
+package org.antlr.mojo.antlr3;
+
+import antlr.RecognitionException;
+import antlr.TokenStreamException;
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugin.MojoFailureException;
+import org.apache.maven.project.MavenProject;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import org.antlr.Tool;
+import org.apache.maven.plugin.logging.Log;
+import org.codehaus.plexus.compiler.util.scan.InclusionScanException;
+import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
+import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
+
+/**
+ * Goal that picks up all the ANTLR grammars in a project and moves those that
+ * are required for generation of the compilable sources into the location
+ * that we use to compile them, such as target/generated-sources/antlr3 ...
+ *
+ * @goal antlr
+ * 
+ * @phase process-sources
+ * @requiresDependencyResolution compile
+ * @requiresProject true
+ * 
+ * @author <a href="mailto:jimi at temporal-wave.com">Jim Idle</a>
+ */
+public class Antlr3Mojo
+        extends AbstractMojo {
+
+    // First, let's deal with the options that the ANTLR tool itself
+    // can be configured by.
+    //
+    /**
+     * If set to true, then after the tool has processed an input grammar file
+     * it will report various statistics about the parser, such as information
+     * on cyclic DFAs, which rules may use backtracking, and so on.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean report;
+    /**
+     * If set to true, then the ANTLR tool will print a version of the input
+     * grammar which is devoid of any actions that may be present in the input file.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean printGrammar;
+    /**
+     * If set to true, then the code generated by the ANTLR code generator will
+     * be set to debug mode. This means that when run, the code will 'hang' and
+     * wait for a debug connection on a TCP port (49100 by default).
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean debug;
+    /**
+     * If set to true, then the generated parser will compute and report on
+     * profile information at runtime.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean profile;
+    /**
+     * If set to true then the ANTLR tool will generate a description of the nfa
+     * for each rule in <a href="http://www.graphviz.org">Dot format</a>
+     * 
+     * @parameter default-value="false"
+     */
+    protected boolean nfa;
+    /**
+     * If set to true then the ANTLR tool will generate a description of the DFA
+     * for each decision in the grammar in <a href="http://www.graphviz.org">Dot format</a>
+     * 
+     * @parameter default-value="false"
+     */
+    protected boolean dfa;
+    /**
+     * If set to true, the generated parser code will log rule entry and exit points
+     * to stdout as an aid to debugging.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean trace;
+    /**
+     * If this parameter is set, it indicates that any warning or error messages returned
+     * by ANTLR, should be formatted in the specified way. Currently, ANTLR supports the
+     * built-in formats of antlr, gnu and vs2005.
+     *
+     * @parameter default-value="antlr"
+     */
+    protected String messageFormat;
+    /**
+     * If this parameter is set to true, then ANTLR will report all sorts of things
+     * about what it is doing such as the names of files and the version of ANTLR and so on.
+     *
+     * @parameter default-value="true"
+     */
+    protected boolean verbose;
+    /**
+     * The number of milliseconds ANTLR will wait for analysis of each
+     * alternative in the grammar to complete before giving up. You may raise
+     * this value if ANTLR gives up on a complicated alt and tells you that
+     * there are lots of ambiguities, but you know that it just needed to spend
+     * more time on it. Note that this is an absolute time and not CPU time.
+     *
+     * @parameter default-value="10000"
+     */
+    private int conversionTimeout;
+
+    /**
+     * The number of alts, beyond which ANTLR will not generate a switch statement
+     * for the DFA.
+     *
+     * @parameter default-value="300"
+     */
+    private int maxSwitchCaseLabels;
+
+    /**
+     * The number of alts, below which ANTLR will not choose to generate a switch
+     * statement over an if statement.
+     */
+    private int minSwitchAlts;
+
+    /* --------------------------------------------------------------------
+     * The following are Maven specific parameters, rather than specifically
+     * options that the ANTLR tool can use.
+     */
+    /**
+     * Provides an explicit list of all the grammars that should
+     * be included in the generate phase of the plugin. Note that the plugin
+     * is smart enough to realize that imported grammars should be included but
+     * not acted upon directly by the ANTLR Tool.
+     *
+     * Unless otherwise specified, the include list scans for and includes all
+     * files that end in ".g" in any directory beneath src/main/antlr3. Note that
+     * this version of the plugin looks for the directory antlr3 and not the directory
+     * antlr, so as to avoid clashes and confusion for projects that use both v2 and v3 grammars
+     * such as ANTLR itself.
+     *
+     * @parameter
+     */
+    protected Set includes = new HashSet();
+    /**
+     * Provides an explicit list of any grammars that should be excluded from
+     * the generate phase of the plugin. Files listed here will not be sent for
+     * processing by the ANTLR tool.
+     *
+     * @parameter 
+     */
+    protected Set excludes = new HashSet();
+    /**
+     * @parameter expression="${project}"
+     * @required
+     * @readonly
+     */
+    protected MavenProject project;
+    /**
+     * Specifies the Antlr directory containing grammar files. For
+     * antlr version 3.x we default this to a directory in the tree
+     * called antlr3 because the antlr directory is occupied by version
+     * 2.x grammars.
+     *
+     * @parameter default-value="${basedir}/src/main/antlr3"
+     * @required
+     */
+    private File sourceDirectory;
+    /**
+     * Location for generated Java files. For antlr version 3.x we default
+     * this to a directory in the tree called antlr3 because the antlr
+     * directory is occupied by version 2.x grammars.
+     *
+     * @parameter default-value="${project.build.directory}/generated-sources/antlr3"
+     * @required
+     */
+    private File outputDirectory;
+    /**
+     * Location for imported token files, e.g. <code>.tokens</code> and imported grammars.
+     * Note that ANTLR will not try to process grammars that it finds to be imported
+     * into other grammars (in the same processing session).
+     *
+     * @parameter default-value="${basedir}/src/main/antlr3/imports"
+     */
+    private File libDirectory;
+
+    public File getSourceDirectory() {
+        return sourceDirectory;
+    }
+
+    public File getOutputDirectory() {
+        return outputDirectory;
+    }
+
+    public File getLibDirectory() {
+        return libDirectory;
+    }
+
+    void addSourceRoot(File outputDir) {
+        project.addCompileSourceRoot(outputDir.getPath());
+    }
+    /**
+     * An instance of the ANTLR tool build
+     */
+    protected Tool tool;
+
+    /**
+     * The main entry point for this Mojo, it is responsible for converting
+     * ANTLR 3.x grammars into the target language specified by the grammar.
+     * 
+     * @throws org.apache.maven.plugin.MojoExecutionException When something is discovered such as a missing source
+     * @throws org.apache.maven.plugin.MojoFailureException When something really bad happens such as not being able to create the ANTLR Tool
+     */
+    public void execute()
+            throws MojoExecutionException, MojoFailureException {
+
+        Log log = getLog();
+
+        // Check to see if the user asked for debug information, then dump all the
+        // parameters we have picked up if they did.
+        //
+        if (log.isDebugEnabled()) {
+
+            // Excludes
+            //
+            for (String e : (Set<String>) excludes) {
+
+                log.debug("ANTLR: Exclude: " + e);
+            }
+
+            // Includes
+            //
+            for (String e : (Set<String>) includes) {
+
+                log.debug("ANTLR: Include: " + e);
+            }
+
+            // Output location
+            //
+            log.debug("ANTLR: Output: " + outputDirectory);
+
+            // Library directory
+            //
+            log.debug("ANTLR: Library: " + libDirectory);
+
+            // Flags
+            //
+            log.debug("ANTLR: report              : " + report);
+            log.debug("ANTLR: printGrammar        : " + printGrammar);
+            log.debug("ANTLR: debug               : " + debug);
+            log.debug("ANTLR: profile             : " + profile);
+            log.debug("ANTLR: nfa                 : " + nfa);
+            log.debug("ANTLR: dfa                 : " + dfa);
+            log.debug("ANTLR: trace               : " + trace);
+            log.debug("ANTLR: messageFormat       : " + messageFormat);
+            log.debug("ANTLR: conversionTimeout   : " + conversionTimeout);
+            log.debug("ANTLR: maxSwitchCaseLabels : " + maxSwitchCaseLabels);
+            log.debug("ANTLR: minSwitchAlts       : " + minSwitchAlts);
+            log.debug("ANTLR: verbose             : " + verbose);
+        }
+
+        // Ensure that the output directory path is intact so that
+        // ANTLR can just write into it.
+        //
+        File outputDir = getOutputDirectory();
+
+        if (!outputDir.exists()) {
+            outputDir.mkdirs();
+        }
+
+        // First thing we need is an instance of the ANTLR 3.1 build tool
+        //
+        try {
+            // ANTLR Tool build interface
+            //
+            tool = new Tool();
+        } catch (Exception e) {
+            log.error("The attempt to create the ANTLR build tool failed, see exception report for details");
+
+            throw new MojoFailureException("Jim failed you!");
+        }
+
+        // Next we need to set the options given to us in the pom into the
+        // tool instance we have created.
+        //
+        tool.setConversionTimeout(conversionTimeout);
+        tool.setDebug(debug);
+        tool.setGenerate_DFA_dot(dfa);
+        tool.setGenerate_NFA_dot(nfa);
+        tool.setProfile(profile);
+        tool.setReport(report);
+        tool.setPrintGrammar(printGrammar);
+        tool.setTrace(trace);
+        tool.setVerbose(verbose);
+        tool.setMessageFormat(messageFormat);
+        tool.setMaxSwitchCaseLabels(maxSwitchCaseLabels);
+        tool.setMinSwitchAlts(minSwitchAlts);
+
+        // Where do we want ANTLR to produce its output? (Base directory)
+        //
+        if (log.isDebugEnabled())
+        {
+            log.debug("Output directory base will be " + outputDirectory.getAbsolutePath());
+        }
+        tool.setOutputDirectory(outputDirectory.getAbsolutePath());
+
+        // Tell ANTLR that we always want the output files to be produced in the output directory
+        // using the same relative path as the input file was to the input directory.
+        //
+        tool.setForceRelativeOutput(true);
+
+        // Where do we want ANTLR to look for .tokens and import grammars?
+        //
+        tool.setLibDirectory(libDirectory.getAbsolutePath());
+
+        if (!sourceDirectory.exists()) {
+            if (log.isInfoEnabled()) {
+                log.info("No ANTLR grammars to compile in " + sourceDirectory.getAbsolutePath());
+            }
+            return;
+        } else {
+            if (log.isInfoEnabled()) {
+                log.info("ANTLR: Processing source directory " + sourceDirectory.getAbsolutePath());
+            }
+        }
+
+        // Set working directory for ANTLR to be the base source directory
+        //
+        tool.setInputDirectory(sourceDirectory.getAbsolutePath());
+
+        try {
+
+            // Now pick up all the files and process them with the Tool
+            //
+            processGrammarFiles(sourceDirectory, outputDirectory);
+
+        } catch (InclusionScanException ie) {
+
+            log.error(ie);
+            throw new MojoExecutionException("Fatal error occurred while evaluating the names of the grammar files to analyze");
+
+        } catch (Exception e) {
+
+            getLog().error(e);
+            throw new MojoExecutionException(e.getMessage());
+        }
+
+
+
+        tool.process();
+
+        // If any of the grammar files caused errors but did not throw exceptions
+        // then we should have accumulated errors in the counts
+        //
+        if (tool.getNumErrors() > 0) {
+            throw new MojoExecutionException("ANTLR caught " + tool.getNumErrors() + " build errors.");
+        }
+
+        // All looks good, so we need to tell Maven about the sources that
+        // we just created.
+        //
+        if (project != null) {
+            // Tell Maven that there are some new source files underneath
+            // the output directory.
+            //
+            addSourceRoot(this.getOutputDirectory());
+        }
+
+    }
+
+
+    /**
+     *
+     * @param sourceDirectory
+     * @param outputDirectory
+     * @throws antlr.TokenStreamException
+     * @throws antlr.RecognitionException
+     * @throws java.io.IOException
+     * @throws org.codehaus.plexus.compiler.util.scan.InclusionScanException
+     */
+    private void processGrammarFiles(File sourceDirectory, File outputDirectory)
+            throws TokenStreamException, RecognitionException, IOException, InclusionScanException {
+        // Which files under the source set should we be looking for as grammar files
+        //
+        SourceMapping mapping = new SuffixMapping("g", Collections.EMPTY_SET);
+
+        // What are the sets of includes (defaulted or otherwise).
+        //
+        Set includes = getIncludesPatterns();
+
+        // Now, to the excludes, we need to add the imports directory
+        // as this is autoscanned for imported grammars and so is auto-excluded from the
+        // set of grammar files we should be analyzing.
+        //
+        excludes.add("imports/**");
+
+        SourceInclusionScanner scan = new SimpleSourceInclusionScanner(includes, excludes);
+
+        scan.addSourceMapping(mapping);
+        Set grammarFiles = scan.getIncludedSources(sourceDirectory, null);
+
+        if (grammarFiles.isEmpty()) {
+            if (getLog().isInfoEnabled()) {
+                getLog().info("No grammars to process");
+            }
+        } else {
+
+            // Tell the ANTLR tool that we want sorted build mode
+            //
+            tool.setMake(true);
+            
+            // Iterate each grammar file we were given and add it into the tool's list of
+            // grammars to process.
+            //
+            for (File grammar : (Set<File>) grammarFiles) {
+
+                if (getLog().isDebugEnabled()) {
+                    getLog().debug("Grammar file '" + grammar.getPath() + "' detected.");
+                }
+
+
+                String relPath = findSourceSubdir(sourceDirectory, grammar.getPath()) + grammar.getName();
+
+                if (getLog().isDebugEnabled()) {
+                    getLog().debug("  ... relative path is: " + relPath);
+                }
+                tool.addGrammarFile(relPath);
+
+            }
+
+        }
+
+
+    }
+
+    public Set getIncludesPatterns() {
+        if (includes == null || includes.isEmpty()) {
+            return Collections.singleton("**/*.g");
+        }
+        return includes;
+    }
+
+    /**
+     * Given the source directory File object and the full PATH to a
+     * grammar, produce the path to the named grammar file in relative
+     * terms to the sourceDirectory. This will then allow ANTLR to
+     * produce output relative to the base of the output directory and
+     * reflect the input organization of the grammar files.
+     *
+     * @param sourceDirectory The source directory File object
+     * @param grammarFileName The full path to the input grammar file
+     * @return The path to the grammar file relative to the source directory
+     */
+    private String findSourceSubdir(File sourceDirectory, String grammarFileName) {
+        String srcPath = sourceDirectory.getPath() + File.separator;
+
+        if (!grammarFileName.startsWith(srcPath)) {
+            throw new IllegalArgumentException("expected " + grammarFileName + " to be prefixed with " + sourceDirectory);
+        }
+
+        File unprefixedGrammarFileName = new File(grammarFileName.substring(srcPath.length()));
+
+        return unprefixedGrammarFileName.getParent() + File.separator;
+    }
+}
diff --git a/antlr3-maven-plugin/src/site/apt/examples/import.apt b/antlr3-maven-plugin/src/site/apt/examples/import.apt
new file mode 100644
index 0000000..06a49f1
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/examples/import.apt
@@ -0,0 +1,8 @@
+Imported Grammar Files
+
+ In order to have the ANTLR plugin automatically locate and use grammars used
+ as imports in your main .g files, you need to place the imported grammar
+ files in the imports directory beneath the root directory of your grammar
+ files (which is <<<src/main/antlr3>>> by default of course).
+
+ For a default layout, place your import grammars in the directory: <<<src/main/antlr3/imports>>>
diff --git a/antlr3-maven-plugin/src/site/apt/examples/libraries.apt b/antlr3-maven-plugin/src/site/apt/examples/libraries.apt
new file mode 100644
index 0000000..73ce796
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/examples/libraries.apt
@@ -0,0 +1,47 @@
+Libraries
+
+ The introduction of the import directive in a grammar allows reuse of common grammar files
+ as well as the ability to divide up functional components of large grammars. However it has
+ caused some confusion in regard to the fact that generated vocab files (<<<xxx.tokens>>>) can also
+ be searched for with the <<<<libDirectory>>>> directive.
+
+ This has confused two separate functions and imposes a structure upon the layout of
+ your grammar files in certain cases. If you have grammars that both use the import
+ directive and also require the use of a vocab file then you will need to locate
+ the grammar that generates the .tokens file alongside the grammar that uses it. This
+ is because you will need to use the <<<<libDirectory>>>> directive to specify the
+ location of your imported grammars and ANTLR will not find any vocab files in
+ this directory.
+
+ The .tokens files for any grammars are generated within the same output directory structure
+ as the .java files. So, wherever the .java files are generated, you will also find the .tokens
+ files. ANTLR looks for .tokens files in both the <<<<libDirectory>>>> and the output directory
+ where it is placing the generated .java files. Hence when you locate the grammars that generate
+ .tokens files in the same source directory as the ones that use the .tokens files, then
+ the Maven plugin will find the expected .tokens files.
+
+ The <<<<libDirectory>>>> is specified like any other directory parameter in Maven. Here is an
+ example:
+
++--
+<plugin>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <version>3.1.3-1</version>
+
+    <executions>
+
+        <execution>
+            <configuration>
+                <goals>
+                    <goal>antlr</goal>
+                </goals>
+                <libDirectory>src/main/antlr_imports</libDirectory>
+            </configuration>
+        </execution>
+    </executions>
+</plugin>
++--
+
+
+
diff --git a/antlr3-maven-plugin/src/site/apt/examples/simple.apt b/antlr3-maven-plugin/src/site/apt/examples/simple.apt
new file mode 100644
index 0000000..3e36e84
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/examples/simple.apt
@@ -0,0 +1,40 @@
+Simple configuration
+
+ If your grammar files are organized into the default locations as described in the {{{../index.html}introduction}},
+ then configuring the pom.xml file for your project is as simple as adding this to it
+
++--
+<plugins>
+<plugin>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <version>3.1.3-1</version>
+    <executions>
+        <execution>
+            <goals>
+                <goal>antlr</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+...
+</plugins>
++--
+
+ When the mvn command is executed all grammar files under <<<src/main/antlr3>>>, except any
+ import grammars under <<<src/main/antlr3/imports>>> will be analyzed and converted to
+ java source code in the output directory <<<target/generated-sources/antlr3>>>.
+
+ Your input files under <<<antlr3>>> should be stored in sub directories that
+ reflect the package structure of your java parsers. If your grammar file parser.g contains:
+
++---
+ at header {
+package org.jimi.themuss;
+}
++---
+
+ Then the .g file should be stored in: <<<src/main/antlr3/org/jimi/themuss/parser.g>>>. This way
+ the generated .java files will correctly reflect the package structure in which they will
+ finally rest as classes.
+
diff --git a/antlr3-maven-plugin/src/site/apt/index.apt b/antlr3-maven-plugin/src/site/apt/index.apt
new file mode 100644
index 0000000..2b2495a
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/index.apt
@@ -0,0 +1,63 @@
+         -------------
+         ANTLR v3 Maven Plugin
+         -------------
+         Jim Idle
+         -------------
+         March 2009
+         -------------
+
+ANTLR v3 Maven plugin
+
+ The ANTLR v3 Maven plugin is completely re-written as of version 3.1.3; if you are familiar
+ with prior versions, you should note that there are some behavioral differences that make
+ it worthwhile reading this documentation. 
+
+ The job of the plugin is essentially to tell the standard ANTLR parser generator where the
+ input grammar files are and where the output files should be generated. As with all Maven
+ plugins, there are defaults, which you are advised to comply to, but are not forced to
+ comply to.
+
+ This version of the plugin allows full control over ANTLR and allows configuration of all
+ options that are useful for a build system. The code required to calculate dependencies,
+ check the build order, and otherwise work with your grammar files is built into the ANTLR
+ tool as of version 3.1.3 of ANTLR and this plugin.
+
+* Plugin Versioning
+
+ The plugin version tracks the version of the ANTLR tool that it controls. Hence if you
+ use version 3.1.3 of the plugin, you will build your grammars using version 3.1.3 of the
+ ANTLR tool, version 3.2 of the plugin will use version 3.2 of the ANTLR tool and so on.
+
+ You may also find that there are patch versions of the plugin such as 3.1.3-1 3.1.3-2 and
+ so on. Use the latest patch release of the plugin.
+
+ The current version of the plugin is shown at the top of this page after the <<Last Deployed>> date.
+ 
+
+* Default directories
+
+ As with all Maven plugins, this plugin will automatically default to standard locations
+ for your grammar and import files. Organizing your source code to reflect this standard
+ layout will greatly reduce the configuration effort required. The standard layout lookd
+ like this:
+
++--
+ src/main/
+      |
+      +--- antlr3/... .g files organized in the required package structure
+             |
+             +--- imports/  .g files that are imported by other grammars.
++--
+
+ If your grammar is intended to be part of a package called org.foo.bar then you would
+ place it in the directory <<<src/main/antlr3/org/foo/bar>>>. The plugin will then produce
+ .java and .tokens files in the output directory <<<target/generated-sources/antlr3/org/foo/bar>>>
+ When the Java files are compiled they will be in the correct location for the javac
+ compiler without any special configuration. The generated java files are automatically
+ submitted for compilation by the plugin.
+
+ The <<<src/main/antlr3/imports>>> directory is treated in a special way. It should contain
+ any grammar files that are imported by other grammar files (do not make subdirectories here.)
+ Such files are never built on their own, but the plugin will automatically tell the ANTLR
+ tool to look in this directory for library files.
+
diff --git a/antlr3-maven-plugin/src/site/apt/usage.apt.vm b/antlr3-maven-plugin/src/site/apt/usage.apt.vm
new file mode 100644
index 0000000..9b7ad0f
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/usage.apt.vm
@@ -0,0 +1,193 @@
+Usage
+
+ The Maven plugin for antlr is simple to use but is at its simplest when you use the default
+ layouts for your grammars, as so:
+
++--
+ src/main/
+      |
+      +--- antlr3/... .g files organized in the required package structure
+             |
+             +--- imports/  .g files that are imported by other grammars.
++--
+
+ However, if you are not able to use this structure for whatever reason, you
+ can configure the locations of the grammar files, where library/import files
+ are located and where the output files should be generated.
+
+* Plugin Descriptor
+
+ The current version of the plugin is shown at the top of this page after the <<Last Deployed>> date.
+
+ The full layout of the descriptor (at least, those parts that are not standard Maven things),
+ showing the default values of the configuration options, is as follows:
+
++--
+<plugin>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <version>3.1.3-1</version>
+
+    <executions>
+        
+        <execution>
+            <configuration>
+                <goals>
+                    <goal>antlr</goal>
+                </goals>
+                <conversionTimeout>10000</conversionTimeout>
+                <debug>false</debug>
+                <dfa>false</dfa>
+                <nfa>false</nfa>
+                <excludes><exclude/></excludes>
+                <includes><include/></includes>
+                <libDirectory>src/main/antlr3/imports</libDirectory>
+                <messageFormat>antlr</messageFormat>
+                <outputDirectory>target/generated-sources/antlr3</outputDirectory>
+                <printGrammar>false</printGrammar>
+                <profile>false</profile>
+                <report>false</report>
+                <sourceDirectory>src/main/antlr3</sourceDirectory>
+                <trace>false</trace>
+                <verbose>true</verbose>
+            </configuration>
+        </execution>
+    </executions>
+
+</plugin>
++--
+
+ Note that you can create multiple executions, and thus build some grammars with different
+ options to others (such as setting the debug option for instance).
+
+** Configuration parameters
+
+*** report
+
+    If set to true, then after the tool has processed an input grammar file
+    it will report various statistics about the parser, such as information
+    on cyclic DFAs, which rules may use backtracking, and so on.
+
+    default-value="false"
+
+*** printGrammar
+
+    If set to true, then the ANTLR tool will print a version of the input
+    grammar which is devoid of any actions that may be present in the input file.
+
+    default-value = "false"
+
+*** debug
+
+     If set to true, then the code generated by the ANTLR code generator will
+     be set to debug mode. This means that when run, the code will 'hang' and
+     wait for a debug connection on a TCP port (49100 by default).
+     
+     default-value="false"
+     
+*** profile
+
+     If set to true, then the generated parser will compute and report on
+     profile information at runtime.
+     
+     default-value="false"
+     
+*** nfa
+
+     If set to true then the ANTLR tool will generate a description of the nfa
+     for each rule in <a href="http://www.graphviz.org">Dot format</a>
+     
+     default-value="false"
+     
+    protected boolean nfa;
+    
+*** dfa
+
+     If set to true then the ANTLR tool will generate a description of the DFA
+     for each decision in the grammar in <a href="http://www.graphviz.org">Dot format</a>
+     
+     default-value="false"
+     
+*** trace
+
+     If set to true, the generated parser code will log rule entry and exit points
+     to stdout as an aid to debugging.
+     
+     default-value="false"
+     
+*** messageFormat
+
+     If this parameter is set, it indicates that any warning or error messages returned
+     by ANTLR, should be formatted in the specified way. Currently, ANTLR supports the
+     built-in formats of antlr, gnu and vs2005.
+
+     default-value="antlr"
+     
+*** verbose
+
+     If this parameter is set to true, then ANTLR will report all sorts of things
+     about what it is doing such as the names of files and the version of ANTLR and so on.
+     
+     default-value="true"
+     
+*** conversionTimeout
+
+     The number of milliseconds ANTLR will wait for analysis of each
+     alternative in the grammar to complete before giving up. You may raise
+     this value if ANTLR gives up on a complicated alt and tells you that
+     there are lots of ambiguities, but you know that it just needed to spend
+     more time on it. Note that this is an absolute time and not CPU time.
+     
+     default-value="10000"
+     
+*** includes
+
+     Provides an explicit list of all the grammars that should
+     be included in the generate phase of the plugin. Note that the plugin
+     is smart enough to realize that imported grammars should be included but
+     not acted upon directly by the ANTLR Tool.
+     
+     Unless otherwise specified, the include list scans for and includes all
+     files that end in ".g" in any directory beneath src/main/antlr3. Note that
+     this version of the plugin looks for the directory antlr3 and not the directory
+     antlr, so as to avoid clashes and confusion for projects that use both v2 and v3 grammars
+     such as ANTLR itself.
+     
+*** excludes
+
+     Provides an explicit list of any grammars that should be excluded from
+     the generate phase of the plugin. Files listed here will not be sent for
+     processing by the ANTLR tool.
+     
+*** sourceDirectory
+
+     Specifies the Antlr directory containing grammar files. For
+     antlr version 3.x we default this to a directory in the tree
+     called antlr3 because the antlr directory is occupied by version
+     2.x grammars.
+
+     <<NB>> Take careful note that the default location for antlr grammars
+     is now <<antlr3>> and NOT <<antlr>>
+
+     default-value="<<<${basedir}/src/main/antlr3>>>"
+     
+*** outputDirectory
+
+     Location for generated Java files. For antlr version 3.x we default
+     this to a directory in the tree called antlr3 because the antlr
+     directory is occupied by version 2.x grammars.
+     
+     default-value="<<<${project.build.directory}/generated-sources/antlr3>>>"
+     
+*** libDirectory
+
+     Location for imported token files, e.g. <code>.tokens</code> and imported grammars.
+     Note that ANTLR will not try to process grammars that it finds in this directory, but
+     will include this directory in the search for .tokens files and import grammars.
+
+     <<NB>> If you change the lib directory from the default but the directory is
+     still under <<<${basedir}/src/main/antlr3>>>, then you will need to exclude
+     the grammars from processing specifically, using the <<<<excludes>>>> option.
+
+     default-value="<<<${basedir}/src/main/antlr3/imports>>>"
+
diff --git a/antlr3-maven-plugin/src/site/site.xml b/antlr3-maven-plugin/src/site/site.xml
new file mode 100644
index 0000000..7d0c52b
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/site.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<project name="ANTLR v3 Maven plugin">
+
+  <publishDate position="left"/>
+  <version position="left"/>
+
+  <poweredBy>
+    <logo name="ANTLR Web Site" href="http://antlr.org/"
+          img="http://www.antlr.org/wiki/download/attachments/292/ANTLR3"/>
+  </poweredBy>
+
+  <body>
+    <links>
+      <item name="Antlr Web Site" href="http://www.antlr.org/"/>
+    </links>
+
+    <menu name="Overview">
+      <item name="Introduction" href="index.html"/>
+      <item name="Usage" href="usage.html"/>
+    </menu>
+
+    <menu name="Examples">
+      <item name="Simple configurations" href="examples/simple.html"/>
+      <item name="Using library directories" href="examples/libraries.html"/>
+      <item name="Using imported grammars" href="examples/import.html"/>
+    </menu>
+
+    <menu ref="reports" />
+    <menu ref="modules" />
+
+  </body>
+</project>
diff --git a/antlrjar.xml b/antlrjar.xml
new file mode 100644
index 0000000..b9c3213
--- /dev/null
+++ b/antlrjar.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    This file defines what goes in to ANTLR Uber jar, which includes
+    all of the classes we need to run an executable jar in standalone
+    mode, other than junit, which has a non Free BSD license and so
+    we are reluctant to include it in the jar.
+  -->
+<assembly>
+
+    <!--
+        This is the suffix that will be used to name the uber jar
+        once it is jared up.
+      -->
+    <id>completejar</id>
+
+    <!--
+        Exclude the antlr-master pom from the jar - we don't need it
+        and it causes silly things to happen.
+      -->
+    <useProjectArtifact>false</useProjectArtifact>
+
+    <!--
+        The only output format we need is the executable jar file
+      -->
+    <formats>
+        <format>jar</format>
+    </formats>
+
+    <!--
+        Make all jars unpack at the same level and don't include
+        any extraneous directories.
+      -->
+    <includeBaseDirectory>false</includeBaseDirectory>
+
+    <!--
+        Which of the modules that the master pom builds do we
+        wish to include in the uber jar. We are including
+        dependencies, so we only need to name the Tool module
+        and the gunit module.
+      -->
+    <moduleSets>
+
+        <moduleSet>
+            
+            <includes>
+                
+                <include>org.antlr:antlr</include>
+          
+            </includes>
+            <!--
+                Of the binaries, such as the dependencies that the
+                above modules need, which do we want and which do we not.
+                Currently we want all the dependencies in the Tool jar.
+              -->
+            <binaries>
+
+            <!--
+                Unpack the binary dependencies so we have a nice
+                uber jar that can run with java -jar and need not have
+                CLASSPATH configured and so on.
+              -->
+                        <unpack>true</unpack>
+            </binaries>
+
+        </moduleSet>
+        
+        <moduleSet>
+
+            <includes>
+
+                <include>org.antlr:gunit</include>
+
+            </includes>
+
+            <!--
+                We do not want any of the dependencies that gunit has as they
+                are included with the tool. In particular we want to exclude
+                the junit dependency.
+              -->
+            <binaries>
+                
+               <includeDependencies>false</includeDependencies>
+
+            </binaries>
+
+        </moduleSet>
+
+    </moduleSets>
+
+    <!--
+        What do we want to include in the jar from each project
+      -->
+    <fileSets>
+        <fileSet>
+
+            <!--
+                We need the output classes and resources etc.
+              -->
+            <directory>${project.build.outputDirectory}</directory>
+        </fileSet>
+    </fileSets>
+
+
+    
+</assembly>
diff --git a/antlrsources.xml b/antlrsources.xml
new file mode 100644
index 0000000..8e25c2a
--- /dev/null
+++ b/antlrsources.xml
@@ -0,0 +1,318 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+
+    This is the assembly descriptor for building a full source code
+    distribution of ANTLR and all its related components. This assembly
+    only includes the Java oriented source code, hence only the Java runtime
+    is included in the resulting jar.
+
+    The resulting jar is distribution that can be expanded with:
+
+      jar xvf antlr-master-3.x.x-src.jar
+
+    The output directory will be antlr-master-3.x.x and in here will be
+    the BUILD.txt file, which explains how to build ANTLR.
+
+    Jim Idle - May, 2009
+ -->
+<assembly>
+
+    <!-- The name of this assembly descriptor, which is referenced in
+         the master pom.xml using <assemblyRef> (although in fact we
+         reference the file name that contains it to avoid cluttering
+         the pom.
+      -->
+    <id>src</id>
+
+    <!-- We have elected to produce only a jar output and to use the line
+         endings of whatever platform we are running on. More formats
+         can be added for simultaneous production, such as <format>zip</format>
+      -->
+    <formats>
+        <format>jar</format>
+    </formats>
+
+    <!--
+        The resulting archives will have a base directory named after the master
+        artifact, rather than just extract into the current directory.
+      -->
+    <includeBaseDirectory>true</includeBaseDirectory>
+
+    <!-- We need to describe the source code of each of the modules we want
+         including in the archive. In the main this is because we did not store
+         the modules in perforce using directory names that match the module
+         names. This was for historic reasons as we already moved everything
+         about massively, just to move to Maven in the first place.
+      -->
+    <moduleSets>
+
+        <!-- Describe the gUnit source code.
+          -->
+        <moduleSet>
+
+            <!-- The Maven artifact name tells the assembly artifact a bunch
+                 of information to start with, such as its location in the current
+                 tree and so on.
+              -->
+            <includes>
+                <include>org.antlr:gunit</include>
+            </includes>
+
+            <!-- What sources do we wish to include from this module?
+              -->
+            <sources>
+                
+                <fileSets>
+
+                    <!-- We have one file set, being the src sub-directory, which in
+                         the output archive, we also want to be called src/
+                      -->
+                    <fileSet>
+                        <directory>src</directory>
+                        <outputDirectory>src</outputDirectory>
+                    </fileSet>
+                
+
+                    <!-- In the base gUnit directory, we need to include a number
+                         of files that either document the module or control the
+                         build. These are not underneath the src directory of course
+                         so they need to be named here (which nicely documents what
+                         is included.)
+                      -->
+                    <fileSet>
+                        <includes>
+                            <include>pom.xml</include>
+                            <include>CHANGES.txt</include>
+                            <include>LICENSE.txt</include>
+                            <include>README.txt</include>
+                            <include>antlr.config</include>
+                        </includes>
+                    </fileSet>
+                </fileSets>
+            </sources>
+
+        </moduleSet>
+
+        <!-- Describe the gUnit source code.
+          -->
+        <moduleSet>
+
+            <!-- The Maven artifact name tells the assembly artifact a bunch
+                 of information to start with, such as its location in the current
+                 tree and so on.
+              -->
+            <includes>
+                <include>org.antlr:antlr-runtime</include>
+            </includes>
+
+            <!-- What sources do we wish to include from this module?
+              -->
+            <sources>
+
+                <!-- Because the Java runtime source is not in a directory
+                     called antlr-runtime, directly underneath the master
+                     directory, we need to map the output directory so that
+                     instead of starting with the name of the artifact, it
+                     is in the place where the build expects it.
+                  -->
+                <outputDirectoryMapping>runtime/Java</outputDirectoryMapping>
+
+                <fileSets>
+                     <!-- We have one file set, being the src sub-directory, which in
+                          the output archive, we also want to be called src/
+                       -->
+                    <fileSet>
+                        <directory>src</directory>
+                        <outputDirectory>src</outputDirectory>
+                    </fileSet>
+
+                    <!-- In the base runtime/Java directory, we need to include a number
+                         of files that either document the module or control the
+                         build. These are not underneath the src directory of course
+                         so they need to be named here (which nicely documents what
+                         is included.)
+                      -->
+                    <fileSet>
+                        <includes>
+                            <include>pom.xml</include>
+                            <include>doxyfile</include>
+                            <include>antlr.config</include>
+                        </includes>
+                    </fileSet>
+                </fileSets>
+            </sources>
+
+        </moduleSet>
+
+        <!-- Describe the ANTLR tool source code.
+          -->
+        <moduleSet>
+            <includes>
+                <include>org.antlr:antlr</include>
+            </includes>
+
+            <!-- What sources do we wish to include from this module?
+              -->
+            <sources>
+
+                <!-- Because the tool source code is not in a directory
+                     called antlr, nor directly underneath the master
+                     directory, we need to map the output directory so that
+                     instead of starting with the name of the artifact, it
+                     is in the place where the build expects it.
+                  -->
+                <outputDirectoryMapping>tool</outputDirectoryMapping>
+
+                
+                <fileSets>
+
+                    <!-- We have one file set, being the src sub-directory, which in
+                         the output archive, we also want to be called src/
+                      -->
+                    <fileSet>
+                        <directory>src</directory>
+                        <outputDirectory>src</outputDirectory>
+                    </fileSet>
+
+                    <!-- In the base tool directory, we need to include a number
+                         of files that either document the module or control the
+                         build. These are not underneath the src directory of course
+                         so they need to be named here (which nicely documents what
+                         is included.)
+                      -->
+                    <fileSet>
+                        <includes>
+                            <include>pom.xml</include>
+                            <include>CHANGES.txt</include>
+                            <include>LICENSE.txt</include>
+                            <include>README.txt</include>
+                            <include>antlr.config</include>
+                        </includes>
+                    </fileSet>
+
+                </fileSets>
+
+
+            </sources>
+
+        </moduleSet>
+
+        <!-- Describe the ANTLR maven plugin source code.
+
+             Strictly speaking, this is not a sub module of the tool because it
+             relies on the tool, runtime, and gunit modules. However, it is natural
+             part of the Java source set and so I found a way to build it as a sub-module
+             and include it in the source set.
+          -->
+        <moduleSet>
+            <includes>
+                <include>org.antlr:antlr3-maven-plugin</include>
+            </includes>
+
+            <!-- What sources do we wish to include from this module?
+              -->
+            <sources>
+
+                
+                <fileSets>
+                    
+                    <!-- We have one file set, being the src sub-directory, which in
+                         the output archive, we also want to be called src/
+                      -->
+                    <fileSet>
+                        <directory>src</directory>
+                        <outputDirectory>src</outputDirectory>
+                    </fileSet>
+
+                    <!-- In the base plugin directory, we need to include a number
+                     of files that either document the module or control the
+                     build. These are not underneath the src directory of course
+                     so they need to be named here (which nicely documents what
+                     is included.)
+                      -->
+                    <fileSet>
+                        <includes>
+                            <include>pom.xml</include>
+                        </includes>
+                    </fileSet>
+                </fileSets>
+
+                
+            </sources>
+
+        </moduleSet>
+
+         <!-- Describe the gUnit maven plugin source code.
+
+             Strictly speaking, this is not a sub module of the tool because it
+             relies on the tool, runtime, and gunit modules. However, it is natural
+             part of the Java source set and so I found a way to build it as a sub-module
+             and include it in the source set.
+          -->
+        <moduleSet>
+            <includes>
+                <include>org.antlr:maven-gunit-plugin</include>
+            </includes>
+
+            <!-- What sources do we wish to include from this module?
+              -->
+            <sources>
+
+                <!-- Because the tool source code is not in a directory
+                     called antlr, nor directly underneath the master
+                     directory, we need to map the output directory so that
+                     instead of starting with the name of the artifact, it
+                     is in the place where the build expects it.
+                  -->
+                <outputDirectoryMapping>gunit-maven-plugin</outputDirectoryMapping>
+
+                <!-- We have one file set, being the src sub-directory, which in
+                     the output archive, we also want to be called src/
+                  -->
+                <fileSets>
+
+                    <fileSet>
+                        <directory>src</directory>
+                        <outputDirectory>src</outputDirectory>
+                    </fileSet>
+
+                    <!-- In the base plugin directory, we need to include a number
+                         of files that either document the module or control the
+                         build. These are not underneath the src directory of course
+                         so they need to be named here (which nicely documents what
+                         is included.)
+                      -->
+                    <fileSet>
+                     
+                        <includes>
+                            <include>pom.xml</include>
+                        </includes>
+                    </fileSet>
+
+                    </fileSets>
+                
+            </sources>
+
+        </moduleSet>
+
+    </moduleSets>
+
+    <!-- In the base directory of the master build directory (the root of all
+         the other sources), there are a number of files that describe or control
+         the build (such as the master pom.xml and the BUILD.txt files). Hence
+         we need to describe them in their own fileset. No output mapping is required here
+         of course.
+      -->
+    <fileSets>
+        <fileSet>
+            <includes>
+                <include>pom.xml</include>
+                <include>antlrjar.xml</include>
+                <include>antlrsources.xml</include>
+                <include>BUILD.txt</include>
+            </includes>
+        </fileSet>
+    </fileSets>
+
+</assembly>
diff --git a/build.properties b/build.properties
deleted file mode 100644
index 6db78e1..0000000
--- a/build.properties
+++ /dev/null
@@ -1,8 +0,0 @@
-compile.debug=true
-compile.debuglevel=lines
-compile.deprecation=false
-compile.optimize=false
-
-version=3.0ea9
-
-stringtemplate.jar=/usr/local/lib/stringtemplate-2.3b6.jar
diff --git a/build.xml b/build.xml
deleted file mode 100644
index 19b5001..0000000
--- a/build.xml
+++ /dev/null
@@ -1,227 +0,0 @@
-<!-- Contributed by Oliver Zeigermann
-     Modified by Jean Bovet
-     Modified by Matt Benson
-     Modified by Miguel Ping
-     Library dependency: install library http://jakarta.apache.org/bcel/ in $ANT_HOME/lib, or other supported 3rd-party lib option
--->
-
-<project name="antlr3" default="build">
-    <property file="build.properties" />
-
-    <property name="build.dir" location="build" />
-    <property name="lib.dir" location="lib" />
-
-    <property name="build.classes" location="${build.dir}/classes" />
-    <property name="build.rtclasses" location="${build.dir}/rtclasses" />
-    <property name="build.tests" location="${build.dir}/tests" />
-    <property name="build.tests.xml" location="${build.tests}/xml" />
-    <property name="build.tests.reports" location="${build.tests}/reports" />
-    <property name="temp.dir" location="${java.io.tmpdir}/antlr3" />
-    <property name="includetests" value="org/antlr/test/Test*.java" />
-
-    <property name="src.dir" location="src" />
-    <property name="src.rt" location="runtime/Java/src" />
-    <property name="codegen.dir" location="codegen" />
-
-    <property name="tool.class"
-              location="${build.classes}/org/antlr/Tool.class" />
-
-    <property name="compile.debug" value="true" />
-    <property name="compile.debuglevel" value="lines,vars,source" />
-    <property name="compile.deprecation" value="false" />
-    <property name="compile.optimize" value="false" />
-
-    <path id="src.path">
-        <pathelement location="${src.dir}" />
-        <pathelement location="${codegen.dir}" />
-    </path>
-
-    <path id="rt.classpath">
-        <fileset dir="${lib.dir}" includes="**/*.jar" />
-    </path>
-
-    <path id="classpath">
-        <path refid="rt.classpath" />
-        <pathelement location="${build.rtclasses}" />
-    </path>
-
-    <condition property="bcel.available">
-        <available classname="org.apache.bcel.Constants" />
-    </condition>
-
-    <macrodef name="generate">
-      <attribute name="grammar" />
-      <sequential>
-          <antlr target="${codegen.dir}/@{grammar}" />
-      </sequential>
-    </macrodef>
-
-    <presetdef name="myjavac">
-        <javac debug="${compile.debug}"
-               debuglevel="${compile.debuglevel}"
-               deprecation="${compile.deprecation}"
-               optimize="${compile.optimize}"
-               source="1.5" target="jsr14" />
-    </presetdef>
-
-    <target name="clean" description="Deletes all generated files">
-        <delete dir="${build.dir}" />
-        <delete dir="${codegen.dir}" />
-    </target>
-
-    <target name="generator-prepare">
-        <mkdir dir="${codegen.dir}" />
-        <copy todir="${codegen.dir}" preservelastmodified="true">
-            <fileset dir="${src.dir}">
-                <include name="org/antlr/tool/antlr.g" />
-                <include name="org/antlr/tool/antlr.print.g" />
-                <include name="org/antlr/tool/assign.types.g" />
-                <include name="org/antlr/tool/buildnfa.g" />
-                <include name="org/antlr/tool/define.g" />
-                <include name="org/antlr/codegen/codegen.g" />
-            </fileset>
-            <flattenmapper />
-        </copy>
-
-        <!--
-            make sure we rebuild anybody who uses ANTLRTokenTypes
-            by deleting all target files younger than antlr.g:
-          -->
-        <delete>
-            <fileset dir="${codegen.dir}" excludes="*.g">
-                <not>
-                    <depend targetdir="${codegen.dir}">
-                        <mapper type="merge" to="antlr.g" />
-                    </depend>
-                </not>
-            </fileset>
-        </delete>
-    </target>
-
-    <target name="generator" depends="generator-prepare">
-        <generate grammar="antlr.g" />
-        <generate grammar="antlr.print.g" />
-        <generate grammar="assign.types.g" />
-        <generate grammar="buildnfa.g" />
-        <generate grammar="define.g" />
-        <generate grammar="codegen.g" />
-    </target>
-
-    <target name="compile-rt">
-        <mkdir dir="${build.rtclasses}" />
-        <myjavac srcdir="${src.rt}" destdir="${build.rtclasses}"
-                 classpathref="rt.classpath" />
-    </target>
-
-    <target name="compile" depends="generator,compile-rt">
-        <mkdir dir="${build.classes}" />
-        <myjavac destdir="${build.classes}" classpathref="classpath">
-            <src refid="src.path" />
-        </myjavac>
-    </target>
-
-    <target name="templates">
-        <copy todir="${build.classes}">
-            <fileset dir="${src.dir}" includes="**/*.stg,**/*.st,**/*.sti" />
-        </copy>
-    </target>
-
-    <target name="jarnames" depends="version">
-        <property name="antlr3.jar"
-                  location="${build.dir}/antlr${jar.version}.jar" />
-        <property name="antlr3.rt.jar"
-                  location="${build.dir}/antlr${jar.version}-runtime.jar" />
-    </target>
-
-    <target name="build-rt" depends="compile-rt,jarnames"
-            description="Creates the ANTLR3 runtime jar">
-        <jar jarfile="${antlr3.rt.jar}" index="true" filesonly="true">
-            <fileset file="LICENSE.txt" />
-            <fileset dir="${build.rtclasses}" />
-            <manifest>
-                <attribute name="Version" value="${version}" />
-            </manifest>
-        </jar>
-    </target>
-
-    <target name="build" depends="compile,templates,jarnames"
-            description="Creates the ANTLR3 fullversion jar">
-        <jar jarfile="${antlr3.jar}" index="true" filesonly="true">
-            <fileset file="LICENSE.txt" />
-            <fileset dir="${build.classes}" excludes="org/antlr/test/**" />
-            <fileset dir="${build.rtclasses}" />
-            <manifest>
-                <attribute name="Version" value="${version}" />
-            </manifest>
-        </jar>
-    </target>
-
-    <target name="build-all" depends="build,build-rt"
-            description="Creates the ANTLR3 fullversion and runtime jars" />
-
-    <target name="version" depends="version-bcel" unless="bcel.available">
-        <echo>Install bcel in the classpath to have automatic version in jar name</echo>
-        <property name="version" value="unknown 3.x build" />
-        <property name="jar.version" value="" />
-    </target>
-
-    <target name="version-bcel" if="bcel.available">
-
-        <condition property="enc" value="ISO-8859-1" else="${file.encoding}">
-            <!-- ironically, the AntVersion condition would tell us what
-                 we need to know, so we use its absence to indicate
-                 an Ant version prior to 1.7.0: -->
-            <available classname="org.apache.tools.ant.taskdefs.condition.AntVersion" />
-        </condition>
-
-        <loadproperties srcfile="${tool.class}" encoding="${enc}">
-            <filterchain>
-                <classconstants/>
-                <prefixlines prefix="Tool." />
-            </filterchain>
-        </loadproperties>
-
-        <property name="version" value="${Tool.VERSION}" />
-        <property name="jar.version" value="-${version}" />
-    </target>
-
-    <target name="rebuild" depends="clean,build" />
-    <target name="rebuild-rt" depends="clean,build-rt" />
-    <target name="rebuild-all" depends="clean,build-all" />
-
-    <target name="run-tests" depends="compile">
-        <mkdir dir="${build.tests.xml}" />
-        <mkdir dir="${temp.dir}" />
-        <delete>
-            <fileset dir="${build.tests.xml}" />
-        </delete>
-        <junit printsummary="withOutAndErr" showoutput="true"
-               fork="true" forkmode="once" failureproperty="testfailure"
-               tempdir="${temp.dir}">
-            <formatter type="xml" />
-            <formatter type="plain" />
-            <classpath>
-                <pathelement path="${build.classes}" />
-                <path refid="classpath" />
-                <pathelement path="${antlr2.jar}" />
-            </classpath>
-            <test if="testcase" name="${testcase}" todir="${build.tests.xml}" />
-            <batchtest todir="${build.tests.xml}" unless="testcase">
-                <fileset dir="${src.dir}" includes="${includetests}" />
-            </batchtest>
-        </junit>
-    </target>
-
-    <target name="run-reports">
-        <mkdir dir="${build.tests.reports}" />
-        <junitreport todir="${build.tests.reports}">
-            <fileset dir="${build.tests.xml}" includes="TEST-*.xml" />
-            <report format="frames" todir="${build.tests.reports}" />
-        </junitreport>
-    </target>
-
-    <target name="test" description="Run tests" depends="run-tests,run-reports">
-        <fail if="testfailure">Tests failed</fail>
-    </target>
-
-</project>
diff --git a/gunit-maven-plugin/pom.xml b/gunit-maven-plugin/pom.xml
new file mode 100644
index 0000000..6295e13
--- /dev/null
+++ b/gunit-maven-plugin/pom.xml
@@ -0,0 +1,242 @@
+<!--
+
+ [The "BSD licence"]
+
+ ANTLR        - Copyright (c) 2005-2008 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <prerequisites>
+        <maven>2.0</maven>
+    </prerequisites>
+
+    <groupId>org.antlr</groupId>
+    <artifactId>maven-gunit-plugin</artifactId>
+    <packaging>maven-plugin</packaging>
+    <version>3.2</version>
+
+    <name>Maven plugin for gUnit ANTLR V3 </name>
+	<description>A Maven plugin for incorporating gUnit testing of grammars</description>
+    <url>http://antlr.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <!-- Where do we track bugs for this project?
+      -->
+    <issueManagement>
+        <system>JIRA</system>
+        <url>http://antlr.org/jira/browse/ANTLR</url>
+    </issueManagement>
+
+    <!-- Location of the license description for this project
+      -->
+    <licenses>
+        <license>
+            <distribution>repo</distribution>
+            <name>The BSD License</name>
+            <url>http://www.antlr.org/LICENSE.txt </url>
+        </license>
+    </licenses>
+
+    <distributionManagement>
+    	<!-- The main release repo for Antlr artifacts -->
+        <repository>
+            <id>antlr-repo</id>
+            <name>ANTLR Testing repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-repo</url>
+        </repository>
+		<!-- The snapshot repo for Antlr artifacts -->
+        <snapshotRepository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-snapshot</url>
+        </snapshotRepository>
+		<!-- The place where site deployment should go -->
+        <site>
+            <id>antlr-repo</id>
+            <name>ANTLR gUnit Maven Plugin Web Site</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-maven-webs/maven-gunit-plugin</url>
+        </site>
+    </distributionManagement>
+
+    <!--
+
+    Inform Maven of the ANTLR snapshot repository, which it will
+    need to consult to get the latest snapshot build of the runtime and tool
+    if it was not built and installed locally.
+    -->
+    <repositories>
+
+      <!--
+        This is the ANTLR repository.
+        -->
+        <repository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>http://antlr.org/antlr-snapshot</url>
+            <snapshots>
+                <enabled>true</enabled>
+                <updatePolicy>always</updatePolicy>
+            </snapshots>
+        </repository>
+
+    </repositories>
+    
+    <!-- Ancillary information for completeness
+      -->
+    <inceptionYear>2009</inceptionYear>
+
+    <mailingLists>
+        <mailingList>
+            <archive>http://antlr.markmail.org/</archive>
+            <otherArchives>
+                <otherArchive>http://www.antlr.org/pipermail/antlr-interest/</otherArchive>
+            </otherArchives>
+            <name>ANTLR Users</name>
+            <subscribe>http://www.antlr.org/mailman/listinfo/antlr-interest/</subscribe>
+            <unsubscribe>http://www.antlr.org/mailman/options/antlr-interest/</unsubscribe>
+            <post>antlr-interest at antlr.org</post>
+        </mailingList>
+    </mailingLists>
+
+    <organization>
+        <name>ANTLR.org</name>
+        <url>http://www.antlr.org</url>
+    </organization>
+    <!-- ============================================================================= -->
+
+    <!--
+
+     What are we dependent on for the Mojos to execute? We need the
+     plugin API itself and of course we need the ANTLR Tool and runtime
+     and any of their dependencies, which we inherit. The Tool itself provides
+     us with all the dependencies, so we need only name it here.
+      -->
+    <dependencies>
+
+        <!--
+          The things we need to build the target language recognizer
+          -->
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-plugin-api</artifactId>
+            <version>2.0</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-project</artifactId>
+            <version>2.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.codehaus.plexus</groupId>
+            <artifactId>plexus-compiler-api</artifactId>
+            <version>1.5.3</version>
+        </dependency>
+
+        <!--
+         The version of ANTLR tool that this version of the plugin controls.
+         We have decided that this should be in lockstep with ANTLR itself, other
+         than -1 -2 -3 etc patch releases.
+          -->
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>3.2</version>
+        </dependency>
+
+        <!--
+         Dependency on the gUnit artifact.
+        -->
+        <dependency>
+            <groupId>${groupId}</groupId>
+            <artifactId>gunit</artifactId>
+            <version>3.2</version>
+        </dependency>
+
+        <!--
+          Testing requirements...
+          -->
+        <dependency>
+
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.5</version>
+            <scope>test</scope>
+
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven.shared</groupId>
+            <artifactId>maven-plugin-testing-harness</artifactId>
+            <version>1.0</version>
+            <scope>test</scope>
+        </dependency>
+        
+    </dependencies>
+    
+    <build>
+
+        <defaultGoal>install</defaultGoal>
+        <extensions>
+            <extension>
+                <groupId>org.apache.maven.wagon</groupId>
+                <artifactId>wagon-ssh-external</artifactId>
+                <version>1.0-beta-2</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.0.2</version>
+                <configuration>
+                    <source>1.5</source>
+                    <target>1.5</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-project-info-reports-plugin</artifactId>
+                <configuration>
+                    <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
+                </configuration>
+            </plugin>
+        </plugins>
+
+    </build>
+
+</project>
diff --git a/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java b/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java
new file mode 100644
index 0000000..db3f569
--- /dev/null
+++ b/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java
@@ -0,0 +1,410 @@
+package org.antlr.mojo.antlr3;
+
+import java.util.List;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.io.File;
+import java.io.IOException;
+import java.io.Writer;
+import java.io.FileWriter;
+import java.io.BufferedWriter;
+import java.net.URL;
+import java.net.MalformedURLException;
+import java.net.URLClassLoader;
+
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugin.MojoFailureException;
+import org.apache.maven.project.MavenProject;
+import org.apache.maven.artifact.Artifact;
+import org.apache.maven.artifact.DependencyResolutionRequiredException;
+import org.apache.maven.artifact.versioning.ArtifactVersion;
+import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
+import org.apache.maven.artifact.versioning.OverConstrainedVersionException;
+import org.codehaus.plexus.util.StringUtils;
+import org.codehaus.plexus.util.FileUtils;
+import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
+import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
+import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.InclusionScanException;
+import org.antlr.runtime.ANTLRFileStream;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.gunit.GrammarInfo;
+import org.antlr.gunit.gUnitExecutor;
+import org.antlr.gunit.AbstractTest;
+import org.antlr.gunit.Interp;
+
+/**
+ * Takes gUnit scripts and directly performs testing.
+ *
+ * @goal gunit
+ *
+ * @phase test
+ * @requiresDependencyResolution test
+ * @requiresProject true
+ *
+ * @author Steve Ebersole
+ */
+public class GUnitExecuteMojo extends AbstractMojo {
+	public static final String ANTLR_GROUP_ID = "org.antlr";
+	public static final String ANTLR_ARTIFACT_NAME = "antlr";
+	public static final String ANTLR_RUNTIME_ARTIFACT_NAME = "antlr-runtime";
+
+	/**
+     * INTERNAL : The Maven Project to which we are attached
+     *
+     * @parameter expression="${project}"
+     * @required
+     */
+    private MavenProject project;
+
+	/**
+	 * INTERNAL : The artifacts associated to the dependencies defined as part
+	 * of our configuration within the project to which we are being attached.
+	 *
+	 * @parameter expression="${plugin.artifacts}"
+     * @required
+     * @readonly
+	 */
+	private List<Artifact> pluginArtifacts;
+
+	/**
+     * Specifies the directory containing the gUnit testing files.
+     *
+     * @parameter expression="${basedir}/src/test/gunit"
+     * @required
+     */
+    private File sourceDirectory;
+
+    /**
+     * A set of patterns for matching files from the sourceDirectory that
+     * should be included as gUnit source files.
+     *
+     * @parameter
+     */
+    private Set includes;
+
+    /**
+     * A set of exclude patterns.
+     *
+     * @parameter
+     */
+    private Set excludes;
+
+	/**
+     * Specifies directory to which gUnit reports should get written.
+     *
+     * @parameter expression="${basedir}/target/gunit-report"
+     * @required
+     */
+    private File reportDirectory;
+
+	/**
+	 * Should gUnit functionality be completely by-passed?
+	 * <p/>
+	 * By default we skip gUnit tests if the user requested that all testing be skipped using 'maven.test.skip'
+	 *
+	 * @parameter expression="${maven.test.skip}"
+	 */
+	private boolean skip;
+
+	public Set getIncludePatterns() {
+		return includes == null || includes.isEmpty()
+				? Collections.singleton( "**/*.testsuite" )
+				: includes;
+	}
+
+	public Set getExcludePatterns() {
+		return excludes == null
+				? Collections.emptySet()
+				: excludes;
+	}
+
+
+	public final void execute() throws MojoExecutionException, MojoFailureException {
+		if ( skip ) {
+			getLog().info( "Skipping gUnit processing" );
+			return;
+		}
+		Artifact pluginAntlrArtifact = determinePluginAntlrArtifact();
+
+		validateProjectsAntlrVersion( determineArtifactVersion( pluginAntlrArtifact ) );
+
+		performExecution( determineProjectCompileScopeClassLoader( pluginAntlrArtifact ) );
+	}
+
+	private Artifact determinePluginAntlrArtifact() throws MojoExecutionException {
+		for ( Artifact artifact : pluginArtifacts ) {
+			boolean match = ANTLR_GROUP_ID.equals( artifact.getGroupId() )
+					&& ANTLR_ARTIFACT_NAME.equals( artifact.getArtifactId() );
+			if ( match ) {
+				return artifact;
+			}
+		}
+		throw new MojoExecutionException(
+				"Unexpected state : could not locate " + ANTLR_GROUP_ID + ':' + ANTLR_ARTIFACT_NAME +
+						" in plugin dependencies"
+		);
+	}
+
+	private ArtifactVersion determineArtifactVersion(Artifact artifact) throws MojoExecutionException {
+		try {
+			return artifact.getVersion() != null
+					? new DefaultArtifactVersion( artifact.getVersion() )
+					: artifact.getSelectedVersion();
+		}
+		catch ( OverConstrainedVersionException e ) {
+			throw new MojoExecutionException( "artifact [" + artifact.getId() + "] defined an overly constrained version range" );
+		}
+	}
+
+	private void validateProjectsAntlrVersion(ArtifactVersion pluginAntlrVersion) throws MojoExecutionException {
+		Artifact antlrArtifact = null;
+		Artifact antlrRuntimeArtifact = null;
+
+		if ( project.getCompileArtifacts() != null ) {
+			for ( Object o : project.getCompileArtifacts() ) {
+				final Artifact artifact = ( Artifact ) o;
+				if ( ANTLR_GROUP_ID.equals( artifact.getGroupId() ) ) {
+					if ( ANTLR_ARTIFACT_NAME.equals( artifact.getArtifactId() ) ) {
+						antlrArtifact = artifact;
+						break;
+					}
+					if ( ANTLR_RUNTIME_ARTIFACT_NAME.equals( artifact.getArtifactId() ) ) {
+						antlrRuntimeArtifact = artifact;
+					}
+				}
+			}
+		}
+
+		validateBuildTimeArtifact( antlrArtifact, pluginAntlrVersion );
+		validateRunTimeArtifact( antlrRuntimeArtifact, pluginAntlrVersion );
+	}
+
+	@SuppressWarnings(value = "unchecked")
+	protected void validateBuildTimeArtifact(Artifact antlrArtifact, ArtifactVersion pluginAntlrVersion)
+			throws MojoExecutionException {
+		if ( antlrArtifact == null ) {
+			validateMissingBuildtimeArtifact();
+			return;
+		}
+
+		// otherwise, lets make sure they match...
+		ArtifactVersion projectAntlrVersion = determineArtifactVersion( antlrArtifact );
+		if ( pluginAntlrVersion.compareTo( projectAntlrVersion ) != 0 ) {
+			getLog().warn(
+					"Encountered " + ANTLR_GROUP_ID + ':' + ANTLR_ARTIFACT_NAME + ':' + projectAntlrVersion.toString() +
+							" which did not match Antlr version used by plugin [" + pluginAntlrVersion.toString() + "]"
+			);
+		}
+	}
+
+	protected void validateMissingBuildtimeArtifact() {
+		// generally speaking, its ok for the project to not define a dep on the build-time artifact...
+	}
+
+	@SuppressWarnings(value = "unchecked")
+	protected void validateRunTimeArtifact(Artifact antlrRuntimeArtifact, ArtifactVersion pluginAntlrVersion)
+			throws MojoExecutionException {
+		if ( antlrRuntimeArtifact == null ) {
+			// its possible, if the project instead depends on the build-time (or full) artifact.
+			return;
+		}
+
+		ArtifactVersion projectAntlrVersion = determineArtifactVersion( antlrRuntimeArtifact );
+		if ( pluginAntlrVersion.compareTo( projectAntlrVersion ) != 0 ) {
+			getLog().warn(
+					"Encountered " + ANTLR_GROUP_ID + ':' + ANTLR_RUNTIME_ARTIFACT_NAME + ':' + projectAntlrVersion.toString() +
+							" which did not match Antlr version used by plugin [" + pluginAntlrVersion.toString() + "]"
+			);
+		}
+	}
+
+	/**
+	 * Builds the classloader to pass to gUnit.
+	 *
+	 * @param antlrArtifact The plugin's (our) Antlr dependency artifact.
+	 *
+	 * @return The classloader for gUnit to use
+	 *
+	 * @throws MojoExecutionException Problem resolving artifacts to {@link java.net.URL urls}.
+	 */
+	private ClassLoader determineProjectCompileScopeClassLoader(Artifact antlrArtifact)
+			throws MojoExecutionException {
+		ArrayList<URL> classPathUrls = new ArrayList<URL>();
+		getLog().info( "Adding Antlr artifact : " + antlrArtifact.getId() );
+		classPathUrls.add( resolveLocalURL( antlrArtifact ) );
+
+		for ( String path : classpathElements() ) {
+			try {
+				getLog().info( "Adding project compile classpath element : " + path );
+				classPathUrls.add( new File( path ).toURI().toURL() );
+			}
+			catch ( MalformedURLException e ) {
+				throw new MojoExecutionException( "Unable to build path URL [" + path + "]" );
+			}
+		}
+
+		return new URLClassLoader( classPathUrls.toArray( new URL[classPathUrls.size()] ), getClass().getClassLoader() );
+	}
+
+	protected static URL resolveLocalURL(Artifact artifact) throws MojoExecutionException {
+		try {
+			return artifact.getFile().toURI().toURL();
+		}
+		catch ( MalformedURLException e ) {
+			throw new MojoExecutionException( "Unable to resolve artifact url : " + artifact.getId(), e );
+		}
+	}
+
+	@SuppressWarnings( "unchecked" )
+	private List<String> classpathElements() throws MojoExecutionException {
+		try {
+			// todo : should we combine both compile and test scoped elements?
+			return ( List<String> ) project.getTestClasspathElements();
+		}
+		catch ( DependencyResolutionRequiredException e ) {
+			throw new MojoExecutionException( "Call to Project#getCompileClasspathElements required dependency resolution" );
+		}
+	}
+
+	private void performExecution(ClassLoader projectCompileScopeClassLoader) throws MojoExecutionException {
+		getLog().info( "gUnit report directory : " + reportDirectory.getAbsolutePath() );
+		if ( !reportDirectory.exists() ) {
+			boolean directoryCreated = reportDirectory.mkdirs();
+			if ( !directoryCreated ) {
+				getLog().warn( "mkdirs() reported problem creating report directory" );
+			}
+		}
+
+		Result runningResults = new Result();
+		ArrayList<String> failureNames = new ArrayList<String>();
+
+		System.out.println();
+		System.out.println( "-----------------------------------------------------------" );
+		System.out.println( " G U N I T   R E S U L T S" );
+		System.out.println( "-----------------------------------------------------------" );
+
+		for ( File script : collectIncludedSourceGrammars() ) {
+			final String scriptPath = script.getAbsolutePath();
+			System.out.println( "Executing script " + scriptPath );
+			try {
+				String scriptBaseName = StringUtils.chompLast( FileUtils.basename( script.getName() ), "." );
+
+				ANTLRFileStream antlrStream = new ANTLRFileStream( scriptPath );
+				GrammarInfo grammarInfo = Interp.parse( antlrStream );
+				gUnitExecutor executor = new gUnitExecutor(
+						grammarInfo,
+						projectCompileScopeClassLoader,
+						script.getParentFile().getAbsolutePath()
+				);
+
+				String report = executor.execTest();
+				writeReportFile( new File( reportDirectory, scriptBaseName + ".txt" ), report );
+
+				Result testResult = new Result();
+				testResult.tests = executor.numOfTest;
+				testResult.failures = executor.numOfFailure;
+				testResult.invalids = executor.numOfInvalidInput;
+
+				System.out.println( testResult.render() );
+
+				runningResults.add( testResult );
+				for ( AbstractTest test : executor.failures ) {
+					failureNames.add( scriptBaseName + "#" + test.getHeader() );
+				}
+			}
+			catch ( IOException e ) {
+				throw new MojoExecutionException( "Could not open specified script file", e );
+			}
+			catch ( RecognitionException e ) {
+				throw new MojoExecutionException( "Could not parse gUnit script", e );
+			}
+		}
+
+		System.out.println();
+		System.out.println( "Summary :" );
+		if ( ! failureNames.isEmpty() ) {
+			System.out.println( "  Found " + failureNames.size() + " failures" );
+			for ( String name : failureNames ) {
+				System.out.println( "    - " + name );
+			}
+		}
+		System.out.println( runningResults.render() );
+		System.out.println();
+
+		if ( runningResults.failures > 0 ) {
+			throw new MojoExecutionException( "Found gUnit test failures" );
+		}
+
+		if ( runningResults.invalids > 0 ) {
+			throw new MojoExecutionException( "Found invalid gUnit tests" );
+		}
+	}
+
+	private Set<File> collectIncludedSourceGrammars() throws MojoExecutionException {
+		SourceMapping mapping = new SuffixMapping( "g", Collections.EMPTY_SET );
+        SourceInclusionScanner scan = new SimpleSourceInclusionScanner( getIncludePatterns(), getExcludePatterns() );
+        scan.addSourceMapping( mapping );
+		try {
+			Set scanResults = scan.getIncludedSources( sourceDirectory, null );
+			Set<File> results = new HashSet<File>();
+			for ( Object result : scanResults ) {
+				if ( result instanceof File ) {
+					results.add( ( File ) result );
+				}
+				else if ( result instanceof String ) {
+					results.add( new File( ( String ) result ) );
+				}
+				else {
+					throw new MojoExecutionException( "Unexpected result type from scanning [" + result.getClass().getName() + "]" );
+				}
+			}
+			return results;
+		}
+		catch ( InclusionScanException e ) {
+			throw new MojoExecutionException( "Error determining gUnit sources", e );
+		}
+	}
+
+	private void writeReportFile(File reportFile, String results) {
+		try {
+			Writer writer = new FileWriter( reportFile );
+			writer = new BufferedWriter( writer );
+			try {
+				writer.write( results );
+				writer.flush();
+			}
+			finally {
+				try {
+					writer.close();
+				}
+				catch ( IOException ignore ) {
+				}
+			}
+		}
+		catch ( IOException e ) {
+			getLog().warn(  "Error writing gUnit report file", e );
+		}
+	}
+
+	private static class Result {
+		private int tests = 0;
+		private int failures = 0;
+		private int invalids = 0;
+
+		public String render() {
+			return String.format( "Tests run: %d,  Failures: %d,  Invalid: %d", tests, failures, invalids );
+		}
+
+		public void add(Result result) {
+			this.tests += result.tests;
+			this.failures += result.failures;
+			this.invalids += result.invalids;
+		}
+	}
+
+}
diff --git a/gunit/CHANGES.txt b/gunit/CHANGES.txt
new file mode 100644
index 0000000..14090c5
--- /dev/null
+++ b/gunit/CHANGES.txt
@@ -0,0 +1,87 @@
+gUnit 1.0.5
+Nov 25, 2008
+
+Leon, Jen-Yuan Su
+leonsu at mac com
+
+CHANGES
+
+March 21, 2009
+
+* gUnitTestSuite.java: use lexer rule name if parser rule name is null (by Shaoting)
+
+* add gunit/swingui package for gUnitEditor GUI
+
+Feb 17, 2009
+
+* added new interfaces for GUI editor
+
+* recognizes invalid input as a FAIL case instead of throwing an exception
+
+Steve Ebersole provided a patch for the following two fixes.
+* allows setting an output directory (for JUnitCodeGen)
+
+* allows providing a classloader (for both JUnitCodeGen and gUnitExecutor) 
+
+Nov 25, 2008
+
+* fixed external test file path issue. if an input test file is not found under the current dir, then try to look for it under the package dir also.
+
+* fixed multiple-line input indentation issue.
+
+* fixed bug: FileNotFoundException terminated gUnit tests due to any non-existent input test file.
+
+* display escaped text for newline characters in the test result for comparing expected and actual string.
+
+Nov 20, 2008
+
+* added new functionality of testing lexical rules
+
+* fixed bug of using PipedInput/Output Stream and changed to ByteArrayOutputStream. Jared Bunting provided a patch on this issue.
+
+* improved jUnit translation mode and moved supporting codes into gUnitBaseTest.
+
+Oct 31, 2008
+
+* fixed bug of testing a tree grammar's template output
+
+July 9, 2008
+
+* fixed bug: program exited upon InvocationTargetException 
+  Sumanto Biswas pointed out the issue and provided suggestions.
+
+* Better handle on test rule's StringTemplate output
+
+May 10, 2008
+
+* added exit code functionality
+
+* fixed string escaping bug for junit generator
+
+1.0.2 - Apr 01, 2008
+
+* fixed grammar bug: multiple-line input, AST output
+
+* adjusted the output of test result
+
+Mar 20, 2008
+
+* moved test result to string template (gUnitTestResult.stg)
+
+* added the display of line of test in the test result
+
+Feb 19, 2008
+
+* fixed bug of displaying test sequence and error message from ANTLR
+
+Feb 8, 2008
+
+* made compatible with ANTLR 3.1b1
+
+1.0.1 - Jan 11, 2008
+
+* Kenny MacDermid helps with code refactoring
+
+1.0 - Aug 20, 2007
+
+Initial early access release
diff --git a/LICENSE.txt b/gunit/LICENSE.txt
similarity index 97%
copy from LICENSE.txt
copy to gunit/LICENSE.txt
index 1d1d5d6..b6ea2eb 100644
--- a/LICENSE.txt
+++ b/gunit/LICENSE.txt
@@ -1,5 +1,5 @@
 [The "BSD licence"]
-Copyright (c) 2003-2006 Terence Parr
+Copyright (c) 2007-2008 Leon, Jen-Yuan Su
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/gunit/README.txt b/gunit/README.txt
new file mode 100644
index 0000000..f8d83c9
--- /dev/null
+++ b/gunit/README.txt
@@ -0,0 +1,56 @@
+gUnit 1.0.5
+Feb 21, 2009
+
+Leon, Jen-Yuan Su
+leonsu at mac com
+
+INTRODUCTION
+
+Welcome to gUnit! I've been working on gUnit from 2007 summer and 
+this is a project in USF CS, sponsored by professor Terence.
+
+You should use the latest ANTLR v3.1 with gUnit:
+
+http://www.antlr.org/download.html
+
+See the wiki document:
+
+http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing 
+
+Per the license in LICENSE.txt, this software is not guaranteed to
+work and might even destroy all life on this planet:
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+EXAMPLES
+
+See the wiki tutorial of gUnit:
+
+    http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing
+
+----------------------------------------------------------------------
+
+What is gUnit?
+
+gUnit is a unit testing framework for ANTLR grammars. It provides a 
+simple way to write and run automated tests for grammars in a manner 
+similar to what jUnit does for unit testing.
+
+----------------------------------------------------------------------
+
+How do I install gUnit?
+
+Just add gunit-1.0.5.jar to your CLASSPATH, and also make sure that
+both ANTLR and StringTemplate jars lie in CLASSPATH.
diff --git a/gunit/antlr.config b/gunit/antlr.config
new file mode 100644
index 0000000..e69de29
diff --git a/gunit/pom.xml b/gunit/pom.xml
new file mode 100644
index 0000000..07ae2d2
--- /dev/null
+++ b/gunit/pom.xml
@@ -0,0 +1,168 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.antlr</groupId>
+    <artifactId>gunit</artifactId>
+    <packaging>jar</packaging>
+  
+    <name>ANTLR gUnit</name>
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.2</version>
+    </parent>
+
+    <url>http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing</url>
+
+    <!--
+
+    Define where the ANTLR gUnit jar is deployed both for
+    the main ANTLR repository, which syncs with the maven main
+    repository, and the snapshot repository, which can be
+    used by developers that need the latest development version of
+    something, but is used here to ensure that snapshot builds of the
+    ANTLR tool pick up the latest snapshot of the runtime and string template
+    and whatever else it might need in the future.
+    -->
+    <distributionManagement>
+
+        <repository>
+            <id>antlr-repo</id>
+            <name>ANTLR Testing repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-repo</url>
+        </repository>
+
+        <snapshotRepository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-snapshot</url>
+        </snapshotRepository>
+
+    </distributionManagement>
+
+  <!--
+
+    Inform Maven of the ANTLR snapshot repository, which it will
+    need to consult to get the latest snapshot build of the runtime and so on, should
+    this project need the latest builds from the antlr snapshots and not the pre-built
+    released jars from the repository.
+    -->
+    <repositories>
+
+      <!--
+        This is the ANTLR repository.
+        -->
+        <repository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>http://antlr.org/antlr-snapshot</url>
+            <snapshots>
+                <updatePolicy>always</updatePolicy>
+            </snapshots>
+        </repository>
+
+    </repositories>
+  
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build, run and test the ANTLR Tool. The ANTLR Tool uses earlier versions
+    of ANTLR at runtime (for the moment), uses the current
+    released version of ANTLR String template, but obviously is
+    reliant on the latest snapshot of the runtime, which will either be
+    taken from the antlr-snapshot repository, or your local .m2
+    repository if you built and installed that locally.
+
+    -->
+    <dependencies>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.5</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+            
+        </dependency>
+        
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>stringtemplate</artifactId>
+            <version>3.2.1</version>
+            <scope>compile</scope>
+        </dependency>
+
+
+
+    </dependencies>
+
+    <build>
+
+        <defaultGoal>install</defaultGoal>
+
+        <plugins>
+
+            <plugin>
+                <groupId>org.antlr</groupId>
+                <artifactId>antlr3-maven-plugin</artifactId>
+                <version>${project.version}</version>
+                <configuration></configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>antlr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>1.5</source>
+                    <target>jsr14</target>
+                    <sourceDirectory>src</sourceDirectory>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <artifactId>maven-surefire-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>findbugs-maven-plugin</artifactId>
+                <configuration>
+                    <findbugsXmlOutput>true</findbugsXmlOutput>
+                    <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
+                    <xmlOutput>true</xmlOutput>
+                </configuration>
+            </plugin>
+
+        </plugins>
+
+        <extensions>
+            <extension>
+                <groupId>org.apache.maven.wagon</groupId>
+                <artifactId>wagon-ssh-external</artifactId>
+                <version>1.0-beta-2</version>
+            </extension>
+        </extensions>
+
+    
+    </build>
+
+</project>
diff --git a/gunit/src/main/antlr3/org/antlr/gunit/gUnit.g b/gunit/src/main/antlr3/org/antlr/gunit/gUnit.g
new file mode 100644
index 0000000..95a73c6
--- /dev/null
+++ b/gunit/src/main/antlr3/org/antlr/gunit/gUnit.g
@@ -0,0 +1,352 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2007-2008 Leon Jen-Yuan Su
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+grammar gUnit;
+options {language=Java;}
+tokens {
+	OK = 'OK';
+	FAIL = 'FAIL';
+	DOC_COMMENT;
+}
+ at header {package org.antlr.gunit;}
+ at lexer::header {
+package org.antlr.gunit;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.StringReader;
+}
+ at members {
+public GrammarInfo grammarInfo;
+public gUnitParser(TokenStream input, GrammarInfo grammarInfo) {
+	super(input);
+	this.grammarInfo = grammarInfo;
+}
+}
+
+gUnitDef:	'gunit' g1=id ('walks' g2=id)? ';' 
+		{
+		if ( $g2.text!=null ) {
+			grammarInfo.setGrammarName($g2.text);
+			grammarInfo.setTreeGrammarName($g1.text);
+		}
+		else {
+			grammarInfo.setGrammarName($g1.text);
+		}
+		}
+		optionsSpec? header? testsuite*
+	;
+	
+optionsSpec 
+	:	OPTIONS (option ';')+ '}'
+	;
+
+// Note: currently, this is the only valid option for setting a customized tree adaptor	
+option	:	id '=' treeAdaptor
+		{
+		if ( $id.text.equals("TreeAdaptor") ) {
+		    grammarInfo.setAdaptor($treeAdaptor.text);
+		}
+		// TODO: need a better error logging strategy
+		else System.err.println("Invalid option detected: "+$text);
+		}
+	;
+	
+treeAdaptor
+	:	id EXT*
+	;
+
+header	:	'@header' ACTION
+		{
+		int pos1, pos2;
+		if ( (pos1=$ACTION.text.indexOf("package"))!=-1 && (pos2=$ACTION.text.indexOf(';'))!=-1 ) {
+			grammarInfo.setHeader($ACTION.text.substring(pos1+8, pos2).trim());	// substring the package path
+		}
+		else {
+			System.err.println("error(line "+$ACTION.getLine()+"): invalid header");
+		}
+		}
+	;
+		
+testsuite	// gUnit test suite based on individual rule
+scope {
+boolean isLexicalRule;
+}
+ at init {
+gUnitTestSuite ts = null;
+$testsuite::isLexicalRule = false;
+}
+	:	(	r1=RULE_REF ('walks' r2=RULE_REF)? 
+			{
+			if ( $r2==null ) ts = new gUnitTestSuite($r1.text);
+			else ts = new gUnitTestSuite($r1.text, $r2.text);
+			}
+		|	t=TOKEN_REF 
+			{
+			ts = new gUnitTestSuite();
+			ts.setLexicalRuleName($t.text);
+			$testsuite::isLexicalRule = true;
+			}
+		)
+		':'
+		testcase[ts]+ {grammarInfo.addRuleTestSuite(ts);}
+	;
+
+// TODO: currently gUnit just ignores an illegal test for a lexer rule, but it should also emit a warning message
+testcase[gUnitTestSuite ts]	// individual test within a (rule)testsuite
+	:	input expect {$ts.addTestCase($input.in, $expect.out);}
+	;
+
+input returns [gUnitTestInput in]
+ at init {
+String testInput = null;
+boolean inputIsFile = false;
+int line = -1;
+}
+ at after {
+in = new gUnitTestInput(testInput, inputIsFile, line);
+}
+	:	STRING 
+		{
+		testInput = $STRING.text.replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t")
+		.replace("\\b", "\b").replace("\\f", "\f").replace("\\\"", "\"").replace("\\'", "\'").replace("\\\\", "\\");
+		line = $STRING.line;
+		}
+	|	ML_STRING
+		{
+		testInput = $ML_STRING.text;
+		line = $ML_STRING.line;
+		}
+	|	file
+		{
+		testInput = $file.text;
+		inputIsFile = true;
+		line = $file.line;
+		}
+	;
+	
+expect returns [AbstractTest out]
+	:	OK {$out = new BooleanTest(true);}
+	|	FAIL {$out = new BooleanTest(false);}
+	|	'returns' RETVAL {if ( !$testsuite::isLexicalRule ) $out = new ReturnTest($RETVAL);}
+	|	'->' output {if ( !$testsuite::isLexicalRule ) $out = new OutputTest($output.token);}
+	;
+
+output returns [Token token]
+	:	STRING 
+		{
+		$STRING.setText($STRING.text.replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t")
+		.replace("\\b", "\b").replace("\\f", "\f").replace("\\\"", "\"").replace("\\'", "\'").replace("\\\\", "\\"));
+		$token = $STRING;
+		}
+	|	ML_STRING {$token = $ML_STRING;}
+	|	AST {$token = $AST;}
+	|	ACTION {$token = $ACTION;}
+	;
+
+file returns [int line]	
+	:	id EXT? {$line = $id.line;}
+	;
+
+id returns [int line] 
+	:	TOKEN_REF {$line = $TOKEN_REF.line;}
+	|	RULE_REF {$line = $RULE_REF.line;}
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {$channel=HIDDEN;} .* '*/'
+	;
+
+STRING	:	'"' ( ESC | ~('\\'|'"') )* '"' {setText(getText().substring(1, getText().length()-1));}
+	;
+
+ML_STRING
+	:	{// we need to determine the number of spaces or tabs (indentation) for multi-line input
+		StringBuffer buf = new StringBuffer();
+		int i = -1;
+		int c = input.LA(-1);
+		while ( c==' ' || c=='\t' ) {
+			buf.append((char)c);
+			c = input.LA(--i);
+		}
+		String indentation = buf.reverse().toString();
+		}
+		'<<' .* '>>' 
+		{// also determine the appropriate newline separator and get info of the first and last 2 characters (exclude '<<' and '>>')
+		String newline = System.getProperty("line.separator");
+		String front, end;
+		int oldFrontIndex = 2;
+		int oldEndIndex = getText().length()-2;
+		int newFrontIndex, newEndIndex;
+		if ( newline.length()==1 ) {
+			front = getText().substring(2, 3);
+			end = getText().substring(getText().length()-3, getText().length()-2);
+			newFrontIndex = 3;
+			newEndIndex = getText().length()-3;
+		}
+		else {// must be 2, e.g. Windows System which uses \r\n as a line separator
+			front = getText().substring(2, 4);
+			end = getText().substring(getText().length()-4, getText().length()-2);
+			newFrontIndex = 4;
+			newEndIndex = getText().length()-4;
+		}
+		// strip unwanted characters, e.g. '<<' (including a newline after it) or '>>'  (including a newline before it)
+		String temp = null;
+		if ( front.equals(newline) && end.equals(newline) ) {
+			// need to handle the special case: <<\n>> or <<\r\n>>
+			if ( newline.length()==1 && getText().length()==5 ) temp = "";
+			else if ( newline.length()==2 && getText().length()==6 ) temp = "";
+			else temp = getText().substring(newFrontIndex, newEndIndex);
+		}
+		else if ( front.equals(newline) ) {
+			temp = getText().substring(newFrontIndex, oldEndIndex);
+		}
+		else if ( end.equals(newline) ) {
+			temp = getText().substring(oldFrontIndex, newEndIndex);
+		}
+		else {
+			temp = getText().substring(oldFrontIndex, oldEndIndex);
+		}
+		// finally we need to process the indentation line by line
+		BufferedReader bufReader = new BufferedReader(new StringReader(temp));
+		buf = new StringBuffer();
+		String line = null;
+		int count = 0;
+		try {
+			while((line = bufReader.readLine()) != null) {
+				if ( line.startsWith(indentation) ) line = line.substring(indentation.length());
+				if ( count>0 ) buf.append(newline);
+				buf.append(line);
+				count++;
+			}
+			setText(buf.toString());
+		}
+		catch (IOException ioe) {
+			setText(temp);
+		}
+		}
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+EXT	:	'.'('a'..'z'|'A'..'Z'|'0'..'9')+;
+
+RETVAL	:	NESTED_RETVAL {setText(getText().substring(1, getText().length()-1));}
+	;
+
+fragment
+NESTED_RETVAL :
+	'['
+	(	options {greedy=false;}
+	:	NESTED_RETVAL
+	|	.
+	)*
+	']'
+	;
+
+AST	:	NESTED_AST (' '? NESTED_AST)*;
+
+fragment
+NESTED_AST :
+	'('
+	(	options {greedy=false;}
+	:	NESTED_AST
+	|	.
+	)*
+	')'
+	;
+
+OPTIONS	:	'options' WS* '{'
+	;
+
+ACTION
+	:	NESTED_ACTION {setText(getText().substring(1, getText().length()-1));}
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=3;}
+	:	NESTED_ACTION
+	|	STRING_LITERAL
+	|	CHAR_LITERAL
+	|	.
+	)*
+	'}'
+	;
+
+fragment
+CHAR_LITERAL
+	:	'\'' ( ESC | ~('\''|'\\') ) '\''
+	;
+
+fragment
+STRING_LITERAL
+	:	'"' ( ESC | ~('\\'|'"') )* '"'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+	
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
diff --git a/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g
new file mode 100644
index 0000000..5657072
--- /dev/null
+++ b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g
@@ -0,0 +1,619 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** ANTLR v3 grammar written in ANTLR v3 with AST construction */
+grammar ANTLRv3;
+
+options {
+	output=AST;
+	ASTLabelType=CommonTree; 
+}
+
+tokens {
+	DOC_COMMENT;
+	PARSER;	
+    LEXER;
+    RULE;
+    BLOCK;
+    OPTIONAL;
+    CLOSURE;
+    POSITIVE_CLOSURE;
+    SYNPRED;
+    RANGE;
+    CHAR_RANGE;
+    EPSILON;
+    ALT;
+    EOR;
+    EOB;
+    EOA; // end of alt
+    ID;
+    ARG;
+    ARGLIST;
+    RET;
+    LEXER_GRAMMAR;
+    PARSER_GRAMMAR;
+    TREE_GRAMMAR;
+    COMBINED_GRAMMAR;
+    INITACTION;
+    LABEL; // $x used in rewrite rules
+    TEMPLATE;
+    SCOPE='scope';
+    SEMPRED;
+    GATED_SEMPRED; // {p}? =>
+    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
+    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
+    FRAGMENT='fragment';
+    TREE_BEGIN='^(';
+    ROOT='^';
+    BANG='!';
+    RANGE='..';
+    REWRITE='->';
+}
+
+ at members {
+	int gtype;
+	public List<String> rules;
+}
+
+ at header {
+package org.antlr.gunit.swingui.parsers;
+
+import java.util.List;
+}
+
+ at lexer::header {
+package org.antlr.gunit.swingui.parsers;
+} 
+
+
+grammarDef
+    :   DOC_COMMENT?
+    	(	'lexer'  {gtype=LEXER_GRAMMAR;}    // pure lexer
+    	|   'parser' {gtype=PARSER_GRAMMAR;}   // pure parser
+    	|   'tree'   {gtype=TREE_GRAMMAR;}     // a tree parser
+    	|		     {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
+    	)
+    	g='grammar' id ';' optionsSpec? tokensSpec? attrScope* action*
+    	rule+
+    	EOF
+    	-> ^( {adaptor.create(gtype,$g)}
+    		  id DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+
+    		)
+    ;
+
+tokensSpec
+	:	TOKENS tokenSpec+ '}' -> ^(TOKENS tokenSpec+)
+	;
+
+tokenSpec
+	:	TOKEN_REF
+		(	'=' (lit=STRING_LITERAL|lit=CHAR_LITERAL)	-> ^('=' TOKEN_REF $lit)
+		|												-> TOKEN_REF
+		)
+		';'
+	;
+
+attrScope
+	:	'scope' id ACTION -> ^('scope' id ACTION)
+	;
+
+/** Match stuff like @parser::members {int i;} */
+action
+	:	'@' (actionScopeName '::')? id ACTION -> ^('@' actionScopeName? id ACTION)
+	;
+
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+actionScopeName
+	:	id
+	|	l='lexer'	-> ID[$l]
+    |   p='parser'	-> ID[$p]
+	;
+
+optionsSpec
+	:	OPTIONS (option ';')+ '}' -> ^(OPTIONS option+)
+	;
+
+option
+    :   id '=' optionValue -> ^('=' id optionValue)
+ 	;
+ 	
+optionValue
+    :   id
+    |   STRING_LITERAL
+    |   CHAR_LITERAL
+    |   INT
+    |	s='*' -> STRING_LITERAL[$s]  // used for k=*
+    ;
+
+rule
+scope {
+	String name;
+}
+ at after{
+	this.rules.add($rule::name);
+}
+	:	DOC_COMMENT?
+		( modifier=('protected'|'public'|'private'|'fragment') )?
+		id {$rule::name = $id.text;}
+		'!'?
+		( arg=ARG_ACTION )?
+		( 'returns' rt=ARG_ACTION  )?
+		throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+		':'	altList	';'
+		exceptionGroup?
+	    -> ^( RULE id {modifier!=null?adaptor.create(modifier):null} ^(ARG $arg)? ^(RET $rt)?
+	    	  optionsSpec? ruleScopeSpec? ruleAction*
+	    	  altList
+	    	  exceptionGroup?
+	    	  EOR["EOR"]
+	    	)
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	'@' id ACTION -> ^('@' id ACTION)
+	;
+
+throwsSpec
+	:	'throws' id ( ',' id )* -> ^('throws' id+)
+	;
+
+ruleScopeSpec
+	:	'scope' ACTION -> ^('scope' ACTION)
+	|	'scope' id (',' id)* ';' -> ^('scope' id+)
+	|	'scope' ACTION
+		'scope' id (',' id)* ';'
+		-> ^('scope' ACTION id+ )
+	;
+
+block
+    :   lp='('
+		( (opts=optionsSpec)? ':' )?
+		a1=alternative rewrite ( '|' a2=alternative rewrite )*
+        rp=')'
+        -> ^( BLOCK[$lp,"BLOCK"] optionsSpec? (alternative rewrite?)+ EOB[$rp,"EOB"] )
+    ;
+
+altList
+ at init {
+	// must create root manually as it's used by invoked rules in real antlr tool.
+	// leave here to demonstrate use of {...} in rewrite rule
+	// it's really BLOCK[firstToken,"BLOCK"]; set line/col to previous ( or : token.
+    CommonTree blkRoot = (CommonTree)adaptor.create(BLOCK,input.LT(-1),"BLOCK");
+}
+    :   a1=alternative rewrite ( '|' a2=alternative rewrite )*
+		-> ^( {blkRoot} (alternative rewrite?)+ EOB["EOB"] )
+    ;
+
+alternative
+ at init {
+	Token firstToken = input.LT(1);
+	Token prevToken = input.LT(-1); // either : or | I think
+}
+    :   element+ -> ^(ALT[firstToken,"ALT"] element+ EOA["EOA"])
+    |   -> ^(ALT[prevToken,"ALT"] EPSILON[prevToken,"EPSILON"] EOA["EOA"])
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ ( finallyClause )?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    'catch' ARG_ACTION ACTION -> ^('catch' ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    'finally' ACTION -> ^('finally' ACTION)
+    ;
+
+element
+	:	elementNoOptionSpec
+	;
+
+elementNoOptionSpec
+	:	id (labelOp='='|labelOp='+=') atom
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id atom) EOA["EOA"]) EOB["EOB"]))
+		|				-> ^($labelOp id atom)
+		)
+	|	id (labelOp='='|labelOp='+=') block
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id block) EOA["EOA"]) EOB["EOB"]))
+		|				-> ^($labelOp id block)
+		)
+	|	atom
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] atom EOA["EOA"]) EOB["EOB"]) )
+		|				-> atom
+		)
+	|	ebnf
+	|   ACTION
+	|   SEMPRED ( '=>' -> GATED_SEMPRED | -> SEMPRED )
+	|   treeSpec
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] treeSpec EOA["EOA"]) EOB["EOB"]) )
+		|				-> treeSpec
+		)
+	;
+
+atom:   range ( (op='^'|op='!') -> ^($op range) | -> range )
+    |   terminal
+    |	notSet ( (op='^'|op='!') -> ^($op notSet) | -> notSet )
+    |   RULE_REF ( arg=ARG_ACTION )? ( (op='^'|op='!') )?
+    	-> {$arg!=null&&op!=null}?	^($op RULE_REF $arg)
+    	-> {$arg!=null}?			^(RULE_REF $arg)
+    	-> {$op!=null}?				^($op RULE_REF)
+    	-> RULE_REF
+    ;
+
+notSet
+	:	'~'
+		(	notTerminal	-> ^('~' notTerminal)
+		|	block		-> ^('~' block)
+		)
+	;
+
+treeSpec
+	:	'^(' element ( element )+ ')' -> ^(TREE_BEGIN element+)
+	;
+
+/** Matches EBNF blocks (and token sets via block rule) */
+ebnf
+ at init {
+    Token firstToken = input.LT(1);
+}
+ at after {
+	$ebnf.tree.getToken().setLine(firstToken.getLine());
+	$ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
+}
+	:	block
+		(	op='?'	-> ^(OPTIONAL[op] block)
+		|	op='*'	-> ^(CLOSURE[op] block)
+		|	op='+'	-> ^(POSITIVE_CLOSURE[op] block)
+		|   '=>'	// syntactic predicate
+					-> {gtype==COMBINED_GRAMMAR &&
+					    Character.isUpperCase($rule::name.charAt(0))}?
+					   // if lexer rule in combined, leave as pred for lexer
+					   ^(SYNPRED["=>"] block)
+					// in real antlr tool, text for SYN_SEMPRED is predname
+					-> SYN_SEMPRED
+        |			-> block
+		)
+	;
+
+range!
+	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL -> ^(CHAR_RANGE[$c1,".."] $c1 $c2)
+	;
+
+terminal
+    :   (	CHAR_LITERAL				-> CHAR_LITERAL
+    		// Args are only valid for lexer rules
+		|   TOKEN_REF
+			( ARG_ACTION				-> ^(TOKEN_REF ARG_ACTION)
+			|							-> TOKEN_REF
+			)
+		|   STRING_LITERAL				-> STRING_LITERAL
+		|   '.'							-> '.'
+		)	
+		(	'^'							-> ^('^' $terminal)
+		|	'!' 						-> ^('!' $terminal)
+		)?
+	;
+
+notTerminal
+	:   CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+	
+ebnfSuffix
+ at init {
+	Token op = input.LT(1);
+}
+	:	'?'	-> OPTIONAL[op]
+  	|	'*' -> CLOSURE[op]
+   	|	'+' -> POSITIVE_CLOSURE[op]
+	;
+	
+
+
+// R E W R I T E  S Y N T A X
+
+rewrite
+ at init {
+	Token firstToken = input.LT(1);
+}
+	:	(rew+='->' preds+=SEMPRED predicated+=rewrite_alternative)*
+		rew2='->' last=rewrite_alternative
+        -> ^($rew $preds $predicated)* ^($rew2 $last)
+	|
+	;
+
+rewrite_alternative
+options {backtrack=true;}
+	:	rewrite_template
+	|	rewrite_tree_alternative
+   	|   /* empty rewrite */ -> ^(ALT["ALT"] EPSILON["EPSILON"] EOA["EOA"])
+	;
+	
+rewrite_tree_block
+    :   lp='(' rewrite_tree_alternative ')'
+    	-> ^(BLOCK[$lp,"BLOCK"] rewrite_tree_alternative EOB[$lp,"EOB"])
+    ;
+
+rewrite_tree_alternative
+    :	rewrite_tree_element+ -> ^(ALT["ALT"] rewrite_tree_element+ EOA["EOA"])
+    ;
+
+rewrite_tree_element
+	:	rewrite_tree_atom
+	|	rewrite_tree_atom ebnfSuffix
+		-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree_atom EOA["EOA"]) EOB["EOB"]))
+	|   rewrite_tree
+		(	ebnfSuffix
+			-> ^(ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree EOA["EOA"]) EOB["EOB"]))
+		|	-> rewrite_tree
+		)
+	|   rewrite_tree_ebnf
+	;
+
+rewrite_tree_atom
+    :   CHAR_LITERAL
+	|   TOKEN_REF ARG_ACTION? -> ^(TOKEN_REF ARG_ACTION?) // for imaginary nodes
+    |   RULE_REF
+	|   STRING_LITERAL
+	|   d='$' id -> LABEL[$d,$id.text] // reference to a label in a rewrite rule
+	|	ACTION
+	;
+
+rewrite_tree_ebnf
+ at init {
+    Token firstToken = input.LT(1);
+}
+ at after {
+	$rewrite_tree_ebnf.tree.getToken().setLine(firstToken.getLine());
+	$rewrite_tree_ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
+}
+	:	rewrite_tree_block ebnfSuffix -> ^(ebnfSuffix rewrite_tree_block)
+	;
+	
+rewrite_tree
+	:	'^(' rewrite_tree_atom rewrite_tree_element* ')'
+		-> ^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
+	;
+
+/** Build a tree for a template rewrite:
+      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+    where ARGLIST is always there even if no args exist.
+    ID can be "template" keyword.  If first child is ACTION then it's
+    an indirect template ref
+
+    -> foo(a={...}, b={...})
+    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
+    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+rewrite_template
+	:   // -> template(a={...},...) "..."    inline template
+		id lp='(' rewrite_template_args	')'
+		( str=DOUBLE_QUOTE_STRING_LITERAL | str=DOUBLE_ANGLE_STRING_LITERAL )
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args $str)
+
+	|	// -> foo(a={...}, ...)
+		rewrite_template_ref
+
+	|	// -> ({expr})(a={...}, ...)
+		rewrite_indirect_template_head
+
+	|	// -> {...}
+		ACTION
+	;
+
+/** -> foo(a={...}, ...) */
+rewrite_template_ref
+	:	id lp='(' rewrite_template_args	')'
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args)
+	;
+
+/** -> ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	lp='(' ACTION ')' '(' rewrite_template_args ')'
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewrite_template_args)
+	;
+
+rewrite_template_args
+	:	rewrite_template_arg (',' rewrite_template_arg)*
+		-> ^(ARGLIST rewrite_template_arg+)
+	|	-> ARGLIST
+	;
+
+rewrite_template_arg
+	:   id '=' ACTION -> ^(ARG[$id.start] id ACTION)
+	;
+
+id	:	TOKEN_REF -> ID[$TOKEN_REF]
+	|	RULE_REF  -> ID[$RULE_REF]
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//'
+ 	 	(	' $ANTLR ' SRC // src directive
+ 		|	~('\r'|'\n')*
+		)
+		'\r'? '\n'
+		{$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {if (input.LA(1)=='*') $type=DOC_COMMENT; else $channel=HIDDEN;} .* '*/'
+	;
+
+CHAR_LITERAL
+	:	'\'' LITERAL_CHAR '\''
+	;
+
+STRING_LITERAL
+	:	'\'' LITERAL_CHAR LITERAL_CHAR* '\''
+	;
+
+fragment
+LITERAL_CHAR
+	:	ESC
+	|	~('\''|'\\')
+	;
+
+DOUBLE_QUOTE_STRING_LITERAL
+	:	'"' (ESC | ~('\\'|'"'))* '"'
+	;
+
+DOUBLE_ANGLE_STRING_LITERAL
+	:	'<<' .* '>>'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+INT	:	'0'..'9'+
+	;
+
+ARG_ACTION
+	:	NESTED_ARG_ACTION
+	;
+
+fragment
+NESTED_ARG_ACTION :
+	'['
+	(	options {greedy=false; k=1;}
+	:	NESTED_ARG_ACTION
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	']'
+	{setText(getText().substring(1, getText().length()-1));}
+	;
+
+ACTION
+	:	NESTED_ACTION ( '?' {$type = SEMPRED;} )?
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=2;}
+	:	NESTED_ACTION
+	|	SL_COMMENT
+	|	ML_COMMENT
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	'}'
+   ;
+
+fragment
+ACTION_CHAR_LITERAL
+	:	'\'' (ACTION_ESC|~('\\'|'\'')) '\''
+	;
+
+fragment
+ACTION_STRING_LITERAL
+	:	'"' (ACTION_ESC|~('\\'|'"'))* '"'
+	;
+
+fragment
+ACTION_ESC
+	:	'\\\''
+	|	'\\' '"' // ANTLR doesn't like: '\\"'
+	|	'\\' ~('\''|'"')
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+/** Match the start of an options section.  Don't allow normal
+ *  action processing on the {...} as it's not an action.
+ */
+OPTIONS
+	:	'options' WS_LOOP '{'
+	;
+	
+TOKENS
+	:	'tokens' WS_LOOP '{'
+	;
+
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ */
+fragment
+SRC	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
+
+fragment
+WS_LOOP
+	:	(	WS
+		|	SL_COMMENT
+		|	ML_COMMENT
+		)*
+	;
+
+
diff --git a/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g
new file mode 100644
index 0000000..1701214
--- /dev/null
+++ b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g
@@ -0,0 +1,213 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2007-2008 Leon Jen-Yuan Su
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+grammar StGUnit;
+
+options {language=Java;}
+
+tokens {
+	OK = 'OK';
+	FAIL = 'FAIL';
+	DOC_COMMENT;
+}
+
+ at header {
+package org.antlr.gunit.swingui.parsers;
+import org.antlr.gunit.swingui.model.*;
+import org.antlr.gunit.swingui.runner.*;
+}
+
+ at lexer::header {package org.antlr.gunit.swingui.parsers;}
+
+ at members {
+public TestSuiteAdapter adapter ;;
+}
+
+gUnitDef
+	:	'gunit' name=id {adapter.setGrammarName($name.text);}
+	    ('walks' id)? ';' 
+		header? suite*
+	;
+
+header
+	:	'@header' ACTION
+	;
+		
+suite
+	:	(	parserRule=RULE_REF ('walks' RULE_REF)? 
+	        {adapter.startRule($parserRule.text);}
+		|	lexerRule=TOKEN_REF 
+			{adapter.startRule($lexerRule.text);}
+		)
+		':'
+		test+
+		{adapter.endRule();}
+	;
+
+test
+	:	input expect
+		{adapter.addTestCase($input.in, $expect.out);}
+	;
+	
+expect returns [ITestCaseOutput out]
+	:	OK			{$out = adapter.createBoolOutput(true);}
+	|	FAIL		{$out = adapter.createBoolOutput(false);}
+	|	'returns' RETVAL {$out = adapter.createReturnOutput($RETVAL.text);}
+	|	'->' output {$out = adapter.createStdOutput($output.text);}
+	|	'->' AST	{$out = adapter.createAstOutput($AST.text);}
+	;
+
+input returns [ITestCaseInput in]
+	:	STRING 		{$in = adapter.createStringInput($STRING.text);}
+	|	ML_STRING	{$in = adapter.createMultiInput($ML_STRING.text);}
+	|	fileInput	{$in = adapter.createFileInput($fileInput.path);}
+	;
+
+output
+	:	STRING
+	|	ML_STRING
+	|	ACTION
+	;
+	
+fileInput returns [String path]
+	:	id {$path = $id.text;} (EXT {$path += $EXT.text;})? 
+	;
+
+id 	:	TOKEN_REF
+	|	RULE_REF
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {$channel=HIDDEN;} .* '*/'
+	;
+
+STRING
+	:	'"' ( ESC | ~('\\'|'"') )* '"'
+	;
+
+ML_STRING
+	:	'<<' .* '>>' 
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+EXT	:	'.'('a'..'z'|'A'..'Z'|'0'..'9')+;
+
+RETVAL	:	NESTED_RETVAL
+	;
+
+fragment
+NESTED_RETVAL :
+	'['
+	(	options {greedy=false;}
+	:	NESTED_RETVAL
+	|	.
+	)*
+	']'
+	;
+
+AST	:	NESTED_AST (' '? NESTED_AST)*;
+
+fragment
+NESTED_AST :
+	'('
+	(	options {greedy=false;}
+	:	NESTED_AST
+	|	.
+	)*
+	')'
+	;
+
+ACTION
+	:	NESTED_ACTION
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=3;}
+	:	NESTED_ACTION
+	|	STRING_LITERAL
+	|	CHAR_LITERAL
+	|	.
+	)*
+	'}'
+	;
+
+fragment
+CHAR_LITERAL
+	:	'\'' ( ESC | ~('\''|'\\') ) '\''
+	;
+
+fragment
+STRING_LITERAL
+	:	'"' ( ESC | ~('\\'|'"') )* '"'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+	
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
diff --git a/gunit/src/main/java/org/antlr/gunit/AbstractTest.java b/gunit/src/main/java/org/antlr/gunit/AbstractTest.java
new file mode 100644
index 0000000..158bf04
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/AbstractTest.java
@@ -0,0 +1,83 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+public abstract class AbstractTest implements ITestCase {
+	// store essential individual test result for string template
+	protected String header;
+	protected String actual;
+	
+	protected boolean hasErrorMsg;
+	
+	private String testedRuleName;
+	private int testCaseIndex;
+	
+	// TODO: remove these. They're only used as part of a refactor to keep the
+	//       code cleaner. It is a mock-instanceOf() replacement.
+	public abstract int getType();
+	public abstract String getText();
+	
+	public abstract String getExpected();
+	// return an escaped string of the expected result
+	public String getExpectedResult() {
+		String expected = getExpected();
+		if ( expected!=null ) expected = JUnitCodeGen.escapeForJava(expected);
+		return expected;
+	}
+	public abstract String getResult(gUnitTestResult testResult);
+	public String getHeader() { return this.header; }
+	public String getActual() { return this.actual; }
+	// return an escaped string of the actual result
+	public String getActualResult() {
+		String actual = getActual();
+		// there is no need to escape the error message from ANTLR 
+		if ( actual!=null && !hasErrorMsg ) actual = JUnitCodeGen.escapeForJava(actual);
+		return actual;
+	}
+	
+	public String getTestedRuleName() { return this.testedRuleName; }
+	public int getTestCaseIndex() { return this.testCaseIndex; }
+	
+	public void setHeader(String rule, String lexicalRule, String treeRule, int numOfTest, int line) {
+		StringBuffer buf = new StringBuffer();
+		buf.append("test" + numOfTest + " (");
+		if ( treeRule!=null ) {
+			buf.append(treeRule+" walks ");
+		}
+		if ( lexicalRule!=null ) {
+			buf.append(lexicalRule + ", line"+line+")" + " - ");
+		}
+		else buf.append(rule + ", line"+line+")" + " - ");
+		this.header = buf.toString();
+	}
+	public void setActual(String actual) { this.actual = actual; }
+	
+	public void setTestedRuleName(String testedRuleName) { this.testedRuleName = testedRuleName; }
+	public void setTestCaseIndex(int testCaseIndex) { this.testCaseIndex = testCaseIndex; }
+	
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java b/gunit/src/main/java/org/antlr/gunit/BooleanTest.java
similarity index 64%
copy from runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
copy to gunit/src/main/java/org/antlr/gunit/BooleanTest.java
index 0aaa6e9..4426e77 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
+++ b/gunit/src/main/java/org/antlr/gunit/BooleanTest.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,30 +25,39 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit;
 
-/** A generic doubly-linked tree implementation with no payload.
- *  You must subclass to actually have any user data.
- *  TODO: do we really need/want this?
- */
-public abstract class DoubleLinkTree extends BaseTree {
-	protected DoubleLinkTree parent;
-
-	public DoubleLinkTree getParent() {
-		return parent;
+public class BooleanTest extends AbstractTest {
+	private boolean ok;
+	
+	public BooleanTest(boolean ok) {
+		this.ok = ok;
 	}
 
-	public void setParent(DoubleLinkTree t) {
-		parent = t;
+	@Override
+	public String getText() {
+		return (ok)? "OK" : "FAIL";
+	}
+	
+	@Override
+	public int getType() {
+		return (ok)? gUnitParser.OK : gUnitParser.FAIL;
 	}
 
-	public void addChild(BaseTree t) {
-		super.addChild(t);
-		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
+	@Override
+	public String getResult(gUnitTestResult testResult) {
+		if ( testResult.isLexerTest() ) {
+			if ( testResult.isSuccess() ) return "OK";
+			else {
+				hasErrorMsg = true;	// return error message for boolean test of lexer
+				return testResult.getError();
+			}
+		}
+		return (testResult.isSuccess())? "OK" : "FAIL";
 	}
 
-	public void setChild(int i, BaseTree t) {
-		super.setChild(i, t);
-		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
+	@Override
+	public String getExpected() {
+		return (ok)? "OK" : "FAIL";
 	}
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/GrammarInfo.java b/gunit/src/main/java/org/antlr/gunit/GrammarInfo.java
new file mode 100644
index 0000000..4a28248
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/GrammarInfo.java
@@ -0,0 +1,96 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon, Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/** Value object populated while parsing a gUnit script: the grammar under
+ *  test, optional settings (tree grammar, java package header, tree adaptor),
+ *  the per-rule test suites, and the accumulated unit-test result text.
+ */
+public class GrammarInfo {
+
+	private String grammarName;					// targeted grammar for unit test
+	private String treeGrammarName = null;		// optional, required for testing tree grammar
+	private String header = null;				// optional, required if using java package
+	private String adaptor = null;              // optional, required if using customized tree adaptor
+	private List<gUnitTestSuite> ruleTestSuites = new ArrayList<gUnitTestSuite>();	// testsuites for each testing rule
+	private StringBuffer unitTestResult = new StringBuffer();
+	
+	public String getGrammarName() {
+		return grammarName;
+	}
+	
+	public void setGrammarName(String grammarName) {
+		this.grammarName = grammarName;
+	}
+
+	public String getTreeGrammarName() {
+		return treeGrammarName;
+	}
+
+	public void setTreeGrammarName(String treeGrammarName) {
+		this.treeGrammarName = treeGrammarName;
+	}
+
+	public String getHeader() {
+		return header;
+	}
+
+	public void setHeader(String header) {
+		this.header = header;
+	}
+	
+	public String getAdaptor() {
+		return adaptor;
+	}
+	
+	public void setAdaptor(String adaptor) {
+		this.adaptor = adaptor;
+	}
+
+	public List<gUnitTestSuite> getRuleTestSuites() {
+		// Make this list unmodifiable so that we can refactor knowing it's not changed.
+		return Collections.unmodifiableList(ruleTestSuites);
+	}
+	
+	public void addRuleTestSuite(gUnitTestSuite testSuite) {
+		this.ruleTestSuites.add(testSuite);
+	}
+	
+	public void appendUnitTestResult(String result) {
+		this.unitTestResult.append(result);
+	}
+
+	// We don't want people messing with the string buffer here, so don't return it.
+	public String getUnitTestResult() {
+		return unitTestResult.toString();
+	}
+
+	// NOTE(review): keeps a reference to the caller's StringBuffer, so later
+	// external appends alias into this object's result text — at odds with the
+	// intent noted on getUnitTestResult(); consider a defensive copy if callers permit.
+	public void setUnitTestResult(StringBuffer unitTestResult) {
+		this.unitTestResult = unitTestResult;
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/CharStreamState.java b/gunit/src/main/java/org/antlr/gunit/ITestCase.java
similarity index 60%
copy from runtime/Java/src/org/antlr/runtime/CharStreamState.java
copy to gunit/src/main/java/org/antlr/gunit/ITestCase.java
index 5bcf116..437b789 100644
--- a/runtime/Java/src/org/antlr/runtime/CharStreamState.java
+++ b/gunit/src/main/java/org/antlr/gunit/ITestCase.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,21 +25,39 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.gunit;
 
-/** When walking ahead with cyclic DFA or for syntactic predicates,
- *  we need to record the state of the input stream (char index,
- *  line, etc...) so that we can rewind the state after scanning ahead.
+/**
+ * ITestCase object locates one test case in a gUnit script by specifying the
+ * tested rule and the index number of the test case in that group.
  *
- *  This is the complete state of a stream.
+ * For example:
+ * ----------------------
+ * ...
+ * varDef:
+ * "int i;" OK
+ * "float 2f;" FAIL
+ * ...
+ * ----------------------
+ * The "testedRuleName" for these two test cases will be "varDef".
+ * The "index" for the "int"-test will be 0.
+ * The "index" for the "float"-test will be 1.  And so on.
+ *
+ * @see ITestSuite
  */
-public class CharStreamState {
-	/** Index into the char stream of next lookahead char */
-	int p;
+public interface ITestCase {
+
+    /**
+     * Get the name of the rule that is tested by this test case.
+     * @return name of the tested rule.
+     */
+    public String getTestedRuleName();
 
-	/** What line number is the scanner at before processing buffer[p]? */
-	int line;
+    /**
+     * Get the index of the test case in the test group for a rule. Starting
+     * from 0.
+     * @return index number of the test case.
+     */
+    public int getTestCaseIndex();
 	
-	/** What char position 0..n-1 in line is scanner before processing buffer[p]? */
-	int charPositionInLine;
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java b/gunit/src/main/java/org/antlr/gunit/ITestSuite.java
similarity index 73%
copy from runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java
copy to gunit/src/main/java/org/antlr/gunit/ITestSuite.java
index eca2c59..c01673f 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java
+++ b/gunit/src/main/java/org/antlr/gunit/ITestSuite.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,16 +25,21 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit;
 
-import org.antlr.runtime.RuleReturnScope;
-
-/** This is identical to the ParserRuleReturnScope except that
- *  the start property is a tree nodes not Token object
- *  when you are parsing trees.  To be generic the tree node types
- *  have to be Object.
+/**
+ * A gUnit script file is an Antlr "test suite".  The interface is defined to
+ * allow the Swing GUI test runner to be notified when the gUnit interpreter
+ * runs a passed or failed test case.
+ *
+ * CHANGES:
+ * 2009-03-01: SHAOTING
+ *      - change method return void, parameter test object.
  */
-public class TreeRuleReturnScope extends RuleReturnScope {
-	/** First node or root node of tree matched for this rule. */
-	public Object start;
+public interface ITestSuite {
+	
+    public void onPass(ITestCase passTest);
+    
+    public void onFail(ITestCase failTest);
+
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/Interp.java b/gunit/src/main/java/org/antlr/gunit/Interp.java
new file mode 100644
index 0000000..07001b0
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/Interp.java
@@ -0,0 +1,91 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+import java.io.File;
+import java.io.IOException;
+
+import org.antlr.runtime.*;
+
+/** The main gUnit interpreter entry point. 
+ * 	Read a gUnit script, run unit tests or generate a junit file. 
+ */
+/** The main gUnit interpreter entry point.
+ *  Reads a gUnit script, then either runs its unit tests directly or,
+ *  with -o, generates a JUnit source file for them.
+ */
+public class Interp {
+
+	public static void main(String[] args) throws IOException, ClassNotFoundException, RecognitionException {
+		// Where the characters of the gUnit script come from (file or stdin).
+		CharStream input = null;
+		// Directory of the testsuite file; defaults to the working directory
+		// when the script is read from stdin.
+		String testsuiteDir = System.getProperty("user.dir");
+		
+		// "-o" mode: generate JUnit code instead of interpreting the tests.
+		if ( args.length > 0 && args[0].equals("-o") ) {
+			if ( args.length == 2 ) {
+				input = new ANTLRFileStream(args[1]);
+				File scriptFile = new File(args[1]);
+				testsuiteDir = getTestsuiteDir(scriptFile.getCanonicalPath(), scriptFile.getName());
+			}
+			else {
+				input = new ANTLRInputStream(System.in);
+			}
+			JUnitCodeGen generator = new JUnitCodeGen(parse(input), testsuiteDir);
+			generator.compile();
+			return;
+		}
+		
+		// Interpreter mode: run the gUnit tests and report the results.
+		if ( args.length == 1 ) {
+			input = new ANTLRFileStream(args[0]);
+			File scriptFile = new File(args[0]);
+			testsuiteDir = getTestsuiteDir(scriptFile.getCanonicalPath(), scriptFile.getName());
+		}
+		else {
+			input = new ANTLRInputStream(System.in);
+		}
+		
+		gUnitExecutor executor = new gUnitExecutor(parse(input), testsuiteDir);
+		System.out.print(executor.execTest());	// unit test result
+		
+		// Exit code = number of failed plus invalid tests.
+		System.exit(executor.failures.size() + executor.invalids.size());
+	}
+	
+	/** Lex and parse a gUnit script, collecting its contents into a GrammarInfo. */
+	public static GrammarInfo parse(CharStream input) throws RecognitionException {
+		gUnitLexer lexer = new gUnitLexer(input);
+		CommonTokenStream tokens = new CommonTokenStream(lexer);
+		GrammarInfo grammarInfo = new GrammarInfo();
+		gUnitParser parser = new gUnitParser(tokens, grammarInfo);
+		parser.gUnitDef();	// parse gunit script and save elements to grammarInfo
+		return grammarInfo;
+	}
+	
+	/** Strip the file name from its canonical path, leaving the directory (with trailing separator). */
+	public static String getTestsuiteDir(String fullPath, String fileName) {
+		return fullPath.substring(0, fullPath.length() - fileName.length());
+	}
+	
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/gunit/src/main/java/org/antlr/gunit/InvalidInputException.java
similarity index 80%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to gunit/src/main/java/org/antlr/gunit/InvalidInputException.java
index 815b4e6..d6e9dfc 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/gunit/src/main/java/org/antlr/gunit/InvalidInputException.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,10 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit;
+
+public class InvalidInputException extends Exception {
+
+	private static final long serialVersionUID = 1L;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
-	}
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java b/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java
new file mode 100644
index 0000000..140134d
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java
@@ -0,0 +1,325 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.StringTemplateGroupLoader;
+import org.antlr.stringtemplate.CommonGroupLoader;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.io.*;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.logging.ConsoleHandler;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/** Generates a JUnit test source file (Test&lt;GrammarName&gt;.java) from a parsed
+ *  gUnit script, using the StringTemplate group "junit" in org/antlr/gunit.
+ */
+public class JUnitCodeGen {
+	public GrammarInfo grammarInfo;
+	/** Maps each parser rule that declares a return value to that return type's name. */
+	public Map<String, String> ruleWithReturn;
+	private final String testsuiteDir;
+	private String outputDirectoryPath = ".";
+	
+	private final static Handler console = new ConsoleHandler();
+	private static final Logger logger = Logger.getLogger(JUnitCodeGen.class.getName());
+	static {
+		logger.addHandler(console);
+	}
+	
+	public JUnitCodeGen(GrammarInfo grammarInfo, String testsuiteDir) throws ClassNotFoundException {
+		this( grammarInfo, determineClassLoader(), testsuiteDir);
+	}
+	
+	private static ClassLoader determineClassLoader() {
+		// Prefer the context class loader; fall back to the loader of this class.
+		ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+		if ( classLoader == null ) {
+			classLoader = JUnitCodeGen.class.getClassLoader();
+		}
+		return classLoader;
+	}
+	
+	public JUnitCodeGen(GrammarInfo grammarInfo, ClassLoader classLoader, String testsuiteDir) throws ClassNotFoundException {
+		this.grammarInfo = grammarInfo;
+		this.testsuiteDir = testsuiteDir;
+		/** Map the name of rules having return value to its return type */
+		ruleWithReturn = new HashMap<String, String>();
+		Class parserClass = locateParserClass( grammarInfo, classLoader );
+		Method[] methods = parserClass.getDeclaredMethods();
+		for (Method method : methods) {
+			if ( !method.getReturnType().getName().equals("void") ) {
+				// '$' appears in nested return-scope class names; use '.' in generated source
+				ruleWithReturn.put(method.getName(), method.getReturnType().getName().replace('$', '.'));
+			}
+		}
+	}
+	
+	private Class locateParserClass(GrammarInfo grammarInfo, ClassLoader classLoader) throws ClassNotFoundException {
+		String parserClassName = grammarInfo.getGrammarName() + "Parser";
+		if ( grammarInfo.getHeader() != null ) {
+			parserClassName = grammarInfo.getHeader()+ "." + parserClassName;
+		}
+		return classLoader.loadClass( parserClassName );
+	}
+	
+	public String getOutputDirectoryPath() {
+		return outputDirectoryPath;
+	}
+	
+	public void setOutputDirectoryPath(String outputDirectoryPath) {
+		this.outputDirectoryPath = outputDirectoryPath;
+	}
+
+	/** Generate the JUnit file and write it to the configured output directory. */
+	public void compile() throws IOException{
+		String junitFileName;
+		if ( grammarInfo.getTreeGrammarName()!=null ) {
+			junitFileName = "Test"+grammarInfo.getTreeGrammarName();
+		}
+		else {
+			junitFileName = "Test"+grammarInfo.getGrammarName();
+		}
+		String lexerName = grammarInfo.getGrammarName()+"Lexer";
+		String parserName = grammarInfo.getGrammarName()+"Parser";
+		
+		StringTemplateGroupLoader loader = new CommonGroupLoader("org/antlr/gunit", null);
+		StringTemplateGroup.registerGroupLoader(loader);
+		StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
+		StringBuffer buf = compileToBuffer(junitFileName, lexerName, parserName);
+		// FIX: honor setOutputDirectoryPath(); previously the file was always
+		// written to "." and the configured output directory was silently ignored.
+		// Default outputDirectoryPath is "." so existing behavior is unchanged.
+		writeTestFile(outputDirectoryPath, junitFileName+".java", buf.toString());
+	}
+
+	/** Render the complete JUnit class source into a buffer (header + test methods). */
+	public StringBuffer compileToBuffer(String className, String lexerName, String parserName) {
+		StringTemplateGroup group = StringTemplateGroup.loadGroup("junit");
+		StringBuffer buf = new StringBuffer();
+		buf.append(genClassHeader(group, className, lexerName, parserName));
+		buf.append(genTestRuleMethods(group));
+		buf.append("\n\n}");
+		return buf;
+	}
+	
+	/** Render the class header: package, imports and class-path setup for the
+	 *  lexer/parser (and tree parser, if any), respecting an optional package header. */
+	protected String genClassHeader(StringTemplateGroup group, String junitFileName, String lexerName, String parserName) {
+		StringTemplate classHeaderST = group.getInstanceOf("classHeader");
+		if ( grammarInfo.getHeader()!=null ) {	// Set up class package if there is
+			classHeaderST.setAttribute("header", "package "+grammarInfo.getHeader()+";");
+		}
+		classHeaderST.setAttribute("junitFileName", junitFileName);
+		
+		String lexerPath = null;
+		String parserPath = null;
+		String treeParserPath = null;
+		String packagePath = null;
+		boolean isTreeGrammar = false;
+		boolean hasPackage = false;
+		/** Set up appropriate class path for parser/tree parser if using package */
+		if ( grammarInfo.getHeader()!=null ) {
+			hasPackage = true;
+			packagePath = "./"+grammarInfo.getHeader().replace('.', '/');
+			lexerPath = grammarInfo.getHeader()+"."+lexerName;
+			parserPath = grammarInfo.getHeader()+"."+parserName;
+			if ( grammarInfo.getTreeGrammarName()!=null ) {
+				treeParserPath = grammarInfo.getHeader()+"."+grammarInfo.getTreeGrammarName();
+				isTreeGrammar = true;
+			}
+		}
+		else {
+			lexerPath = lexerName;
+			parserPath = parserName;
+			if ( grammarInfo.getTreeGrammarName()!=null ) {
+				treeParserPath = grammarInfo.getTreeGrammarName();
+				isTreeGrammar = true;
+			}
+		}
+		classHeaderST.setAttribute("hasPackage", hasPackage);
+		classHeaderST.setAttribute("packagePath", packagePath);
+		classHeaderST.setAttribute("lexerPath", lexerPath);
+		classHeaderST.setAttribute("parserPath", parserPath);
+		classHeaderST.setAttribute("treeParserPath", treeParserPath);
+		classHeaderST.setAttribute("isTreeGrammar", isTreeGrammar);
+		return classHeaderST.toString();
+	}
+	
+	/** Render one JUnit test method per gUnit test input; tree-grammar and
+	 *  plain-grammar scripts use different templates. */
+	protected String genTestRuleMethods(StringTemplateGroup group) {
+		StringBuffer buf = new StringBuffer();
+		if ( grammarInfo.getTreeGrammarName()!=null ) {	// Generate junit codes of for tree grammar rule
+			for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
+				int i = 0;
+				for ( gUnitTestInput input: ts.testSuites.keySet() ) {	// each rule may contain multiple tests
+					i++;
+					StringTemplate testRuleMethodST;
+					/** If rule has multiple return values or ast*/
+					if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION && ruleWithReturn.containsKey(ts.getTreeRuleName()) ) {
+						testRuleMethodST = group.getInstanceOf("testTreeRuleMethod2");
+						String inputString = escapeForJava(input.testInput);
+						String outputString = ts.testSuites.get(input).getText();
+						testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getTreeRuleName())+"_walks_"+ 
+								changeFirstCapital(ts.getRuleName())+i);
+						testRuleMethodST.setAttribute("testTreeRuleName", '"'+ts.getTreeRuleName()+'"');
+						testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
+						testRuleMethodST.setAttribute("testInput", '"'+inputString+'"');
+						testRuleMethodST.setAttribute("returnType", ruleWithReturn.get(ts.getTreeRuleName()));
+						testRuleMethodST.setAttribute("isFile", input.inputIsFile);
+						testRuleMethodST.setAttribute("expecting", outputString);
+					}
+					else {
+						testRuleMethodST = group.getInstanceOf("testTreeRuleMethod");
+						String inputString = escapeForJava(input.testInput);
+						String outputString = ts.testSuites.get(input).getText();
+						testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getTreeRuleName())+"_walks_"+ 
+								changeFirstCapital(ts.getRuleName())+i);
+						testRuleMethodST.setAttribute("testTreeRuleName", '"'+ts.getTreeRuleName()+'"');
+						testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
+						testRuleMethodST.setAttribute("testInput", '"'+inputString+'"');
+						testRuleMethodST.setAttribute("isFile", input.inputIsFile);
+						testRuleMethodST.setAttribute("tokenType", getTypeString(ts.testSuites.get(input).getType()));
+						
+						if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// trim ';' at the end of ACTION if there is...
+							//testRuleMethodST.setAttribute("expecting", outputString.substring(0, outputString.length()-1));
+							testRuleMethodST.setAttribute("expecting", outputString);
+						}
+						else if ( ts.testSuites.get(input).getType()==gUnitParser.RETVAL ) {	// Expected: RETVAL
+							testRuleMethodST.setAttribute("expecting", outputString);
+						}
+						else {	// Attach "" to expected STRING or AST
+							testRuleMethodST.setAttribute("expecting", '"'+escapeForJava(outputString)+'"');
+						}
+					}
+					buf.append(testRuleMethodST.toString());
+				}
+			}
+		}
+		else {	// Generate junit codes of for grammar rule
+			for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
+				int i = 0;
+				for ( gUnitTestInput input: ts.testSuites.keySet() ) {	// each rule may contain multiple tests
+					i++;
+					StringTemplate testRuleMethodST;
+					/** If rule has multiple return values or ast*/
+					if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION && ruleWithReturn.containsKey(ts.getRuleName()) ) {
+						testRuleMethodST = group.getInstanceOf("testRuleMethod2");
+						String inputString = escapeForJava(input.testInput);
+						String outputString = ts.testSuites.get(input).getText();
+						testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getRuleName())+i);
+						testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
+						testRuleMethodST.setAttribute("testInput", '"'+inputString+'"');
+						testRuleMethodST.setAttribute("returnType", ruleWithReturn.get(ts.getRuleName()));
+						testRuleMethodST.setAttribute("isFile", input.inputIsFile);
+						testRuleMethodST.setAttribute("expecting", outputString);
+					}
+					else {
+						String testRuleName;
+						// need to determine whether it's a test for parser rule or lexer rule
+						if ( ts.isLexicalRule() ) testRuleName = ts.getLexicalRuleName();
+						else testRuleName = ts.getRuleName();
+						testRuleMethodST = group.getInstanceOf("testRuleMethod");
+						String inputString = escapeForJava(input.testInput);
+						String outputString = ts.testSuites.get(input).getText();
+						testRuleMethodST.setAttribute("isLexicalRule", ts.isLexicalRule());
+						testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(testRuleName)+i);
+						testRuleMethodST.setAttribute("testRuleName", '"'+testRuleName+'"');
+						testRuleMethodST.setAttribute("testInput", '"'+inputString+'"');
+						testRuleMethodST.setAttribute("isFile", input.inputIsFile);
+						testRuleMethodST.setAttribute("tokenType", getTypeString(ts.testSuites.get(input).getType()));
+						
+						if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// trim ';' at the end of ACTION if there is...
+							//testRuleMethodST.setAttribute("expecting", outputString.substring(0, outputString.length()-1));
+							testRuleMethodST.setAttribute("expecting", outputString);
+						}
+						else if ( ts.testSuites.get(input).getType()==gUnitParser.RETVAL ) {	// Expected: RETVAL
+							testRuleMethodST.setAttribute("expecting", outputString);
+						}
+						else {	// Attach "" to expected STRING or AST
+							testRuleMethodST.setAttribute("expecting", '"'+escapeForJava(outputString)+'"');
+						}
+					}
+					buf.append(testRuleMethodST.toString());
+				}
+			}
+		}
+		return buf.toString();
+	}
+
+	// return a meaningful gUnit token type name instead of using the magic number
+	public String getTypeString(int type) {
+		String typeText;
+		switch (type) {
+			case gUnitParser.OK :
+				typeText = "org.antlr.gunit.gUnitParser.OK";
+				break;
+			case gUnitParser.FAIL :
+				typeText = "org.antlr.gunit.gUnitParser.FAIL";
+				break;
+			case gUnitParser.STRING :
+				typeText = "org.antlr.gunit.gUnitParser.STRING";
+				break;
+			case gUnitParser.ML_STRING :
+				typeText = "org.antlr.gunit.gUnitParser.ML_STRING";
+				break;
+			case gUnitParser.RETVAL :
+				typeText = "org.antlr.gunit.gUnitParser.RETVAL";
+				break;
+			case gUnitParser.AST :
+				typeText = "org.antlr.gunit.gUnitParser.AST";
+				break;
+			default :
+				typeText = "org.antlr.gunit.gUnitParser.EOF";
+				break;
+		}
+		return typeText;
+	}
+	
+	/** Write the generated source to dir/fileName; logs (rather than throws) on failure. */
+	protected void writeTestFile(String dir, String fileName, String content) {
+		try {
+			File f = new File(dir, fileName);
+			FileWriter w = new FileWriter(f);
+			try {
+				BufferedWriter bw = new BufferedWriter(w);
+				bw.write(content);
+				bw.close();
+			}
+			finally {
+				// FIX: previously the writer leaked if write() threw; closing an
+				// already-closed FileWriter is a harmless no-op.
+				w.close();
+			}
+		}
+		catch (IOException ioe) {
+			logger.log(Level.SEVERE, "can't write file", ioe);
+		}
+	}
+
+	/** Escape a string so it can appear inside a Java double-quoted literal. */
+	public static String escapeForJava(String inputString) {
+		// Gotta escape literal backslash before putting in specials that use escape.
+		inputString = inputString.replace("\\", "\\\\");
+		// Then double quotes need escaping (singles are OK of course).
+		inputString = inputString.replace("\"", "\\\"");
+		// Finally replace the control characters with their escape sequences.
+		inputString = inputString.replace("\n", "\\n").replace("\t", "\\t").replace("\r", "\\r").replace("\b", "\\b").replace("\f", "\\f");
+		
+		return inputString;
+	}
+	
+	/** Upper-case the first character of a rule name for use in a method name. */
+	protected String changeFirstCapital(String ruleName) {
+		String firstChar = String.valueOf(ruleName.charAt(0));
+		return firstChar.toUpperCase()+ruleName.substring(1);
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/Parser.java b/gunit/src/main/java/org/antlr/gunit/OutputTest.java
similarity index 61%
copy from runtime/Java/src/org/antlr/runtime/Parser.java
copy to gunit/src/main/java/org/antlr/gunit/OutputTest.java
index 1000a52..71359e5 100644
--- a/runtime/Java/src/org/antlr/runtime/Parser.java
+++ b/gunit/src/main/java/org/antlr/gunit/OutputTest.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,41 +25,43 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.gunit;
 
-/** A parser for TokenStreams.  "parser grammars" result in a subclass
- *  of this.
- */
-public class Parser extends BaseRecognizer {
-    protected TokenStream input;
-
-	public Parser(TokenStream input) {
-        setTokenStream(input);
-    }
+import org.antlr.runtime.Token;
 
-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
+/** OutputTest represents a test for not only standard output string, 
+ *  but also AST output which is actually a return value from a parser.
+ */
+public class OutputTest extends AbstractTest {
+	private final Token token;
+	
+	public OutputTest(Token token) {
+		this.token = token;
 	}
 
-	/** Set the token stream and reset the parser */
-	public void setTokenStream(TokenStream input) {
-		this.input = null;
-		reset();
-		this.input = input;
+	@Override
+	public String getText() {
+		return token.getText();
 	}
 
-    public TokenStream getTokenStream() {
-		return input;
+	@Override
+	public int getType() {
+		return token.getType();
 	}
 
-	public void traceIn(String ruleName, int ruleIndex)  {
-		super.traceIn(ruleName, ruleIndex, input.LT(1));
+	@Override
+	// return ANTLR error msg if test failed
+	public String getResult(gUnitTestResult testResult) {
+		// Note: we treat the standard output string as a return value also
+		if ( testResult.isSuccess() ) return testResult.getReturned();
+		else {
+			hasErrorMsg = true;
+			return testResult.getError();
+		}
 	}
 
-	public void traceOut(String ruleName, int ruleIndex)  {
-		super.traceOut(ruleName, ruleIndex, input.LT(1));
+	@Override
+	public String getExpected() {
+		return token.getText();
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java b/gunit/src/main/java/org/antlr/gunit/ReturnTest.java
similarity index 62%
copy from runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
copy to gunit/src/main/java/org/antlr/gunit/ReturnTest.java
index 0aaa6e9..8ec5a4e 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
+++ b/gunit/src/main/java/org/antlr/gunit/ReturnTest.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,30 +25,45 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit;
 
-/** A generic doubly-linked tree implementation with no payload.
- *  You must subclass to actually have any user data.
- *  TODO: do we really need/want this?
- */
-public abstract class DoubleLinkTree extends BaseTree {
-	protected DoubleLinkTree parent;
+import org.antlr.runtime.Token;
 
-	public DoubleLinkTree getParent() {
-		return parent;
+public class ReturnTest extends AbstractTest {
+	private final Token retval;
+	
+	public ReturnTest(Token retval) {
+		this.retval = retval;
 	}
 
-	public void setParent(DoubleLinkTree t) {
-		parent = t;
+	@Override
+	public String getText() {
+		return retval.getText();
 	}
 
-	public void addChild(BaseTree t) {
-		super.addChild(t);
-		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
+	@Override
+	public int getType() {
+		return retval.getType();
 	}
 
-	public void setChild(int i, BaseTree t) {
-		super.setChild(i, t);
-		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
+	@Override
+	// return ANTLR error msg if test failed
+	public String getResult(gUnitTestResult testResult) {
+		if ( testResult.isSuccess() ) return testResult.getReturned();
+		else {
+			hasErrorMsg = true;
+			return testResult.getError();
+		}
+	}
+
+	@Override
+	public String getExpected() {
+		String expect = retval.getText();
+		
+		if ( expect.charAt(0)=='"' && expect.charAt(expect.length()-1)=='"' ) {
+			expect = expect.substring(1, expect.length()-1);
+		}
+		
+		return expect;
 	}
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java b/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java
new file mode 100644
index 0000000..ee04549
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java
@@ -0,0 +1,457 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon, Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.PrintStream;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import org.antlr.runtime.ANTLRFileStream;
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.Lexer;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.stringtemplate.StringTemplate;
+
+import junit.framework.TestCase;
+
+/** All gUnit-generated JUnit classes should extend this class, 
+ *  which implements the essential methods for triggering
+ *  ANTLR parser/tree walker
+ */
+public abstract class gUnitBaseTest extends TestCase {
+	
+	public String packagePath;
+	public String lexerPath;
+	public String parserPath;
+	public String treeParserPath;
+	
+	protected String stdout;
+	protected String stderr;
+	
+	private PrintStream console = System.out;
+	private PrintStream consoleErr = System.err;
+	
+	// Invoke target lexer.rule
+	public String execLexer(String testRuleName, String testInput, boolean isFile) throws Exception {
+		CharStream input;
+		/** Set up ANTLR input stream based on input source, file or String */
+		if ( isFile ) {
+			String filePath = testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, also try to look for it under the package dir
+			if ( !testInputFile.exists() && packagePath!=null ) {
+				testInputFile = new File(packagePath, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput);
+		}
+		Class lexer = null;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+        try {
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = Class.forName(lexerPath);
+            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
+            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
+            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
+            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
+            
+            Method ruleName = lexer.getMethod("m"+testRuleName, new Class[0]);
+            
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke lexer rule, and get the current index in CharStream */
+            ruleName.invoke(lexObj, new Object[0]);
+            Method ruleName2 = lexer.getMethod("getCharIndex", new Class[0]);
+            int currentIndex = (Integer) ruleName2.invoke(lexObj, new Object[0]);
+            if ( currentIndex!=input.size() ) {
+            	ps2.println("extra text found, '"+input.substring(currentIndex, input.size()-1)+"'");
+            }
+			
+            this.stdout = null;
+			this.stderr = null;
+            
+			if ( err.toString().length()>0 ) {
+				this.stderr = err.toString();
+				return this.stderr;
+			}
+			if ( out.toString().length()>0 ) {
+				this.stdout = out.toString();
+			}
+			if ( err.toString().length()==0 && out.toString().length()==0 ) {
+				return null;
+			}
+        } catch (ClassNotFoundException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (SecurityException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (NoSuchMethodException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalArgumentException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InstantiationException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalAccessException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InvocationTargetException e) {	// This exception could be caused from ANTLR Runtime Exception, e.g. MismatchedTokenException
+        	if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
+			else this.stderr = e.toString();
+        	return this.stderr;
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        return this.stdout;
+	}
+	
+	// Invoke target parser.rule
+	public Object execParser(String testRuleName, String testInput, boolean isFile) throws Exception {
+		CharStream input;
+		/** Set up ANTLR input stream based on input source, file or String */
+		if ( isFile ) {
+			String filePath = testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, also try to look for it under the package dir
+			if ( !testInputFile.exists() && packagePath!=null ) {
+				testInputFile = new File(packagePath, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput);
+		}
+		Class lexer = null;
+		Class parser = null;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Use Reflection to create instances of lexer and parser */
+			lexer = Class.forName(lexerPath);
+            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
+            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);
+            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
+            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
+            
+            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
+            parser = Class.forName(parserPath);
+            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
+            Constructor parConstructor = parser.getConstructor(parArgTypes);
+            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args  
+            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser      
+            
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+			/** Invoke grammar rule, and store if there is a return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+            String astString = null;
+            String stString = null;
+            /** If rule has return value, determine if it contains an AST or a ST */
+            if ( ruleReturn!=null ) {
+                if ( ruleReturn.getClass().toString().indexOf(testRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class _return = Class.forName(parserPath+"$"+testRuleName+"_return");
+                		Method[] methods = _return.getDeclaredMethods();
+                		for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method returnName = _return.getMethod("getTree");
+		                    	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+		                    	astString = tree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method returnName = _return.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) returnName.invoke(ruleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+
+			this.stdout = null;
+			this.stderr = null;
+			
+			/** Invalid input */
+            if ( tokens.index()!=tokens.size() ) {
+            	//throw new InvalidInputException();
+            	this.stderr = "Invalid input";
+            }
+            
+			// retVal could be actual return object from rule, stderr or stdout
+			if ( err.toString().length()>0 ) {
+				this.stderr = err.toString();
+				return this.stderr;
+			}
+			if ( out.toString().length()>0 ) {
+				this.stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return astString;
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return stString;
+			}
+			if ( ruleReturn!=null ) {
+				return ruleReturn;
+			}
+			if ( err.toString().length()==0 && out.toString().length()==0 ) {
+				return null;
+			}
+		} catch (ClassNotFoundException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (SecurityException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (NoSuchMethodException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (IllegalAccessException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (InvocationTargetException e) {
+			if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
+			else this.stderr = e.toString();
+        	return this.stderr;
+		} finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+		return this.stdout;
+	}
+	
+	// Invoke target treeparser.rule on the AST produced by parser.rule
+	public Object execTreeParser(String testTreeRuleName, String testRuleName, String testInput, boolean isFile) throws Exception {
+		CharStream input;
+		if ( isFile ) {
+			String filePath = testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, also try to look for it under the package dir
+			if ( !testInputFile.exists() && packagePath!=null ) {
+				testInputFile = new File(packagePath, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput);
+		}
+		Class lexer = null;
+		Class parser = null;
+		Class treeParser = null;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Use Reflection to create instances of lexer and parser */
+        	lexer = Class.forName(lexerPath);
+            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
+            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
+            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
+            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
+            
+            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
+            
+            parser = Class.forName(parserPath);
+            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
+            Constructor parConstructor = parser.getConstructor(parArgTypes);
+            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args  
+            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser      
+            
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke grammar rule, and get the return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+            
+            Class _return = Class.forName(parserPath+"$"+testRuleName+"_return");            	
+        	Method returnName = _return.getMethod("getTree");
+        	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+
+        	// Walk resulting tree; create tree nodes stream first
+        	CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
+        	// AST nodes have payload that point into token stream
+        	nodes.setTokenStream(tokens);
+        	// Create a tree walker attached to the nodes stream
+        	treeParser = Class.forName(treeParserPath);
+            Class[] treeParArgTypes = new Class[]{TreeNodeStream.class};		// assign type to tree parser's args
+            Constructor treeParConstructor = treeParser.getConstructor(treeParArgTypes);
+            Object[] treeParArgs = new Object[]{nodes};							// assign value to tree parser's args  
+            Object treeParObj = treeParConstructor.newInstance(treeParArgs);	// makes new instance of tree parser      
+        	// Invoke the tree rule, and store the return value if there is one
+            Method treeRuleName = treeParser.getMethod(testTreeRuleName);
+            Object treeRuleReturn = treeRuleName.invoke(treeParObj);
+            
+            String astString = null;
+            String stString = null;
+            /** If tree rule has return value, determine if it contains an AST or a ST */
+            if ( treeRuleReturn!=null ) {
+                if ( treeRuleReturn.getClass().toString().indexOf(testTreeRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class _treeReturn = Class.forName(treeParserPath+"$"+testTreeRuleName+"_return");
+                		Method[] methods = _treeReturn.getDeclaredMethods();
+			            for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method treeReturnName = _treeReturn.getMethod("getTree");
+		                    	CommonTree returnTree = (CommonTree) treeReturnName.invoke(treeRuleReturn);
+		                        astString = returnTree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method treeReturnName = _return.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) treeReturnName.invoke(treeRuleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+
+			this.stdout = null;
+			this.stderr = null;
+			
+			/** Invalid input */
+            if ( tokens.index()!=tokens.size() ) {
+            	throw new InvalidInputException();
+            }
+			
+			// retVal could be actual return object from rule, stderr or stdout
+			if ( err.toString().length()>0 ) {
+				this.stderr = err.toString();
+				return this.stderr;
+			}
+			if ( out.toString().length()>0 ) {
+				this.stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return astString;
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return stString;
+			}
+			if ( treeRuleReturn!=null ) {
+				return treeRuleReturn;
+			}
+			if ( err.toString().length()==0 && out.toString().length()==0 ) {
+				return null;
+			}
+		} catch (ClassNotFoundException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (SecurityException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (NoSuchMethodException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (IllegalAccessException e) {
+			e.printStackTrace(); System.exit(1);
+		} catch (InvocationTargetException e) {
+			if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
+			else this.stderr = e.toString();
+        	return this.stderr;
+		} finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+		return stdout;
+	}
+	
+	// Modify the return value if the expected token type is OK or FAIL
+	public Object examineExecResult(int tokenType, Object retVal) {	
+		if ( tokenType==gUnitParser.OK ) {	// expected Token: OK
+			if ( this.stderr==null ) {
+				return "OK";
+			}
+			else {
+				return "FAIL, "+this.stderr;
+			}
+		}
+		else if ( tokenType==gUnitParser.FAIL ) {	// expected Token: FAIL
+			if ( this.stderr!=null ) {
+				return "FAIL";
+			}
+			else {
+				return "OK";
+			}
+		}
+		else {	// return the same object for the other token types
+			return retVal;
+		}		
+	}
+	
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java b/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java
new file mode 100644
index 0000000..fb93ca1
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java
@@ -0,0 +1,629 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import java.io.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.lang.reflect.*;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.*;
+import org.antlr.stringtemplate.CommonGroupLoader;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.StringTemplateGroupLoader;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+public class gUnitExecutor implements ITestSuite {
+	public GrammarInfo grammarInfo;
+	
+	private final ClassLoader grammarClassLoader;
+	
+	private final String testsuiteDir;
+	
+	public int numOfTest;
+
+	public int numOfSuccess;
+
+	public int numOfFailure;
+
+	private String title;
+
+	public int numOfInvalidInput;
+
+	private String parserName;
+
+	private String lexerName;
+	
+	public List<AbstractTest> failures;
+	public List<AbstractTest> invalids;
+	
+	private PrintStream console = System.out;
+    private PrintStream consoleErr = System.err;
+    
+    public gUnitExecutor(GrammarInfo grammarInfo, String testsuiteDir) {
+    	this( grammarInfo, determineClassLoader(), testsuiteDir);
+    }
+    
+    private static ClassLoader determineClassLoader() {
+    	ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    	if ( classLoader == null ) {
+    		classLoader = gUnitExecutor.class.getClassLoader();
+    	}
+    	return classLoader;
+    }
+    
+	public gUnitExecutor(GrammarInfo grammarInfo, ClassLoader grammarClassLoader, String testsuiteDir) {
+		this.grammarInfo = grammarInfo;
+		this.grammarClassLoader = grammarClassLoader;
+		this.testsuiteDir = testsuiteDir;
+		numOfTest = 0;
+		numOfSuccess = 0;
+		numOfFailure = 0;
+		numOfInvalidInput = 0;
+		failures = new ArrayList<AbstractTest>();
+		invalids = new ArrayList<AbstractTest>();
+	}
+	
+	protected ClassLoader getGrammarClassLoader() {
+		return grammarClassLoader;
+	}
+	
+	protected final Class classForName(String name) throws ClassNotFoundException {
+		return getGrammarClassLoader().loadClass( name );
+	}
+	
+	public String execTest() throws IOException{
+		// Set up string template for testing result
+		StringTemplate testResultST = getTemplateGroup().getInstanceOf("testResult");
+		try {
+			/** Set up appropriate path for parser/lexer if using package */
+			if (grammarInfo.getHeader()!=null ) {
+				parserName = grammarInfo.getHeader()+"."+grammarInfo.getGrammarName()+"Parser";
+				lexerName = grammarInfo.getHeader()+"."+grammarInfo.getGrammarName()+"Lexer";
+			}
+			else {
+				parserName = grammarInfo.getGrammarName()+"Parser";
+				lexerName = grammarInfo.getGrammarName()+"Lexer";
+			}
+			
+			/*** Start Unit/Functional Testing ***/
+			// Execute unit test of for parser, lexer and tree grammar
+			if ( grammarInfo.getTreeGrammarName()!=null ) {
+				title = "executing testsuite for tree grammar:"+grammarInfo.getTreeGrammarName()+" walks "+parserName;
+			}
+			else {
+				title = "executing testsuite for grammar:"+grammarInfo.getGrammarName();
+			}
+			executeTests();
+			// End of execution of unit testing
+			
+			// Fill in the template holes with the test results
+			testResultST.setAttribute("title", title);
+			testResultST.setAttribute("num_of_test", numOfTest);
+			testResultST.setAttribute("num_of_failure", numOfFailure);
+			if ( numOfFailure>0 ) {
+				testResultST.setAttribute("failure", failures);
+			}
+			if ( numOfInvalidInput>0 ) {
+				testResultST.setAttribute("has_invalid", true);
+				testResultST.setAttribute("num_of_invalid", numOfInvalidInput);
+				testResultST.setAttribute("invalid", invalids);
+			}
+		}
+		catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+		return testResultST.toString();
+	}
+	
+	private StringTemplateGroup getTemplateGroup() {
+		StringTemplateGroupLoader loader = new CommonGroupLoader("org/antlr/gunit", null);
+		StringTemplateGroup.registerGroupLoader(loader);
+		StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
+		StringTemplateGroup group = StringTemplateGroup.loadGroup("gUnitTestResult");
+		return group;
+	}
+	
+	// TODO: throw more specific exceptions
+	private gUnitTestResult runCorrectParser(String parserName, String lexerName, String rule, String lexicalRule, String treeRule, gUnitTestInput input) throws Exception
+	{
+		if ( lexicalRule!=null ) return runLexer(lexerName, lexicalRule, input);
+		else if ( treeRule!=null ) return runTreeParser(parserName, lexerName, rule, treeRule, input);
+		else return runParser(parserName, lexerName, rule, input);
+	}
+
+	private void executeTests() throws Exception {
+		for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
+			String rule = ts.getRuleName();
+			String lexicalRule = ts.getLexicalRuleName();
+			String treeRule = ts.getTreeRuleName();
+			for ( gUnitTestInput input: ts.testSuites.keySet() ) {	// each rule may contain multiple tests
+				numOfTest++;
+				// Run parser, and get the return value or stdout or stderr if there is
+				gUnitTestResult result = null;
+				AbstractTest test = ts.testSuites.get(input);
+				try {
+					// TODO: create a -debug option to turn on logging, which shows progress of running tests
+					//System.out.print(numOfTest + ". Running rule: " + rule + "; input: '" + input.testInput + "'");
+					result = runCorrectParser(parserName, lexerName, rule, lexicalRule, treeRule, input);
+					// TODO: create a -debug option to turn on logging, which shows progress of running tests
+					//System.out.println("; Expecting " + test.getExpected() + "; Success?: " + test.getExpected().equals(test.getResult(result)));
+				} catch ( InvalidInputException e) {
+					numOfInvalidInput++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.getLine());
+					test.setActual(input.testInput);
+					invalids.add(test);
+					continue;
+				}	// TODO: ensure there's no other exceptions required to be handled here...
+				
+				String expected = test.getExpected();
+				String actual = test.getResult(result);
+				test.setActual(actual);
+				
+				if (actual == null) {
+					numOfFailure++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.getLine());
+					test.setActual("null");
+					failures.add(test);
+					onFail(test);
+				}
+				// the 2nd condition is used for the assertFAIL test of a lexer rule, because BooleanTest returns an err msg instead of 'FAIL' if isLexerTest
+				else if ( expected.equals(actual) || (expected.equals("FAIL")&&!actual.equals("OK") ) ) {
+					numOfSuccess++;
+					onPass(test);
+				}
+				// TODO: something with ACTIONS - at least create action test type and throw exception.
+				else if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// expected Token: ACTION
+					numOfFailure++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.getLine());
+					test.setActual("\t"+"{ACTION} is not supported in the grammarInfo yet...");
+					failures.add(test);
+					onFail(test);
+				}
+				else {
+					numOfFailure++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.getLine());
+					failures.add(test);
+					onFail(test);
+				}
+			}	// end of 2nd for-loop: tests for individual rule
+		}	// end of 1st for-loop: testsuites for grammar
+	}
+
+	// TODO: throw proper exceptions
+	protected gUnitTestResult runLexer(String lexerName, String testRuleName, gUnitTestInput testInput) throws Exception {
+		CharStream input;
+		Class lexer = null;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Set up ANTLR input stream based on input source, file or String */
+			input = getANTLRInputStream(testInput);
+		
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = classForName(lexerName);
+            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
+            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
+            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
+            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
+            
+            Method ruleName = lexer.getMethod("m"+testRuleName, new Class[0]);
+            
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke lexer rule, and get the current index in CharStream */
+            ruleName.invoke(lexObj, new Object[0]);
+            Method ruleName2 = lexer.getMethod("getCharIndex", new Class[0]);
+            int currentIndex = (Integer) ruleName2.invoke(lexObj, new Object[0]);
+            if ( currentIndex!=input.size() ) {
+            	ps2.print("extra text found, '"+input.substring(currentIndex, input.size()-1)+"'");
+            }
+			
+			if ( err.toString().length()>0 ) {
+				gUnitTestResult testResult = new gUnitTestResult(false, err.toString(), true);
+				testResult.setError(err.toString());
+				return testResult;
+			}
+			String stdout = null;
+			if ( out.toString().length()>0 ) {
+				stdout = out.toString();
+			}
+			return new gUnitTestResult(true, stdout, true);
+		} catch (IOException e) {
+			return getTestExceptionResult(e);
+        } catch (ClassNotFoundException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (SecurityException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (NoSuchMethodException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalArgumentException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InstantiationException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalAccessException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InvocationTargetException e) {	// This exception could be caused from ANTLR Runtime Exception, e.g. MismatchedTokenException
+        	return getTestExceptionResult(e);
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        // TODO: verify this:
+        throw new Exception("This should be unreachable?");
+	}
+	
+	// TODO: throw proper exceptions
+	protected gUnitTestResult runParser(String parserName, String lexerName, String testRuleName, gUnitTestInput testInput) throws Exception {
+		CharStream input;
+		Class lexer = null;
+		Class parser = null;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Set up ANTLR input stream based on input source, file or String */
+			input = getANTLRInputStream(testInput);
+			
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = classForName(lexerName);
+            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
+            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
+            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
+            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
+            
+            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
+            
+            parser = classForName(parserName);
+            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
+            Constructor parConstructor = parser.getConstructor(parArgTypes);
+            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args  
+            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser      
+            
+            // set up customized tree adaptor if necessary
+            if ( grammarInfo.getAdaptor()!=null ) {
+            	parArgTypes = new Class[]{TreeAdaptor.class};
+            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", parArgTypes);
+            	classForName("llvm.CC");
+            	Class _treeAdaptor = classForName(grammarInfo.getAdaptor());
+            	_setTreeAdaptor.invoke(parObj, _treeAdaptor.newInstance());
+            }
+            
+            Method ruleName = parser.getMethod(testRuleName);
+            
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke grammar rule, and store if there is a return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+            String astString = null;
+            String stString = null;
+            /** If rule has return value, determine if it contains an AST or a ST */
+            if ( ruleReturn!=null ) {
+                if ( ruleReturn.getClass().toString().indexOf(testRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class _return = classForName(parserName+"$"+testRuleName+"_return");
+                		Method[] methods = _return.getDeclaredMethods();
+                		for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method returnName = _return.getMethod("getTree");
+		                    	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+		                    	astString = tree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method returnName = _return.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) returnName.invoke(ruleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+            
+            /** Invalid input */
+            if ( tokens.index()!=tokens.size() ) {
+            	//throw new InvalidInputException();
+            	ps2.print("Invalid input");
+            }
+			
+			if ( err.toString().length()>0 ) {
+				gUnitTestResult testResult = new gUnitTestResult(false, err.toString());
+				testResult.setError(err.toString());
+				return testResult;
+			}
+			String stdout = null;
+			// TODO: need to deal with the case which has both ST return value and stdout
+			if ( out.toString().length()>0 ) {
+				stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return new gUnitTestResult(true, stdout, astString);
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return new gUnitTestResult(true, stdout, stString);
+			}
+			
+			if ( ruleReturn!=null ) {
+				// TODO: currently only works for a single return with int or String value
+				return new gUnitTestResult(true, stdout, String.valueOf(ruleReturn));
+			}
+			return new gUnitTestResult(true, stdout, stdout);
+		} catch (IOException e) {
+			return getTestExceptionResult(e);
+		} catch (ClassNotFoundException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (SecurityException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (NoSuchMethodException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalArgumentException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InstantiationException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalAccessException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InvocationTargetException e) {	// This exception could be caused from ANTLR Runtime Exception, e.g. MismatchedTokenException
+        	return getTestExceptionResult(e);
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        // TODO: verify this:
+        throw new Exception("This should be unreachable?");
+	}
+	
+	protected gUnitTestResult runTreeParser(String parserName, String lexerName, String testRuleName, String testTreeRuleName, gUnitTestInput testInput) throws Exception {
+		CharStream input;
+		String treeParserPath;
+		Class lexer = null;
+		Class parser = null;
+		Class treeParser = null;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Set up ANTLR input stream based on input source, file or String */
+			input = getANTLRInputStream(testInput);
+			
+			/** Set up appropriate path for tree parser if using package */
+			if ( grammarInfo.getHeader()!=null ) {
+				treeParserPath = grammarInfo.getHeader()+"."+grammarInfo.getTreeGrammarName();
+			}
+			else {
+				treeParserPath = grammarInfo.getTreeGrammarName();
+			}
+		
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = classForName(lexerName);
+            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
+            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
+            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
+            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
+            
+            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
+            
+            parser = classForName(parserName);
+            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
+            Constructor parConstructor = parser.getConstructor(parArgTypes);
+            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args  
+            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser      
+            
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke grammar rule, and get the return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+            
+            Class _return = classForName(parserName+"$"+testRuleName+"_return");            	
+        	Method returnName = _return.getMethod("getTree");
+        	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+
+        	// Walk resulting tree; create tree nodes stream first
+        	CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
+        	// AST nodes have payload that point into token stream
+        	nodes.setTokenStream(tokens);
+        	// Create a tree walker attached to the nodes stream
+        	treeParser = classForName(treeParserPath);
+            Class[] treeParArgTypes = new Class[]{TreeNodeStream.class};		// assign type to tree parser's args
+            Constructor treeParConstructor = treeParser.getConstructor(treeParArgTypes);
+            Object[] treeParArgs = new Object[]{nodes};							// assign value to tree parser's args  
+            Object treeParObj = treeParConstructor.newInstance(treeParArgs);	// makes new instance of tree parser      
+        	// Invoke the tree rule, and store the return value if there is
+            Method treeRuleName = treeParser.getMethod(testTreeRuleName);
+            Object treeRuleReturn = treeRuleName.invoke(treeParObj);
+
+            String astString = null;
+            String stString = null;
+            /** If tree rule has return value, determine if it contains an AST or a ST */
+            if ( treeRuleReturn!=null ) {
+                if ( treeRuleReturn.getClass().toString().indexOf(testTreeRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class _treeReturn = classForName(treeParserPath+"$"+testTreeRuleName+"_return");
+                		Method[] methods = _treeReturn.getDeclaredMethods();
+			            for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method treeReturnName = _treeReturn.getMethod("getTree");
+		                    	CommonTree returnTree = (CommonTree) treeReturnName.invoke(treeRuleReturn);
+		                        astString = returnTree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method treeReturnName = _return.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) treeReturnName.invoke(treeRuleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+          
+            /** Invalid input */
+            if ( tokens.index()!=tokens.size() ) {
+            	//throw new InvalidInputException();
+            	ps2.print("Invalid input");
+            }
+
+			if ( err.toString().length()>0 ) {
+				gUnitTestResult testResult = new gUnitTestResult(false, err.toString());
+				testResult.setError(err.toString());
+				return testResult;
+			}
+			
+			String stdout = null;
+			// TODO: need to deal with the case which has both ST return value and stdout
+			if ( out.toString().length()>0 ) {
+				stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return new gUnitTestResult(true, stdout, astString);
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return new gUnitTestResult(true, stdout, stString);
+			}
+			
+			if ( treeRuleReturn!=null ) {
+				// TODO: again, currently only works for a single return with int or String value
+				return new gUnitTestResult(true, stdout, String.valueOf(treeRuleReturn));
+			}
+			return new gUnitTestResult(true, stdout, stdout);
+		} catch (IOException e) {
+			return getTestExceptionResult(e);
+		} catch (ClassNotFoundException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (SecurityException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (NoSuchMethodException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalArgumentException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InstantiationException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (IllegalAccessException e) {
+        	e.printStackTrace(); System.exit(1);
+        } catch (InvocationTargetException e) {	// note: This exception could be caused from ANTLR Runtime Exception...
+        	return getTestExceptionResult(e);
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        // TODO: verify this:
+        throw new Exception("Should not be reachable?");
+	}
+	
+	// Create ANTLR input stream based on input source, file or String
+	private CharStream getANTLRInputStream(gUnitTestInput testInput) throws IOException {
+		CharStream input;
+		if ( testInput.inputIsFile ) {
+			String filePath = testInput.testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, try to look for it in the dir where the testsuite file is located
+			if ( !testInputFile.exists() ) {
+				testInputFile = new File(this.testsuiteDir, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+				// if still not found, also try to look for it under the package dir
+				else if ( grammarInfo.getHeader()!=null ) {
+					testInputFile = new File("."+File.separator+grammarInfo.getHeader().replace(".", File.separator), filePath);
+					if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+				}
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput.testInput);
+		}
+		return input;
+	}
+	
+	// set up the cause of exception or the exception name into a gUnitTestResult instance
+	private gUnitTestResult getTestExceptionResult(Exception e) {
+		gUnitTestResult testResult;
+    	if ( e.getCause()!=null ) {
+    		testResult = new gUnitTestResult(false, e.getCause().toString(), true);
+    		testResult.setError(e.getCause().toString());
+    	}
+    	else {
+    		testResult = new gUnitTestResult(false, e.toString(), true);
+    		testResult.setError(e.toString());
+    	}
+    	return testResult;
+	}
+
+
+    public void onPass(ITestCase passTest) {
+
+    }
+
+    public void onFail(ITestCase failTest) {
+        
+    }
+	
+}
diff --git a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java b/gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java
similarity index 70%
copy from runtime/Java/src/org/antlr/runtime/EarlyExitException.java
copy to gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java
index 29f0865..4e3519f 100644
--- a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2007-2008 Leon, Jen-Yuan Su
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,21 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.gunit;
 
-/**  The recognizer did not match anything for a (..)+ loop. */
-public class EarlyExitException extends RecognitionException {
-	public int decisionNumber;
-
-	/** Used for remote debugger deserialization */
-	public EarlyExitException() {;}
+/** A class which contains input information of an individual testsuite */
+public class gUnitTestInput {
+	protected String testInput;		// a test input string for a testsuite
+	
+	protected boolean inputIsFile;	// if true, the testInput represents a filename
 	
-	public EarlyExitException(int decisionNumber, IntStream input) {
-		super(input);
-		this.decisionNumber = decisionNumber;
+	protected int line;				// number of line in the script
+	
+	public gUnitTestInput(String testInput, boolean inputIsFile, int line) {
+		this.testInput = testInput;
+		this.inputIsFile = inputIsFile;
+		this.line = line;
 	}
+	
+	public int getLine() { return this.line; }
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java b/gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java
similarity index 58%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
copy to gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java
index 4e6e843..f8263b6 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java
@@ -1,6 +1,6 @@
 /*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,43 +25,53 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit;
 
-import org.antlr.runtime.Token;
-
-import java.util.List;
-
-public class RewriteRuleTokenStream extends RewriteRuleElementStream {
-
-	public RewriteRuleTokenStream(TreeAdaptor adaptor, String elementDescription) {
-		super(adaptor, elementDescription);
+public class gUnitTestResult {
+	
+	private boolean success;
+	private String output;		// stdout
+	private String error;		// stderr
+	private String returned;	// AST (toStringTree) or ST (toString)
+	private boolean isLexerTest;
+	
+	public gUnitTestResult(boolean success, String output) {
+		this.success = success;
+		this.output = output;
 	}
-
-	/** Create a stream with one element */
-	public RewriteRuleTokenStream(TreeAdaptor adaptor,
-								  String elementDescription,
-								  Object oneElement)
-	{
-		super(adaptor, elementDescription, oneElement);
+	
+	public gUnitTestResult(boolean success, String output, boolean isLexerTest) {
+		this(success, output);
+		this.isLexerTest = isLexerTest;
 	}
-
-	/** Create a stream, but feed off an existing list */
-	public RewriteRuleTokenStream(TreeAdaptor adaptor,
-								  String elementDescription,
-								  List elements)
-	{
-		super(adaptor, elementDescription, elements);
+	
+	public gUnitTestResult(boolean success, String output, String returned) {
+		this(success, output);
+		this.returned = returned;
 	}
-
-	public Object next() {
-		return _next();
+		
+	public boolean isSuccess() {
+		return success;
 	}
-
-	protected Object toTree(Object el) {
-		return adaptor.create((Token)el);
+	
+	public String getOutput() {
+		return output;
 	}
-
-	protected Object dup(Object el) {
-		throw new UnsupportedOperationException("dup can't be called for a token stream.");
+	
+	public String getError() {
+		return error;
+	}
+	
+	public String getReturned() {
+		return returned;
+	}
+	
+	public boolean isLexerTest() {
+		return isLexerTest;
+	}
+	
+	public void setError(String error) {
+		this.error = error;
 	}
+	
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java b/gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java
new file mode 100644
index 0000000..336f84f
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java
@@ -0,0 +1,80 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+/** A class which wraps all testsuites for an individual rule */
+import java.util.Map;
+import java.util.LinkedHashMap;
+
+public class gUnitTestSuite {
+	protected String rule = null;			// parser rule name for unit testing
+	protected String lexicalRule = null;	// lexical rule name
+	protected String treeRule = null;		// optional, required for testing tree grammar rule
+	protected boolean isLexicalRule = false;
+	
+	/** A map which stores input/output pairs (individual testsuites). 
+	 *  In other words, it maps input data for unit test (gUnitTestInput object)
+	 *  to an expected output (AbstractTest object).
+	 */
+	protected Map<gUnitTestInput, AbstractTest> testSuites = new LinkedHashMap<gUnitTestInput, AbstractTest>();
+	
+	public gUnitTestSuite() {
+	}
+	
+	public gUnitTestSuite(String rule) {
+		this.rule = rule;
+	}
+	
+	public gUnitTestSuite(String treeRule, String rule) {
+		this.rule = rule;
+		this.treeRule = treeRule;
+	}
+	
+	public void setRuleName(String ruleName) { this.rule = ruleName; }
+	public void setLexicalRuleName(String lexicalRule) { this.lexicalRule = lexicalRule; this.isLexicalRule = true; }
+	public void setTreeRuleName(String treeRuleName) { this.treeRule = treeRuleName; }
+	
+	public String getRuleName() { return this.rule; }
+	public String getLexicalRuleName() { return this.lexicalRule; }
+	public String getTreeRuleName() { return this.treeRule; }
+	public boolean isLexicalRule() { return this.isLexicalRule; }
+	
+	public void addTestCase(gUnitTestInput input, AbstractTest expect) {
+		if ( input!=null && expect!=null ) {
+            /*
+             * modified by shaoting
+             * if rule is null, use lexRule name
+             */
+            //expect.setTestedRuleName(this.rule);
+			expect.setTestedRuleName(this.rule ==null ? this.lexicalRule : this.rule);
+			expect.setTestCaseIndex(this.testSuites.size());
+			this.testSuites.put(input, expect);
+		}
+	}
+	
+}
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java
similarity index 73%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java
index c31e9e2..f8b04dd 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,27 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+package org.antlr.gunit.swingui;
+
+import org.antlr.gunit.swingui.model.ITestCaseInput;
+import java.awt.event.ActionListener;
+import javax.swing.JComponent;
+
+/**
+ *
+ * @author scai
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public abstract class AbstractInputEditor {
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    protected ITestCaseInput input;
+    public void setInput(ITestCaseInput input) {
+        this.input = input;
     }
+
+    protected JComponent comp;
+    public JComponent getControl() { return comp; }
+
+    abstract public void addActionListener(ActionListener l) ;
+
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/gunit/src/main/java/org/antlr/gunit/swingui/IController.java
similarity index 81%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/IController.java
index 815b4e6..cd273d0 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/IController.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,11 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit.swingui;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
-	}
+import java.awt.Component;
+
+public interface IController {
+    public Object getModel() ;
+    public Component getView() ;
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java b/gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java
new file mode 100644
index 0000000..59ce231
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java
@@ -0,0 +1,83 @@
+package org.antlr.gunit.swingui;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javax.swing.ImageIcon;
+
+public class ImageFactory {
+
+    private static ImageFactory singleton ;
+
+    public static ImageFactory getSingleton() {
+        if(singleton == null) singleton = new ImageFactory();
+        return singleton;
+    }
+
+    private ImageFactory() {
+        ACCEPT = getImage("accept.png");
+        ADD = getImage("add.png");
+        DELETE = getImage("delete24.png");
+        TEXTFILE = getImage("textfile24.png");
+        TEXTFILE16 = getImage("textfile16.png");
+        ADDFILE = getImage("addfile24.png");
+        WINDOW16 = getImage("windowb16.png");
+        FAV16 = getImage("favb16.png");
+        SAVE = getImage("floppy24.png");
+        OPEN = getImage("folder24.png");
+        EDIT16 = getImage("edit16.png");
+        FILE16 = getImage("file16.png");
+        RUN_PASS = getImage("runpass.png");
+        RUN_FAIL = getImage("runfail.png");
+        TESTSUITE = getImage("testsuite.png");
+        TESTGROUP = getImage("testgroup.png");
+        TESTGROUPX = getImage("testgroupx.png");
+        NEXT = getImage("next24.png");
+    }
+    
+    private ImageIcon getImage(String name) {
+        name = IMG_DIR + name;
+        try {
+            final ClassLoader loader = ImageFactory.class.getClassLoader();
+            final InputStream in = loader.getResourceAsStream(name);
+            final byte[] data = new byte[in.available()];
+            in.read(data);
+            in.close();
+            return new ImageIcon(data);
+        } catch (IOException ex) {
+            System.err.println("Can't load image file: " + name);
+            System.exit(1);
+        } catch(RuntimeException e) {
+            System.err.println("Can't load image file: " + name);
+            System.exit(1);
+        }
+        return null;
+    }
+    
+    private static final String IMG_DIR = "org/antlr/gunit/swingui/images/";
+
+    public ImageIcon ACCEPT;
+    public ImageIcon ADD;
+    public ImageIcon DELETE;
+    public ImageIcon TEXTFILE ;
+    public ImageIcon ADDFILE;
+
+    public ImageIcon TEXTFILE16 ;
+    public ImageIcon WINDOW16;
+    public ImageIcon FAV16;
+    public ImageIcon SAVE ;
+
+    public ImageIcon OPEN ;
+    public ImageIcon EDIT16;
+    public ImageIcon FILE16;
+    public ImageIcon NEXT;
+
+    public ImageIcon RUN_PASS;
+    public ImageIcon RUN_FAIL;
+    public ImageIcon TESTSUITE;
+    public ImageIcon TESTGROUP ;
+    public ImageIcon TESTGROUPX;
+
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java b/gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java
new file mode 100644
index 0000000..46275a4
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java
@@ -0,0 +1,160 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.gunit.swingui;
+
+import javax.swing.event.ListDataListener;
+import org.antlr.gunit.swingui.model.Rule;
+import org.antlr.gunit.swingui.ImageFactory;
+import java.awt.Color;
+import java.awt.Component;
+import java.awt.Dimension;
+import java.awt.event.MouseAdapter;
+import java.awt.event.MouseEvent;
+import java.util.List;
+import javax.swing.BorderFactory;
+import javax.swing.DefaultListModel;
+import javax.swing.JLabel;
+import javax.swing.JList;
+import javax.swing.JScrollPane;
+import javax.swing.ListCellRenderer;
+import javax.swing.ListModel;
+import javax.swing.ListSelectionModel;
+import javax.swing.event.ListSelectionListener;
+import org.antlr.gunit.swingui.model.TestSuite;
+
+/**
+ * Controller for the scrollable list of grammar rules shown in the gUnit
+ * workspace. The list model is a read-only adapter over a {@link TestSuite};
+ * rules that contain test cases are marked with a favorite icon.
+ */
+public class RuleListController implements IController {
+
+    /* Sub-controls */
+    private final JList list = new JList();
+    private final JScrollPane scroll = new JScrollPane( list,
+            JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+            JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED);
+
+    /* Model */
+    private ListModel model = null;
+    private TestSuite testSuite = null;
+
+    public RuleListController() {
+        this.initComponents();
+    }
+
+    /** The scroll pane wrapping the rule list; this is the whole view. */
+    public JScrollPane getView() {
+        return scroll;
+    }
+
+    // Rebuilds the adapter model over the new suite. The suite must be set
+    // before RuleListModel is constructed (its ctor null-checks testSuite).
+    private void setTestSuite(TestSuite newTestSuite) {
+        testSuite = newTestSuite;
+        model = new RuleListModel();
+        list.setModel(model);
+    }
+    
+    /** Binds a test suite, pre-selects the first rule and repaints. */
+    public void initialize(TestSuite ts) {
+        setTestSuite(ts);
+        if(model.getSize() > 0) list.setSelectedIndex(0);
+        // updateUI() is used throughout this package as a blunt repaint,
+        // since RuleListModel never fires ListDataListener events.
+        list.updateUI();
+    }
+
+
+    /**
+     * Initialize view.
+     */
+    private void initComponents() {
+
+        scroll.setViewportBorder(BorderFactory.createEtchedBorder());
+        scroll.setBorder(BorderFactory.createTitledBorder(
+                BorderFactory.createEmptyBorder(), "Rules"));
+        scroll.setOpaque(false);
+
+        list.setOpaque(false);
+        list.setSelectionMode(ListSelectionModel.SINGLE_INTERVAL_SELECTION);
+        list.setLayoutOrientation(JList.VERTICAL);
+        list.setCellRenderer(new RuleListItemRenderer());
+    }
+
+    /** Lets the owning workspace react to rule-selection changes. */
+    public void setListSelectionListener(ListSelectionListener l) {
+        this.list.addListSelectionListener(l);
+    }
+
+    public Object getModel() {
+        return model;
+    }
+
+
+    /* ITEM RENDERER */
+
+    /**
+     * Renders one rule row: rule name, plus a favorite icon when the rule
+     * has test cases.
+     * NOTE(review): ignores the isSelected/hasFocus parameters and derives
+     * selection from list.getSelectedValue() instead; works for single
+     * selection but should be confirmed before reuse elsewhere.
+     */
+    private class RuleListItemRenderer extends JLabel implements ListCellRenderer{
+
+        public RuleListItemRenderer() {
+            this.setPreferredSize(new Dimension(50, 18));
+        }
+
+        public Component getListCellRendererComponent(
+                JList list, Object value, int index,
+                boolean isSelected, boolean hasFocus) {
+
+            if(value instanceof Rule) {
+                final Rule item = (Rule) value;
+                setText(item.toString());
+                setForeground(list.getForeground());
+
+                setIcon(item.getNotEmpty() ? ImageFactory.getSingleton().FAV16 : null);
+
+                if(list.getSelectedValue() == item ) {
+                    setBackground(Color.LIGHT_GRAY);
+                    setOpaque(true);
+                } else {
+                    setOpaque(false);
+                }
+
+            } else {
+                // Defensive: the model only ever hands out Rule instances.
+                this.setText("Error!");
+            }
+            return this;
+        }
+    }
+
+    /**
+     * Read-only ListModel view over the bound TestSuite's rules. Listener
+     * registration is intentionally ignored; callers refresh the JList via
+     * updateUI() after mutating the suite.
+     */
+    private class RuleListModel implements ListModel {
+        
+        public RuleListModel() {
+            if(testSuite == null) 
+                throw new NullPointerException("Null test suite");
+        } 
+        
+        public int getSize() {
+            return testSuite.getRuleCount();
+        }
+
+        public Object getElementAt(int index) {
+            return testSuite.getRule(index);
+        }
+
+        public void addListDataListener(ListDataListener l) {}
+        public void removeListDataListener(ListDataListener l) {}
+    }
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java b/gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java
new file mode 100644
index 0000000..3cd90c4
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java
@@ -0,0 +1,240 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.gunit.swingui;
+
+import java.awt.BorderLayout;
+import java.awt.Color;
+import java.awt.Component;
+import java.awt.Dimension;
+import javax.swing.BorderFactory;
+import javax.swing.BoxLayout;
+import javax.swing.JLabel;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JScrollPane;
+import javax.swing.JTextArea;
+import javax.swing.JTree;
+import javax.swing.event.TreeModelListener;
+import javax.swing.tree.DefaultMutableTreeNode;
+import javax.swing.tree.DefaultTreeModel;
+import javax.swing.tree.TreeCellRenderer;
+import javax.swing.tree.TreeModel;
+import javax.swing.tree.TreePath;
+import org.antlr.gunit.swingui.ImageFactory;
+import org.antlr.gunit.swingui.model.*;
+
+/**
+ *
+ * @author scai
+ */
+/**
+ * Controller for the test-run result view. Renders a {@link TestSuite} as a
+ * three-level tree -- suite / rule group / test case -- with pass/fail icons
+ * supplied by {@link ImageFactory}.
+ *
+ * @author scai
+ */
+public class RunnerController implements IController {
+
+    /* VIEW */
+    private RunnerView view = new RunnerView();
+
+    /** Panel hosting the result tree inside a scroll pane. */
+    public class RunnerView extends JPanel {
+
+        private JTextArea textArea = new JTextArea();
+
+        private JTree tree = new JTree();
+
+        private JScrollPane scroll = new JScrollPane(tree,
+                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+                JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED);
+
+        /** (Re)builds the view; called before every result display. */
+        public void initComponents() {
+            tree.setOpaque(false);
+            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            scroll.setOpaque(false);
+            this.setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
+            this.add(scroll);
+            this.setBorder(BorderFactory.createEmptyBorder());
+            this.setOpaque(false);
+        }
+    }
+
+    public RunnerController() {
+    }
+
+    /** This controller keeps no standalone model; the tree owns the data. */
+    public Object getModel() {
+        return null;
+    }
+
+    public Component getView() {
+        return view;
+    }
+
+    public void update() {
+        view.initComponents();
+    }
+
+    /** Displays the pass/fail tree for a completed suite run. */
+    public void OnShowSuiteResult(TestSuite suite) {
+        update();
+        view.tree.setModel(new RunnerTreeModel(suite));
+        view.tree.setCellRenderer(new RunnerTreeRenderer());
+    }
+
+    /**
+     * Per-rule result display. Intentionally a no-op beyond refreshing the
+     * view; the previous commented-out text-area rendering was removed.
+     */
+    public void OnShowRuleResult(Rule rule) {
+        update();
+    }
+
+    /** Root node: the grammar; children are the non-empty rule groups. */
+    private class TestSuiteTreeNode extends DefaultMutableTreeNode {
+
+        private TestSuite data;
+
+        public TestSuiteTreeNode(TestSuite suite) {
+            super(suite.getGrammarName());
+            // Only rules that actually contain test cases appear in the tree.
+            for(int i = 0; i < suite.getRuleCount(); ++i) {
+                final Rule rule = suite.getRule(i);
+                if(rule.getNotEmpty()) this.add(new TestGroupTreeNode(rule));
+            }
+            data = suite;
+        }
+
+        @Override
+        public String toString() {
+            return String.format("%s (%d test groups)",
+                    data.getGrammarName(),
+                    this.getChildCount());
+        }
+    }
+
+    /** Second level: one rule with its test cases; tracks failure state. */
+    private class TestGroupTreeNode extends DefaultMutableTreeNode {
+
+        private Rule data;
+        /** Set as a side effect of toString(); read by RunnerTreeRenderer. */
+        private boolean hasFail = false;
+
+        private TestGroupTreeNode(Rule rule) {
+            super(rule.getName());
+            for(TestCase tc: rule.getTestCases()) {
+                this.add(new TestCaseTreeNode(tc));
+            }
+            data = rule;
+        }
+
+        @Override
+        public String toString() {
+            int iPass = 0;
+            int iFail = 0;
+            for(TestCase tc: data.getTestCases()) {
+                if(tc.isPass())
+                    ++iPass;
+                else
+                    ++iFail;
+            }
+
+            // Cache failure state so the renderer can pick the right icon.
+            hasFail = iFail > 0;
+
+            return String.format("%s (pass %d, fail %d)",
+                data.getName(), iPass, iFail);
+        }
+    }
+
+    /** Leaf node: a single test case. */
+    private class TestCaseTreeNode extends DefaultMutableTreeNode {
+
+        private TestCase data;
+
+        private TestCaseTreeNode(TestCase tc) {
+            super(tc.toString());
+            data = tc;
+        }
+    }
+
+    private class RunnerTreeModel extends DefaultTreeModel {
+
+        public RunnerTreeModel(TestSuite testSuite) {
+            super(new TestSuiteTreeNode(testSuite));
+        }
+    }
+
+    /** Maps each node type to its label text and status icon. */
+    private class RunnerTreeRenderer implements TreeCellRenderer {
+
+        public Component getTreeCellRendererComponent(JTree tree, Object value,
+                boolean selected, boolean expanded, boolean leaf, int row,
+                boolean hasFocus) {
+
+            JLabel label = new JLabel();
+
+            if(value instanceof TestSuiteTreeNode) {
+                label.setText(value.toString());
+                label.setIcon(ImageFactory.getSingleton().TESTSUITE);
+            } else if(value instanceof TestGroupTreeNode) {
+                TestGroupTreeNode node = (TestGroupTreeNode) value;
+                label.setText(value.toString());
+                label.setIcon(node.hasFail ?
+                    ImageFactory.getSingleton().TESTGROUPX :
+                    ImageFactory.getSingleton().TESTGROUP);
+            } else if(value instanceof TestCaseTreeNode) {
+                TestCaseTreeNode node = (TestCaseTreeNode) value;
+                label.setIcon(node.data.isPass() ?
+                    ImageFactory.getSingleton().RUN_PASS :
+                    ImageFactory.getSingleton().RUN_FAIL);
+                label.setText(value.toString());
+            } else {
+                // Message fixed: was "Invalide tree node type + " with a
+                // stray "+ " baked into the literal.
+                throw new IllegalArgumentException(
+                    "Invalid tree node type: " + value.getClass().getName());
+            }
+
+            return label;
+        }
+    }
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java b/gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java
new file mode 100644
index 0000000..e3f4eab
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java
@@ -0,0 +1,93 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui;
+
+import java.awt.Dimension;
+import java.awt.FlowLayout;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JProgressBar;
+
+/**
+ * Controller for the workspace status bar: a status message, a progress
+ * bar, and the name of the rule currently in use, laid out left-to-right.
+ */
+public class StatusBarController implements IController {
+
+    /** Container holding every status widget. */
+    private final JPanel statusPanel = new JPanel();
+
+    /** General status message; starts as "Ready". */
+    private final JLabel messageLabel = new JLabel("Ready");
+    /** Name of the currently active rule (text after the "Rule: " hint). */
+    private final JLabel ruleNameLabel = new JLabel("");
+    /** Progress indicator for long-running operations. */
+    private final JProgressBar progressBar = new JProgressBar();
+    
+    public StatusBarController() {
+        initComponents();
+    }
+
+    /** Lays out: [message] [progress] [Rule: ] [rule name]. */
+    private void initComponents() {
+        messageLabel.setPreferredSize(new Dimension(300, 20));
+        messageLabel.setHorizontalTextPosition(JLabel.LEFT);
+        progressBar.setPreferredSize(new Dimension(100, 15));
+
+        final FlowLayout layout = new FlowLayout();
+        layout.setAlignment(FlowLayout.LEFT);
+        statusPanel.setLayout(layout);
+
+        statusPanel.add(messageLabel);
+        statusPanel.add(progressBar);
+        statusPanel.add(new JLabel("Rule: "));
+        statusPanel.add(ruleNameLabel);
+
+        statusPanel.setOpaque(false);
+        statusPanel.setBorder(javax.swing.BorderFactory.createEmptyBorder());
+    }
+
+    /** Updates the general status message. */
+    public void setText(String text) {
+        messageLabel.setText(text);
+    }
+
+    /** Updates the displayed rule name. */
+    public void setRule(String name) {
+        ruleNameLabel.setText(name);
+    }
+
+    /** This controller has no model. */
+    public Object getModel() {
+        throw new UnsupportedOperationException("Not supported yet.");
+    }
+
+    public JPanel getView() {
+        return statusPanel;
+    }
+
+    /** Switches the progress bar in or out of indeterminate mode. */
+    public void setProgressIndetermined(boolean value) {
+        progressBar.setIndeterminate(value);
+    }
+    
+    /** Shows a determinate progress value. */
+    public void setProgress(int value) {
+        progressBar.setIndeterminate(false);
+        progressBar.setValue(value);
+    }
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java b/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java
new file mode 100644
index 0000000..30cf0ae
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java
@@ -0,0 +1,633 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.gunit.swingui;
+
+import org.antlr.gunit.swingui.model.*;
+import org.antlr.gunit.swingui.ImageFactory;
+import java.awt.*;
+import java.awt.event.*;
+import java.util.HashMap;
+import javax.swing.*;
+import javax.swing.event.*;
+
+/**
+ *
+ * @author scai
+ */
+public class TestCaseEditController implements IController {
+
+    // Root view: toolbar (west) | case list (center) | detail editors (east).
+    private JPanel view = new JPanel();
+
+    private JScrollPane scroll;
+    private JPanel paneDetail;
+    private AbstractEditorPane paneDetailInput, paneDetailOutput;
+    private JToolBar toolbar;
+    private JList listCases;
+    private ListModel listModel ;
+
+    // Fired after a test case is added or removed.
+    // NOTE(review): never initialized in this class; the owner must assign
+    // it before add/remove actions run, or they NPE -- confirm with callers.
+    public ActionListener onTestCaseNumberChange;
+
+    /* EDITORS */
+    private InputFileEditor editInputFile;
+    private InputStringEditor editInputString;
+    private InputMultiEditor editInputMulti;
+    private OutputResultEditor editOutputResult;
+    private OutputAstEditor editOutputAST;
+    private OutputStdEditor editOutputStd;
+    private OutputReturnEditor editOutputReturn;
+    
+    private JComboBox comboInputType, comboOutputType;
+
+    /* TYPE NAME */
+    private static final String IN_TYPE_STRING = "Single-line Text";
+    private static final String IN_TYPE_MULTI = "Multi-line Text";
+    private static final String IN_TYPE_FILE = "Disk File";
+    private static final String OUT_TYPE_BOOL = "OK or Fail";
+    private static final String OUT_TYPE_AST = "AST";
+    private static final String OUT_TYPE_STD = "Standard Output";
+    private static final String OUT_TYPE_RET = "Return Value";
+
+    private static final String DEFAULT_IN_SCRIPT = "";
+    private static final String DEFAULT_OUT_SCRIPT = "";
+
+    // Items shown in the input/output type combo boxes, in display order.
+    private static final Object[] INPUT_TYPE =  {
+        IN_TYPE_STRING, IN_TYPE_MULTI, IN_TYPE_FILE
+    };
+
+    private static final Object[] OUTPUT_TYPE = {
+        OUT_TYPE_BOOL, OUT_TYPE_AST, OUT_TYPE_STD, OUT_TYPE_RET
+    };
+
+    /* SIZE */
+    private static final int TEST_CASE_DETAIL_WIDTH = 300;
+    private static final int TEST_EDITOR_WIDTH = 280;
+    private static final int TEST_CASE_DETAIL_HEIGHT = 250;
+    private static final int TEST_EDITOR_HEIGHT = 120;
+
+    /* MODEL */
+    private Rule currentRule = null;
+    private TestCase currentTestCase = null;
+
+    /* END OF MODEL*/
+
+    // Maps each concrete input/output model class to its display name, so
+    // the editors can show the matching combo-box entry.
+    private static final HashMap<Class, String> TypeNameTable;
+    static {
+        TypeNameTable = new HashMap<Class, String> ();
+        TypeNameTable.put(TestCaseInputString.class, IN_TYPE_STRING);
+        TypeNameTable.put(TestCaseInputMultiString.class, IN_TYPE_MULTI);
+        TypeNameTable.put(TestCaseInputFile.class, IN_TYPE_FILE);
+
+        TypeNameTable.put(TestCaseOutputResult.class, OUT_TYPE_BOOL);
+        TypeNameTable.put(TestCaseOutputAST.class, OUT_TYPE_AST);
+        TypeNameTable.put(TestCaseOutputStdOut.class, OUT_TYPE_STD);
+        TypeNameTable.put(TestCaseOutputReturn.class, OUT_TYPE_RET);
+    }
+
+    //private WorkSpaceView owner;
+
+    /**
+     * @param workspace owning view; currently unused -- kept only for API
+     *        compatibility (the corresponding "owner" field is commented out).
+     */
+    public TestCaseEditController(WorkSpaceView workspace) {
+        //this.owner = workspace;
+        initComponents();
+    }
+
+    public TestCaseEditController() {
+        initComponents();
+    }
+
+    /**
+     * Binds a rule to this editor. The Rule itself is used as the JList's
+     * ListModel (its test cases are the list elements), and any previous
+     * test-case selection is cleared.
+     */
+    public void OnLoadRule(Rule rule) {
+        if(rule == null) throw new IllegalArgumentException("Null");
+        this.currentRule = rule;
+        this.currentTestCase = null;
+        this.listModel = rule;
+        this.listCases.setModel(this.listModel);      
+    }
+
+    /** Selects the given test case in the list and makes it current. */
+    public void setCurrentTestCase(TestCase testCase) {
+        if(testCase == null) throw new IllegalArgumentException("Null");
+        this.listCases.setSelectedValue(testCase, true);
+        this.currentTestCase = testCase;
+    }
+
+    public Rule getCurrentRule() {
+        return this.currentRule;
+    }
+    
+    /**
+     * Builds the whole editor: the test-case list with its toolbar on the
+     * left, and the input/output detail editor panes on the right.
+     */
+    private void initComponents() {
+
+        /* CASE LIST */
+        listCases = new JList();
+        listCases.addListSelectionListener(new TestCaseListSelectionListener());
+        // listRenderer is declared later in this file (outside this view).
+        listCases.setCellRenderer(listRenderer);
+        listCases.setOpaque(false);
+        
+        scroll = new JScrollPane(listCases);
+        scroll.setBorder(BorderFactory.createTitledBorder(
+                BorderFactory.createEmptyBorder(), "Test Cases"));
+        scroll.setOpaque(false);
+        scroll.setViewportBorder(BorderFactory.createEtchedBorder());
+
+        /* CASE DETAIL */
+
+        editInputString = new InputStringEditor();
+        editInputMulti = new InputMultiEditor();
+        editInputFile = new InputFileEditor();
+
+        editOutputResult = new OutputResultEditor();
+        editOutputAST = new OutputAstEditor();
+        editOutputStd = new OutputStdEditor();
+        editOutputReturn = new OutputReturnEditor();
+        
+        paneDetail = new JPanel();
+        paneDetail.setBorder(BorderFactory.createEmptyBorder());
+        paneDetail.setOpaque(false);
+
+        // Changing either combo swaps the corresponding editor pane.
+        comboInputType = new JComboBox(INPUT_TYPE);
+        comboInputType.addActionListener(new ActionListener() {
+            public void actionPerformed(ActionEvent event) {
+                OnInputTestCaseTypeChanged(comboInputType.getSelectedItem());
+            }
+        });
+        comboOutputType = new JComboBox(OUTPUT_TYPE);
+        comboOutputType.addActionListener(new ActionListener() {
+            public void actionPerformed(ActionEvent event) {
+                OnOutputTestCaseTypeChanged(comboOutputType.getSelectedItem());
+            }
+        });
+        paneDetailInput = new InputEditorPane(comboInputType);
+        paneDetailOutput = new OutputEditorPane(comboOutputType);
+
+        BoxLayout layout = new BoxLayout(paneDetail, BoxLayout.PAGE_AXIS);
+        paneDetail.setLayout(layout);
+        
+        paneDetail.add(this.paneDetailInput);
+        paneDetail.add(this.paneDetailOutput);
+
+        /* TOOLBAR */
+        toolbar = new JToolBar("Edit TestCases", JToolBar.VERTICAL);
+        toolbar.setFloatable(false);
+        toolbar.add(new AddTestCaseAction());
+        toolbar.add(new RemoveTestCaseAction());
+
+        /* COMPOSITE */
+        view.setLayout(new BorderLayout());
+        view.setBorder(BorderFactory.createEmptyBorder());
+        view.setOpaque(false);
+        view.add(toolbar, BorderLayout.WEST);
+        view.add(scroll, BorderLayout.CENTER);
+        view.add(paneDetail, BorderLayout.EAST);
+    }
+
+    /**
+     * Syncs the input editor pane with the current test case: picks the
+     * editor widget matching the input's concrete type, fills in its script
+     * text, and selects the matching combo-box entry. With no current test
+     * case the pane shows its placeholder (editor == null).
+     */
+    private void updateInputEditor() {
+        JComponent editor = null;
+
+        if(currentTestCase != null ) {
+            ITestCaseInput input = this.currentTestCase.getInput();
+            if(input instanceof TestCaseInputString) {
+                this.editInputString.setText(input.getScript());
+                editor = this.editInputString;
+                comboInputType.setSelectedItem(IN_TYPE_STRING);
+            } else if(input instanceof TestCaseInputMultiString) {
+                this.editInputMulti.setText(input.getScript());
+                editor = this.editInputMulti.getView();
+                comboInputType.setSelectedItem(IN_TYPE_MULTI);
+            } else if(input instanceof TestCaseInputFile) {
+                this.editInputFile.setText(input.getScript());
+                editor = this.editInputFile;
+                comboInputType.setSelectedItem(IN_TYPE_FILE);
+            } else {
+                throw new Error("Wrong type");
+            }
+        }
+        
+        paneDetailInput.setEditor(editor);
+    }
+
+    /**
+     * Output-side counterpart of updateInputEditor(): selects and populates
+     * the editor widget for the current test case's output type.
+     */
+    private void updateOutputEditor() {
+        JComponent editor = null;
+        
+        if(currentTestCase != null) {
+            
+            ITestCaseOutput output = this.currentTestCase.getOutput();
+
+            if(output instanceof TestCaseOutputAST) {
+
+                this.editOutputAST.setText(output.getScript());
+                editor = this.editOutputAST.getView();
+                comboOutputType.setSelectedItem(OUT_TYPE_AST);
+
+            } else if(output instanceof TestCaseOutputResult) {
+
+                this.editOutputResult.setValue(output.getScript());
+                editor = this.editOutputResult;
+                comboOutputType.setSelectedItem(OUT_TYPE_BOOL);
+
+            } else if(output instanceof TestCaseOutputStdOut) {
+
+                this.editOutputStd.setText(output.getScript());
+                editor = this.editOutputStd.getView();
+                comboOutputType.setSelectedItem(OUT_TYPE_STD);
+
+            } else if(output instanceof TestCaseOutputReturn) {
+
+                this.editOutputReturn.setText(output.getScript());
+                editor = this.editOutputReturn.getView();
+                comboOutputType.setSelectedItem(OUT_TYPE_RET);
+
+            } else {
+
+                throw new Error("Wrong type");
+                
+            }
+
+        }
+        this.paneDetailOutput.setEditor(editor);
+    }
+
+    /**
+     * Replaces the current test case's input object when the user picks a
+     * different input type from the combo box, then refreshes the input
+     * editor pane. Leaves the model untouched when the selected type already
+     * matches the existing input's type.
+     */
+    private void OnInputTestCaseTypeChanged(Object inputTypeStr) {
+        if(this.currentTestCase != null) {
+            ITestCaseInput input;
+            // equals() instead of ==: the combo box normally hands back the
+            // interned constants (so == happened to work), but value equality
+            // is the correct contract for String comparison.
+            if(IN_TYPE_STRING.equals(inputTypeStr)) {
+                input = new TestCaseInputString(DEFAULT_IN_SCRIPT);
+            } else if(IN_TYPE_MULTI.equals(inputTypeStr)) {
+                input = new TestCaseInputMultiString(DEFAULT_IN_SCRIPT);
+            } else if(IN_TYPE_FILE.equals(inputTypeStr)) {
+                input = new TestCaseInputFile(DEFAULT_IN_SCRIPT);
+            } else {
+                throw new Error("Wrong Type");
+            }
+
+            // No-op when the type did not actually change, preserving the
+            // user's existing script text.
+            if(input.getClass().equals(this.currentTestCase.getInput().getClass()))
+                return;
+
+            this.currentTestCase.setInput(input);
+        }
+        this.updateInputEditor();
+    }
+
+    /**
+     * Output-side counterpart of {@link #OnInputTestCaseTypeChanged}:
+     * swaps the current test case's output object on type change.
+     */
+    private void OnOutputTestCaseTypeChanged(Object outputTypeStr) {
+        if(this.currentTestCase != null) {
+            ITestCaseOutput output;
+            if(OUT_TYPE_AST.equals(outputTypeStr)) {
+                output = new TestCaseOutputAST(DEFAULT_OUT_SCRIPT);
+            } else if(OUT_TYPE_BOOL.equals(outputTypeStr)) {
+                output = new TestCaseOutputResult(false);
+            } else if(OUT_TYPE_STD.equals(outputTypeStr)) {
+                output = new TestCaseOutputStdOut(DEFAULT_OUT_SCRIPT);
+            } else if(OUT_TYPE_RET.equals(outputTypeStr)) {
+                output = new TestCaseOutputReturn(DEFAULT_OUT_SCRIPT);
+            } else {
+                throw new Error("Wrong Type");
+            }
+
+            if(output.getClass().equals(this.currentTestCase.getOutput().getClass()))
+                return;
+
+            this.currentTestCase.setOutput(output);
+        }
+        this.updateOutputEditor();
+    }
+
+
+    /**
+     * Makes the given test case (possibly null) current and refreshes both
+     * editor panes to show it.
+     */
+    private void OnTestCaseSelected(TestCase testCase) {
+        //if(testCase == null) throw new RuntimeException("Null TestCase");
+        this.currentTestCase = testCase;
+        updateInputEditor();
+        updateOutputEditor();
+
+    }
+
+    /**
+     * Appends a fresh test case (empty string input, expected-OK output) to
+     * the current rule, selects it, and notifies the count-change listener.
+     * NOTE(review): NPEs if onTestCaseNumberChange was never assigned --
+     * confirm the owner always sets it.
+     */
+    private void OnAddTestCase() {
+        if(currentRule == null) return;
+        
+        final TestCase newCase = new TestCase(
+                new TestCaseInputString(""),
+                new TestCaseOutputResult(true));
+        this.currentRule.addTestCase(newCase);
+        setCurrentTestCase(newCase);
+
+        this.listCases.setSelectedValue(newCase, true);
+        this.listCases.updateUI();
+        this.OnTestCaseSelected(newCase);
+        this.onTestCaseNumberChange.actionPerformed(null);
+    }
+
+    /**
+     * Removes the current test case from the rule, then re-selects whatever
+     * the JList reports as selected (or clears the editors when nothing is).
+     */
+    private void OnRemoveTestCase() {
+        if(currentTestCase == null) return;
+        currentRule.removeElement(currentTestCase);
+        listCases.updateUI();
+
+        final TestCase nextActiveCase = listCases.isSelectionEmpty() ?
+            null : (TestCase) listCases.getSelectedValue() ;
+        OnTestCaseSelected(nextActiveCase);
+        this.onTestCaseNumberChange.actionPerformed(null);
+    }
+
+    public Object getModel() {
+        return currentRule;
+    }
+
+    public Component getView() {
+        return view;
+    }
+
+    /* EDITOR CONTAINER */
+
+    /**
+     * Titled pane holding a type-selector combo box above a swappable editor
+     * widget. A placeholder label keeps the pane's size when no editor is
+     * installed.
+     */
+    abstract public class AbstractEditorPane extends JPanel {
+
+        private JComboBox combo;
+        private JComponent editor;
+        private String title;
+        private JLabel placeHolder = new JLabel();
+
+        public AbstractEditorPane(JComboBox comboBox, String title) {
+            this.combo = comboBox;
+            this.editor = placeHolder;
+            this.title = title;
+            this.initComponents();
+        }
+
+        private void initComponents() {
+            placeHolder.setPreferredSize(new Dimension(
+                    TEST_CASE_DETAIL_WIDTH, TEST_CASE_DETAIL_HEIGHT));
+            this.setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
+            // NOTE(review): BorderLayout constraints are passed to a
+            // BoxLayout container; BoxLayout ignores them and stacks the
+            // components vertically, which matches the visible intent.
+            this.add(combo, BorderLayout.NORTH);
+            this.add(editor, BorderLayout.CENTER);
+            this.setOpaque(false);
+            this.setBorder(BorderFactory.createTitledBorder(title));
+            this.setPreferredSize(new Dimension(
+                    TEST_CASE_DETAIL_WIDTH, TEST_CASE_DETAIL_HEIGHT));
+        }
+
+        /** Swaps in a new editor widget; null restores the placeholder. */
+        public void setEditor(JComponent newEditor) {
+            if(newEditor == null) newEditor = placeHolder;
+            this.remove(editor);
+            this.add(newEditor);
+            this.editor = newEditor;
+            this.updateUI();
+        }
+    }
+
+    public class InputEditorPane extends AbstractEditorPane {
+        public InputEditorPane(JComboBox comboBox) {
+            super(comboBox, "Input");
+        }
+    }
+
+    public class OutputEditorPane extends AbstractEditorPane {
+        public OutputEditorPane(JComboBox comboBox) {
+            super(comboBox, "Output");
+        }
+    }
+
+    /* INPUT EDITORS */
+
+    public class InputStringEditor extends JTextField implements CaretListener { // single-line input-script editor
+        public InputStringEditor() {
+            super();
+
+            this.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            this.addCaretListener(this);
+        }
+
+        public void caretUpdate(CaretEvent arg0) { // push every edit straight into the model
+            currentTestCase.getInput().setScript(getText()); // NOTE(review): NPE if no test case is selected — confirm callers guarantee a selection
+            listCases.updateUI();
+        }
+    }
+
+    public class InputMultiEditor implements CaretListener { // multi-line input-script editor wrapped in a scroll pane
+        private JTextArea textArea = new JTextArea(20, 30);
+        private JScrollPane scroll = new JScrollPane(textArea,
+                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+                JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
+
+        public InputMultiEditor() {
+            super();
+            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            textArea.addCaretListener(this);
+        }
+
+        public void caretUpdate(CaretEvent arg0) { // mirror edits into the model on every caret move
+            currentTestCase.getInput().setScript(getText());
+            listCases.updateUI();
+        }
+
+        public String getText() {
+            return textArea.getText();
+        }
+
+        public void setText(String text) {
+            textArea.setText(text);
+        }
+
+        public JComponent getView() { // the scroll pane is the embeddable component
+            return scroll;
+        }
+    }
+
+    public class InputFileEditor extends InputStringEditor {};
+
+    public class OutputResultEditor extends JPanel implements ActionListener { // OK/Fail toggle editor for boolean expected results
+        
+        private JToggleButton tbFail, tbOk; // mutually exclusive via ButtonGroup
+
+        public OutputResultEditor() {
+            super();
+
+            tbFail = new JToggleButton("Fail");
+            tbOk = new JToggleButton("OK");
+            ButtonGroup group = new ButtonGroup();
+            group.add(tbFail);
+            group.add(tbOk);
+
+            this.add(tbFail);
+            this.add(tbOk);
+
+            this.tbFail.addActionListener(this);
+            this.tbOk.addActionListener(this);
+
+            this.setPreferredSize(
+                    new Dimension(TEST_EDITOR_WIDTH, 100));
+        }
+
+        public void actionPerformed(ActionEvent e) { // write the chosen verdict into the model
+            TestCaseOutputResult output =
+                    (TestCaseOutputResult) currentTestCase.getOutput();
+
+            if(e.getSource() == tbFail) {
+                output.setScript(false);
+            } else {
+                output.setScript(true);
+            }
+
+            listCases.updateUI(); // refresh the case list labels
+        }
+
+        public void setValue(String value) { // select the toggle matching the stored script value
+            if(TestCaseOutputResult.OK.equals(value)) {
+                this.tbOk.setSelected(true);
+            } else {
+                this.tbFail.setSelected(true);
+            }
+        }
+    }
+    
+
+    public class OutputAstEditor implements CaretListener { // multi-line editor for expected AST / text output
+        private JTextArea textArea = new JTextArea(20, 30);
+        private JScrollPane scroll = new JScrollPane(textArea,
+                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+                JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
+
+        public OutputAstEditor() {
+            super();
+            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            textArea.addCaretListener(this);
+        }
+
+        public void caretUpdate(CaretEvent arg0) { // mirror edits into the model on every caret move
+            currentTestCase.getOutput().setScript(getText()); // NOTE(review): NPE if no test case is selected — confirm callers guarantee a selection
+            listCases.updateUI();
+        }
+
+        public void setText(String text) {
+            this.textArea.setText(text);
+        }
+
+        public String getText() {
+            return this.textArea.getText();
+        }
+
+        public JScrollPane getView() { // the scroll pane is the embeddable component
+            return this.scroll;
+        }
+    }
+
+
+    public class OutputStdEditor extends OutputAstEditor {} // expected-stdout editor (same widget as AST)
+    public class OutputReturnEditor extends OutputAstEditor {} // expected-return-value editor (same widget as AST)
+
+    /* EVENT HANDLERS */
+
+    private class TestCaseListSelectionListener implements ListSelectionListener { // activates the test case clicked in the list
+
+        public void valueChanged(ListSelectionEvent e) {
+            
+            if(e.getValueIsAdjusting()) return; // wait for the final selection event
+            final JList list = (JList) e.getSource();
+            final TestCase value = (TestCase) list.getSelectedValue();
+            if(value != null) OnTestCaseSelected(value);
+            
+        }
+
+    }
+
+    /* ACTIONS */
+
+    private class AddTestCaseAction extends AbstractAction { // toolbar action: append a new test case to the rule
+        public AddTestCaseAction() {
+            super("Add", ImageFactory.getSingleton().ADD);
+            putValue(SHORT_DESCRIPTION, "Add a gUnit test case.");
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnAddTestCase();
+        }
+    }
+
+    private class RemoveTestCaseAction extends AbstractAction { // toolbar action: delete the selected test case
+        public RemoveTestCaseAction() {
+            super("Remove", ImageFactory.getSingleton().DELETE);
+            putValue(SHORT_DESCRIPTION, "Remove a gUnit test case.");
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnRemoveTestCase();
+        }
+    }
+
+    /* CELL RENDERERS */
+
+    private static TestCaseListRenderer listRenderer
+            = new TestCaseListRenderer(); // shared stateless renderer instance
+
+    private static class TestCaseListRenderer implements ListCellRenderer { // renders a case as two stacked labels: input over output
+
+        private static Font IN_FONT = new Font("mono", Font.PLAIN, 12);
+        private static Font OUT_FONT = new Font("default", Font.BOLD, 12);
+
+        public static String clamp(String text, int len) { // truncate to len chars; the "..." counts toward the budget
+            if(text.length() > len) {
+                return text.substring(0, len - 3).concat("...");
+            } else {
+                return text;
+            }
+        }
+
+        public static String clampAtNewLine(String text) { // keep only the first line, appending an ellipsis
+            int pos = text.indexOf('\n');
+            if(pos >= 0) {
+                return text.substring(0, pos).concat("...");
+            } else {
+                return text;
+            }
+        }
+
+        public Component getListCellRendererComponent(
+                JList list, Object value, int index,
+                boolean isSelected, boolean hasFocus) {
+
+            final JPanel pane = new JPanel(); // NOTE(review): allocates a new panel per call; Swing renderers conventionally reuse one component
+            
+            if (value instanceof TestCase) {
+                final TestCase item = (TestCase) value;
+
+                // create components
+                final JLabel labIn = new JLabel(
+                        clamp(clampAtNewLine(item.getInput().getScript()), 18));
+                final JLabel labOut = new JLabel(
+                        clamp(clampAtNewLine(item.getOutput().getScript()), 18));
+                labOut.setFont(OUT_FONT);
+                labIn.setFont(IN_FONT);
+
+                labIn.setIcon(item.getInput() instanceof TestCaseInputFile ?
+                    ImageFactory.getSingleton().FILE16 :
+                    ImageFactory.getSingleton().EDIT16); // file icon vs pencil icon by input kind
+
+                pane.setBorder(BorderFactory.createEtchedBorder());
+                pane.setLayout(new BoxLayout(pane, BoxLayout.Y_AXIS));
+                pane.add(labIn);
+                pane.add(labOut);
+                pane.setBackground(isSelected ? Color.LIGHT_GRAY : Color.WHITE);
+            } 
+
+            return pane; // empty panel for non-TestCase values
+        }
+    }
+
+}
diff --git a/src/org/antlr/analysis/State.java b/gunit/src/main/java/org/antlr/gunit/swingui/Tool.java
similarity index 63%
copy from src/org/antlr/analysis/State.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/Tool.java
index 9c56124..8ea2791 100644
--- a/src/org/antlr/analysis/State.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/Tool.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,30 +25,37 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
+package org.antlr.gunit.swingui;
 
-/** A generic state machine state. */
-public abstract class State {
-    public static final int INVALID_STATE_NUMBER = -1;
+import java.io.IOException;
+import javax.swing.SwingUtilities;
+import javax.swing.UIManager;
 
-    public int stateNumber = INVALID_STATE_NUMBER;
+public class Tool { // command-line entry point for the gUnit Swing editor

-    /** An accept state is an end of rule state for lexers and
-     *  parser grammar rules.
-	 */
-	protected boolean acceptState = false;
+    public static void main(String[] args) throws IOException { // "-version" prints version info; anything else launches the GUI

-    public abstract int getNumberOfTransitions();
+        if(args.length == 1 && "-version".equals(args[0])) {
+            System.out.println("gUnitEditor Swing GUI\nby Shaoting Cai\n");
+        } else {
+            showUI();
+        }
+    }

-    public abstract void addTransition(Transition e);
+    private static void showUI() {
+        try {
+            UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
+        } catch (Exception e) { // ignored deliberately: fall back to the default look-and-feel
+        }
+        SwingUtilities.invokeLater(new Runnable() { // construct the UI on the EDT
+
+            public void run() {
+                WorkSpaceController control = new WorkSpaceController();
+                control.show();
+            }
+        });
+    }
 
-    public abstract Transition transition(int i);
 
-	public boolean isAcceptState() {
-		return acceptState;
-	}
 
-	public void setAcceptState(boolean acceptState) {
-		this.acceptState = acceptState;
-	}
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java b/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java
new file mode 100644
index 0000000..c75ae21
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java
@@ -0,0 +1,288 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.gunit.swingui;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.antlr.gunit.swingui.runner.gUnitAdapter;
+import java.awt.*;
+import java.io.IOException;
+import org.antlr.gunit.swingui.model.*;
+import org.antlr.gunit.swingui.ImageFactory;
+import java.awt.event.*;
+import java.io.File;
+import javax.swing.*;
+import javax.swing.event.*;
+import javax.swing.filechooser.FileFilter;
+
+/**
+ *
+ * @author scai
+ */
+public class WorkSpaceController implements IController { // top-level controller: wires the view, rule list, case editor and test runner
+
+    /* MODEL */
+    private TestSuite currentTestSuite; // null until a suite is created or opened
+    private String testSuiteFileName = null;    // path + file
+
+    /* VIEW */
+    private final WorkSpaceView view = new WorkSpaceView();
+
+    /* SUB-CONTROL */
+    private final RunnerController runner = new RunnerController();
+
+    public WorkSpaceController() {
+        view.resultPane = (JPanel) runner.getView(); // must be injected before initComponents()
+        view.initComponents();
+        this.initEventHandlers();
+        this.initToolbar();
+    }
+
+    public void show() { // make the main frame visible
+        this.view.setTitle("gUnitEditor");
+        this.view.setVisible(true);
+        this.view.pack();
+    }
+
+    public Component getEmbeddedView() { // the case editor, for embedding in a host application
+        return view.paneEditor.getView();
+    }
+
+    private void initEventHandlers() { // hook tab switches, rule selection and case-count changes
+        this.view.tabEditors.addChangeListener(new TabChangeListener());
+        this.view.listRules.setListSelectionListener(new RuleListSelectionListener());
+        this.view.paneEditor.onTestCaseNumberChange = new ActionListener() {
+            public void actionPerformed(ActionEvent e) {
+                view.listRules.getView().updateUI(); // refresh per-rule case counts
+            }
+        };
+    }
+
+    private void OnCreateTest() { // build a fresh test suite from a user-chosen .g grammar
+        JFileChooser jfc = new JFileChooser();
+        jfc.setDialogTitle("Create test suite from grammar");
+        jfc.setDialogType(JFileChooser.OPEN_DIALOG);
+        jfc.setFileFilter(new FileFilter() {
+            @Override
+            public boolean accept(File f) {
+                return f.isDirectory() || f.getName().toLowerCase().endsWith(TestSuiteFactory.GRAMMAR_EXT);
+            }
+
+            @Override
+            public String getDescription() {
+                return "ANTLR grammar file (*.g)";
+            }
+        });
+        if( jfc.showOpenDialog(view) != JFileChooser.APPROVE_OPTION ) return;
+
+        view.paneStatus.setProgressIndetermined(true);
+        final File grammarFile = jfc.getSelectedFile();
+
+        currentTestSuite = TestSuiteFactory.createTestSuite(grammarFile);
+
+        view.listRules.initialize(currentTestSuite);
+        view.tabEditors.setSelectedIndex(0);
+        view.paneStatus.setText("Grammar: " + currentTestSuite.getGrammarName());
+        view.paneStatus.setProgressIndetermined(false);
+
+        testSuiteFileName = null; // a new suite has no backing file yet
+    }
+
+    private void OnSaveTest() { if(currentTestSuite == null) return; // guard: nothing loaded, nothing to save (was an NPE)
+        TestSuiteFactory.saveTestSuite(currentTestSuite);
+        JOptionPane.showMessageDialog(view, "Testsuite saved to:\n" + currentTestSuite.getTestSuiteFile().getAbsolutePath());
+    }
+
+    private void OnOpenTest()  { // load an existing .gunit suite chosen by the user
+
+        JFileChooser jfc = new JFileChooser();
+        jfc.setDialogTitle("Open existing gUnit test suite");
+        jfc.setDialogType(JFileChooser.OPEN_DIALOG);
+        jfc.setFileFilter(new FileFilter() {
+
+            @Override
+            public boolean accept(File f) {
+                return f.isDirectory() || f.getName().toLowerCase().endsWith(TestSuiteFactory.TEST_SUITE_EXT);
+            }
+
+            @Override
+            public String getDescription() {
+                return "ANTLR unit test file (*.gunit)";
+            }
+        });
+        if( jfc.showOpenDialog(view) != JFileChooser.APPROVE_OPTION ) return;
+
+        final File testSuiteFile = jfc.getSelectedFile();
+        try {
+            testSuiteFileName = testSuiteFile.getCanonicalPath();
+        } catch (IOException e) {
+            throw new RuntimeException(e); // wrap with cause preserved
+        }
+
+        view.paneStatus.setProgressIndetermined(true);
+
+        currentTestSuite = TestSuiteFactory.loadTestSuite(testSuiteFile);
+        view.listRules.initialize(currentTestSuite);
+        view.paneStatus.setText(currentTestSuite.getGrammarName());
+        view.tabEditors.setSelectedIndex(0);
+
+        view.paneStatus.setProgressIndetermined(false);
+    }
+
+    private void OnSelectRule(Rule rule) { // load the rule into the case editor and show its last run results
+        if(rule == null) throw new IllegalArgumentException("Null");
+        this.view.paneEditor.OnLoadRule(rule);
+        this.view.paneStatus.setRule(rule.getName());
+
+        // run result
+        this.runner.OnShowRuleResult(rule);
+    }
+
+    private void OnSelectTextPane() { // regenerate the script text when the source tab is shown
+        Thread worker = new Thread () { // NOTE(review): mutates Swing components off the EDT — should use SwingUtilities.invokeLater or SwingWorker
+            @Override
+            public void run() {
+                view.paneStatus.setProgressIndetermined(true);
+                view.txtEditor.setText(
+                    TestSuiteFactory.getScript(currentTestSuite));
+                view.paneStatus.setProgressIndetermined(false);
+            }
+        };
+
+        worker.start();
+    }
+
+    private void OnRunTest() { // persist the suite, run it, then surface the result tab
+        if(currentTestSuite == null) return; // guard BEFORE saving/using the suite (original checked after `new gUnitAdapter(currentTestSuite)`)
+        TestSuiteFactory.saveTestSuite(currentTestSuite); // save before run
+
+        // run
+        try {
+            final gUnitAdapter adapter = new gUnitAdapter(currentTestSuite);
+            adapter.run();
+
+            // publish results
+            runner.OnShowSuiteResult(currentTestSuite);
+            view.tabEditors.addTab("Test Result", ImageFactory.getSingleton().FILE16, runner.getView());
+            view.tabEditors.setSelectedComponent(runner.getView());
+        } catch (Exception ex) {
+            JOptionPane.showMessageDialog(view, "Fail to run test:\n" + ex.getMessage(), "Error", JOptionPane.ERROR_MESSAGE);
+        }
+
+    }
+
+    private void initToolbar() { // toolbar buttons mirror the four suite-level actions
+        view.toolbar.add(new JButton(new CreateAction()));
+        view.toolbar.add(new JButton(new OpenAction()));
+        view.toolbar.add(new JButton(new SaveAction()));
+        view.toolbar.add(new JButton(new RunAction()));
+
+    }
+
+    public Object getModel() { // IController: no single model object for the workspace
+        throw new UnsupportedOperationException("Not supported yet.");
+    }
+
+    public Component getView() {
+        return view;
+    }
+
+
+    /** Event handler for rule list selection. */
+    private class RuleListSelectionListener implements ListSelectionListener {
+        public void valueChanged(ListSelectionEvent event) {
+            if(event.getValueIsAdjusting()) return; // wait for the final selection event
+            final JList list = (JList) event.getSource();
+            final Rule rule = (Rule) list.getSelectedValue();
+            if(rule != null) OnSelectRule(rule);
+        }
+    }
+
+
+    /** Event handler for switching between editor view and script view. */
+    public class TabChangeListener implements ChangeListener {
+        public void stateChanged(ChangeEvent evt) {
+            if(view.tabEditors.getSelectedIndex() == 1) { // index 1 = script source tab
+                OnSelectTextPane();
+            }
+        }
+
+    }
+
+
+    /** Create test suite action. */
+    private class CreateAction extends AbstractAction {
+        public CreateAction() {
+            super("Create", ImageFactory.getSingleton().ADDFILE);
+            putValue(SHORT_DESCRIPTION, "Create a test suite from an ANTLR grammar");
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnCreateTest();
+        }
+    }
+
+
+    /** Save test suite action. */
+    private class SaveAction extends AbstractAction {
+        public SaveAction() {
+            super("Save", ImageFactory.getSingleton().SAVE);
+            putValue(SHORT_DESCRIPTION, "Save the test suite");
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnSaveTest();
+        }
+    }
+
+
+    /** Open test suite action. */
+    private class OpenAction extends AbstractAction {
+        public OpenAction() {
+            super("Open", ImageFactory.getSingleton().OPEN);
+            putValue(SHORT_DESCRIPTION, "Open an existing test suite");
+            putValue(ACCELERATOR_KEY, KeyStroke.getKeyStroke(
+                    KeyEvent.VK_O, InputEvent.CTRL_DOWN_MASK)); // CTRL_DOWN_MASK: CTRL_MASK is deprecated
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnOpenTest();
+        }
+    }
+
+    /** Run test suite action. */
+    private class RunAction extends AbstractAction {
+        public RunAction() {
+            super("Run", ImageFactory.getSingleton().NEXT);
+            putValue(SHORT_DESCRIPTION, "Run the current test suite");
+            putValue(ACCELERATOR_KEY, KeyStroke.getKeyStroke(
+                    KeyEvent.VK_R, InputEvent.CTRL_DOWN_MASK)); // CTRL_DOWN_MASK: CTRL_MASK is deprecated
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnRunTest();
+        }
+    }
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java b/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java
new file mode 100644
index 0000000..4d220d2
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java
@@ -0,0 +1,103 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package org.antlr.gunit.swingui;
+
+import org.antlr.gunit.swingui.ImageFactory;
+import java.awt.*;
+import javax.swing.*;
+
+/**
+ *
+ * @author scai
+ */
+public class WorkSpaceView extends JFrame { // main window: toolbar on top, rule-list / editor-tabs split in the middle, status bar below
+
+    protected JSplitPane splitListClient ; // rule list (left) vs editor tabs (right)
+    protected JTabbedPane tabEditors;      // tab 0 = case editor, tab 1 = script source
+    protected JPanel paneToolBar;
+    protected StatusBarController paneStatus;
+    protected TestCaseEditController paneEditor;
+    protected JToolBar toolbar;
+    protected JTextArea txtEditor;         // script source text view
+    protected RuleListController listRules;
+    protected JMenuBar menuBar;
+    protected JScrollPane scrollCode;
+    protected JPanel resultPane;           // injected by the controller before initComponents()
+
+    protected JButton btnOpenGrammar;
+
+    public WorkSpaceView() {
+        super();
+    }
+
+    protected void initComponents() { // build the widget tree; call only after resultPane is set
+
+        this.paneEditor = new TestCaseEditController(this);
+        this.paneStatus = new StatusBarController();
+
+        this.toolbar = new JToolBar();
+        this.toolbar.setBorder(BorderFactory.createEmptyBorder());
+        this.toolbar.setFloatable(false);
+        this.toolbar.setBorder(BorderFactory.createEmptyBorder()); // NOTE(review): duplicate of the setBorder call two lines up
+
+        this.txtEditor = new JTextArea();
+        this.txtEditor.setLineWrap(false);
+        this.txtEditor.setFont(new Font("Courier New", Font.PLAIN, 13));
+        this.scrollCode = new JScrollPane(txtEditor,
+                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+                JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED);
+        this.scrollCode.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+
+        this.tabEditors = new JTabbedPane();
+        this.tabEditors.addTab("Case Editor", ImageFactory.getSingleton().TEXTFILE16, this.paneEditor.getView());
+        this.tabEditors.addTab("Script Source", ImageFactory.getSingleton().WINDOW16, this.scrollCode);
+
+        this.listRules = new RuleListController();
+
+        this.splitListClient = new JSplitPane( JSplitPane.HORIZONTAL_SPLIT,
+                this.listRules.getView(), this.tabEditors);
+        this.splitListClient.setResizeWeight(0.4); // give 40% of extra space to the rule list
+        this.splitListClient.setBorder(BorderFactory.createEmptyBorder());
+
+
+        
+        this.getContentPane().add(this.toolbar, BorderLayout.NORTH);
+        this.getContentPane().add(this.splitListClient, BorderLayout.CENTER);
+        this.getContentPane().add(this.paneStatus.getView(), BorderLayout.SOUTH);
+
+        // self
+        this.setPreferredSize(new Dimension(900, 500));
+        this.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+    }
+
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java
similarity index 81%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java
index 815b4e6..8bbba0c 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,10 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.gunit.swingui.model;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
-	}
+public interface ITestCaseInput { // input half of a test case, stored as a script string (literal, multiline, or file name)
+
+    public void setScript(String script);
+    public String getScript();
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java
similarity index 81%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java
index 815b4e6..0f0e860 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,18 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package org.antlr.gunit.swingui.model;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
-	}
+/**
+ *
+ * @author scai
+ */
+public interface ITestCaseOutput { // expected-output half of a test case, stored as a script string
+    public void setScript(String script);
+    public String getScript() ;
 }
diff --git a/runtime/Java/src/org/antlr/runtime/Parser.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java
similarity index 61%
copy from runtime/Java/src/org/antlr/runtime/Parser.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java
index 1000a52..2a1aeb8 100644
--- a/runtime/Java/src/org/antlr/runtime/Parser.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,41 +25,47 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
 
-/** A parser for TokenStreams.  "parser grammars" result in a subclass
- *  of this.
+
+package org.antlr.gunit.swingui.model;
+
+import java.util.ArrayList;
+import java.util.List;
+import javax.swing.DefaultListModel;
+
+/**
+ * ANTLR v3 Rule Information.
+ * @author scai
  */
-public class Parser extends BaseRecognizer {
-    protected TokenStream input;
+public class Rule extends DefaultListModel { // a grammar rule plus its test cases (the list-model elements)

-	public Parser(TokenStream input) {
-        setTokenStream(input);
-    }
+    private String name; // rule name as written in the grammar

-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
-	}
+    public Rule(String name) {
+        this.name = name;
+    }

-	/** Set the token stream and reset the parser */
-	public void setTokenStream(TokenStream input) {
-		this.input = null;
-		reset();
-		this.input = input;
-	}
+    public String getName() { return name; }

-    public TokenStream getTokenStream() {
-		return input;
-	}
+    public boolean getNotEmpty() { // true if the rule has at least one test case
+        return !this.isEmpty();
+    }

-	public void traceIn(String ruleName, int ruleIndex)  {
-		super.traceIn(ruleName, ruleIndex, input.LT(1));
-	}
+    @Override
+    public String toString() {
+        return this.name;
+    }

-	public void traceOut(String ruleName, int ruleIndex)  {
-		super.traceOut(ruleName, ruleIndex, input.LT(1));
-	}
+    public void addTestCase(TestCase newItem) {
+        this.addElement(newItem);
+    }
+    
+    // for string template
+    public List<TestCase> getTestCases() { // snapshot copy of the model elements
+        List<TestCase> result = new ArrayList<TestCase>();
+        for(int i=0; i<this.size(); i++) {
+            result.add((TestCase)this.getElementAt(i));
+        }
+        return result;
+    }
 }
diff --git a/runtime/Java/src/org/antlr/runtime/Parser.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java
similarity index 58%
copy from runtime/Java/src/org/antlr/runtime/Parser.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java
index 1000a52..40e0f7b 100644
--- a/runtime/Java/src/org/antlr/runtime/Parser.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,41 +25,51 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.gunit.swingui.model;
 
-/** A parser for TokenStreams.  "parser grammars" result in a subclass
- *  of this.
- */
-public class Parser extends BaseRecognizer {
-    protected TokenStream input;
+public class TestCase { // one gUnit test: an input script paired with an expected output, plus the last run verdict

-	public Parser(TokenStream input) {
-        setTokenStream(input);
+    private ITestCaseInput input;
+    private ITestCaseOutput output;
+    private boolean pass; // verdict of the most recent run
+
+    public boolean isPass() {
+        return pass;
     }

-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
-	}
+    public void setPass(boolean value) {
+        pass = value;
+    }

-	/** Set the token stream and reset the parser */
-	public void setTokenStream(TokenStream input) {
-		this.input = null;
-		reset();
-		this.input = input;
-	}
+    public ITestCaseInput getInput() {
+        return this.input;
+    }

-    public TokenStream getTokenStream() {
-		return input;
-	}
+    public ITestCaseOutput getOutput() {
+        return this.output;
+    }

-	public void traceIn(String ruleName, int ruleIndex)  {
-		super.traceIn(ruleName, ruleIndex, input.LT(1));
-	}
+    public TestCase(ITestCaseInput input, ITestCaseOutput output) {
+        this.input = input;
+        this.output = output;
+    }
+
+    @Override
+    public String toString() {
+        return String.format("[%s]->[%s]", input.getScript(), output.getScript());
+    }
+
+    public void setInput(ITestCaseInput in) {
+        this.input = in;
+    }
+
+    public void setOutput(ITestCaseOutput out) {
+        this.output = out;
+    }
+
+    public static String convertPreservedChars(String input) { // currently a no-op; quote-escaping is disabled
+        //return input.replace("\"", "\\\"");
+        return input;
+    }

-	public void traceOut(String ruleName, int ruleIndex)  {
-		super.traceOut(ruleName, ruleIndex, input.LT(1));
-	}
 }
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java
similarity index 69%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java
index c31e9e2..9b36175 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,38 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+package org.antlr.gunit.swingui.model;
+
+import javax.swing.JComponent;
+import javax.swing.JLabel;
+
+/**
+ *
+ * @author scai
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public class TestCaseInputFile implements ITestCaseInput {
+
+    private String fileName;
+
+    public TestCaseInputFile(String file) {
+        this.fileName = file;
+    }
+
+    public String getLabel() {
+        return "FILE:" + fileName;
+    }
+
+    public void setScript(String script) {
+        this.fileName = script;
+    }
+
+    @Override
+    public String toString() {
+        return fileName;
+    }
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    public String getScript() {
+        return this.fileName;
     }
-}
+}
\ No newline at end of file
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java
similarity index 72%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java
index c31e9e2..34e696e 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,33 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+package org.antlr.gunit.swingui.model;
+
+
+/**
+ *
+ * @author scai
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public class TestCaseInputMultiString implements ITestCaseInput {
+
+    private String script;
+
+    public TestCaseInputMultiString(String text) {
+        this.script = text;
+    }
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    @Override
+    public String toString() {
+        return "<<" + TestCase.convertPreservedChars(script) + ">>";
     }
+
+    public void setScript(String script) {
+        this.script = script;
+    }
+
+    public String getScript() {
+        return this.script;
+    }
+
 }
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java
similarity index 72%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java
index c31e9e2..b6cf06d 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,35 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+package org.antlr.gunit.swingui.model;
+
+/**
+ *
+ * @author scai
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public class TestCaseInputString implements ITestCaseInput {
+
+    private String script;
+
+    public TestCaseInputString(String text) {
+        this.script = text;
+    }
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    @Override
+    public String toString() {
+        return '"' + TestCase.convertPreservedChars(script) + '"';
     }
+
+
+
+    public void setScript(String script) {
+        this.script = script;
+    }
+
+    public String getScript() {
+        return this.script;
+    }
+
+    
 }
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java
similarity index 72%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java
index c31e9e2..83b2b6c 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,33 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+package org.antlr.gunit.swingui.model;
+
+/**
+ *
+ * @author scai
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public class TestCaseOutputAST implements ITestCaseOutput {
+
+    private String treeString;
+
+    public TestCaseOutputAST(String script) {
+        this.treeString = script;
+    }
+
+    public void setScript(String script) {
+        this.treeString = script;
+    }
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    public String getScript() {
+        return this.treeString;
     }
+
+
+    @Override
+    public String toString() {
+        return String.format(" -> %s", treeString);
+    }
+
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java
similarity index 67%
rename from runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java
index 0aaa6e9..75aa4d2 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/DoubleLinkTree.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,30 +25,39 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
 
-/** A generic doubly-linked tree implementation with no payload.
- *  You must subclass to actually have any user data.
- *  TODO: do we really need/want this?
+package org.antlr.gunit.swingui.model;
+
+/**
+ *
+ * @author scai
  */
-public abstract class DoubleLinkTree extends BaseTree {
-	protected DoubleLinkTree parent;
-
-	public DoubleLinkTree getParent() {
-		return parent;
-	}
-
-	public void setParent(DoubleLinkTree t) {
-		parent = t;
-	}
-
-	public void addChild(BaseTree t) {
-		super.addChild(t);
-		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
-	}
-
-	public void setChild(int i, BaseTree t) {
-		super.setChild(i, t);
-		((DoubleLinkTree)t).setParent((DoubleLinkTree)this);
-	}
+public class TestCaseOutputResult implements ITestCaseOutput {
+
+    public static String OK = "OK";
+    public static String FAIL = "FAIL";
+
+    private boolean success ;
+
+    public TestCaseOutputResult(boolean result) {
+        this.success = result;
+    }
+
+    @Override
+    public String toString() {
+        return getScript();
+    }
+
+    public String getScript() {
+        return success ? OK : FAIL;
+    }
+
+    public void setScript(boolean value) {
+        this.success = value;
+    }
+
+    public void setScript(String script) {
+        success = Boolean.parseBoolean(script);
+    }
+
 }
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java
similarity index 74%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java
index c31e9e2..717c311 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,25 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
+package org.antlr.gunit.swingui.model;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
- */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public class TestCaseOutputReturn implements ITestCaseOutput {
+    private String script;
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    public TestCaseOutputReturn(String text) {
+        this.script = text;
     }
-}
+
+    @Override
+    public String toString() {
+        return String.format(" returns [%s]", script);
+    }
+
+    public void setScript(String script) {
+        this.script = script;
+    }
+
+    public String getScript() {
+        return this.script;
+    }
+}
\ No newline at end of file
diff --git a/src/org/antlr/analysis/StateCluster.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java
similarity index 73%
copy from src/org/antlr/analysis/StateCluster.java
copy to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java
index c31e9e2..b1d9742 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2009 Shaoting Cai
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,29 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
+package org.antlr.gunit.swingui.model;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+/**
+ *
+ * @author scai
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
+public class TestCaseOutputStdOut implements ITestCaseOutput {
+    private String script;
 
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
+    public TestCaseOutputStdOut(String text) {
+        this.script = text;
+    }
+
+    @Override
+    public String toString() {
+        return String.format(" -> \"%s\"", script);
+    }
+
+    public void setScript(String script) {
+        this.script = script;
+    }
+
+    public String getScript() {
+        return this.script;
     }
 }
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java
new file mode 100644
index 0000000..06e5227
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java
@@ -0,0 +1,100 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.model;
+
+import java.io.*;
+import java.util.*;
+import org.antlr.runtime.*;
+
+public class TestSuite {
+
+    protected List<Rule> rules ;
+    protected String grammarName ;
+    protected CommonTokenStream tokens;
+    protected File testSuiteFile;      
+
+    protected TestSuite(String gname, File testFile) {
+        grammarName = gname;
+        testSuiteFile = testFile;
+        rules = new ArrayList<Rule>();
+    }
+    
+    /* Get the gUnit test suite file name. */
+    public File getTestSuiteFile() {
+        return testSuiteFile;
+    }       
+
+    public void addRule(Rule currentRule) {
+        if(currentRule == null) throw new IllegalArgumentException("Null rule");
+        rules.add(currentRule);
+    }
+
+    // test rule name
+    public boolean hasRule(Rule rule) {
+        for(Rule r: rules) {
+            if(r.getName().equals(rule.getName())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public int getRuleCount() {
+        return rules.size();
+    }
+    
+    public void setRules(List<Rule> newRules) {
+        rules.clear();
+        rules.addAll(newRules);
+    }
+
+    /* GETTERS AND SETTERS */
+
+    public void setGrammarName(String name) { grammarName = name;}
+
+    public String getGrammarName() { return grammarName; }
+
+    public Rule getRule(int index) { return rules.get(index); }
+
+    public CommonTokenStream getTokens() { return tokens; }
+    
+    public void setTokens(CommonTokenStream ts) { tokens = ts; }
+
+    public Rule getRule(String name) {
+        for(Rule rule: rules) {
+            if(rule.getName().equals(name)) {
+                return rule;
+            }
+        }
+        return null;
+    }
+    
+    // only for stringtemplate use
+    public List getRulesForStringTemplate() {return rules;}
+    
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java
new file mode 100644
index 0000000..fcf5b06
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java
@@ -0,0 +1,193 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.model;
+
+import org.antlr.gunit.swingui.runner.TestSuiteAdapter;
+import java.io.*;
+import java.util.*;
+import org.antlr.gunit.swingui.Tool;
+import org.antlr.gunit.swingui.parsers.*;
+import org.antlr.runtime.*;
+import org.antlr.stringtemplate.*;
+
+public class TestSuiteFactory {
+    
+    private static String TEMPLATE_FILE = "org/antlr/gunit/swingui/gunit.stg";
+    private static StringTemplateGroup templates;
+    public static final String TEST_SUITE_EXT = ".gunit";
+    public static final String GRAMMAR_EXT = ".g";
+    
+    static  {
+        ClassLoader loader = TestSuiteFactory.class.getClassLoader();
+        InputStream in = loader.getResourceAsStream(TEMPLATE_FILE);
+        Reader rd = new InputStreamReader(in);
+        templates = new StringTemplateGroup(rd);
+    }
+    
+    /**
+     * Factory method: create a testsuite from ANTLR grammar.  Save the test 
+     * suite file in the same directory of the grammar file.
+     * @param grammarFile ANTLRv3 grammar file.
+     * @return test suite object
+     */
+    public static TestSuite createTestSuite(File grammarFile) {
+        if(grammarFile != null && grammarFile.exists() && grammarFile.isFile()) {
+            
+            final String fileName = grammarFile.getName();
+            final String grammarName = fileName.substring(0, fileName.lastIndexOf('.'));
+            final String grammarDir = grammarFile.getParent();
+            final File testFile = new File(grammarDir + File.separator + grammarName + TEST_SUITE_EXT);
+            
+            final TestSuite result = new TestSuite(grammarName, testFile);
+            result.rules = loadRulesFromGrammar(grammarFile);
+            
+            if(saveTestSuite(result)) {
+                return result;
+            } else {
+                throw new RuntimeException("Can't save test suite file.");
+            }
+        } else {
+            throw new RuntimeException("Invalid grammar file.");
+        }
+    }
+
+    
+    /* Load rules from an ANTLR grammar file. */
+    private static List<Rule> loadRulesFromGrammar(File grammarFile) {
+        
+        // get all the rule names
+        final List<String> ruleNames = new ArrayList<String>();
+        try {
+            final Reader reader = new BufferedReader(new FileReader(grammarFile));
+            final ANTLRv3Lexer lexer = new ANTLRv3Lexer(new ANTLRReaderStream(reader));
+            final CommonTokenStream tokens = new CommonTokenStream(lexer);
+            final ANTLRv3Parser parser = new ANTLRv3Parser(tokens);
+            parser.rules = ruleNames;
+            parser.grammarDef();
+            reader.close();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        // convert to rule object
+        final List<Rule> ruleList = new ArrayList<Rule>();
+        for(String str: ruleNames) {
+            ruleList.add(new Rule(str));
+        }
+
+        return ruleList;
+    }    
+
+    /* Save testsuite to *.gunit file. */
+    public static boolean saveTestSuite(TestSuite testSuite) {
+        final String data = getScript(testSuite);
+        try {
+            FileWriter fw = new FileWriter(testSuite.getTestSuiteFile());
+            fw.write(data);
+            fw.flush();
+            fw.close();    
+        } catch (IOException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+    
+    /**
+     * Get the text script from the testSuite.
+     * @param testSuite
+     * @return test script
+     */
+    public static String getScript(TestSuite testSuite) {
+        if(testSuite == null) return null;
+        StringTemplate gUnitScript = templates.getInstanceOf("gUnitFile");
+        gUnitScript.setAttribute("testSuite", testSuite);
+        
+        return gUnitScript.toString();        
+    }
+    
+    /**
+     * From textual script to program model.
+     * @param file testsuite file (.gunit)
+     * @return test suite object
+     */
+    public static TestSuite loadTestSuite(File file) {
+        // check grammar file
+        final File grammarFile = getGrammarFile(file);
+        if(grammarFile == null) 
+            throw new RuntimeException("Can't find grammar file.");
+            
+        TestSuite result = new TestSuite("", file);
+        
+        // read in test suite
+        try {
+            final Reader reader = new BufferedReader(new FileReader(file));
+            final StGUnitLexer lexer = new StGUnitLexer(new ANTLRReaderStream(reader));
+            final CommonTokenStream tokens = new CommonTokenStream(lexer);
+            final StGUnitParser parser = new StGUnitParser(tokens);
+            final TestSuiteAdapter adapter = new TestSuiteAdapter(result);
+            parser.adapter = adapter;
+            parser.gUnitDef();
+            result.setTokens(tokens);
+            reader.close();            
+        } catch (Exception ex) {
+            throw new RuntimeException("Error reading test suite file.\n" + ex.getMessage());
+        }
+        
+        // load un-tested rules from grammar
+        final List<Rule> completeRuleList = loadRulesFromGrammar(grammarFile);
+        for(Rule rule: completeRuleList) {
+            if(!result.hasRule(rule)) {
+                result.addRule(rule);
+                //System.out.println("Add rule:" + rule);
+            }
+        }
+
+        return result;
+    }
+    
+    /**
+     * Get the grammar file of the testsuite file in the same directory.
+     * @param testsuiteFile
+     * @return grammar file or null
+     */
+    private static File getGrammarFile(File testsuiteFile) {
+        final String sTestFile;
+        try {
+            sTestFile = testsuiteFile.getCanonicalPath();
+        } catch (IOException e) {
+            return null;
+        }
+        final String sGrammarFile = sTestFile.substring(0, sTestFile.lastIndexOf('.')) + GRAMMAR_EXT;
+        final File fileGrammar = new File(sGrammarFile); 
+        if(fileGrammar.exists() && fileGrammar.isFile())
+            return fileGrammar;
+        else
+            return null;
+    }
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java
new file mode 100644
index 0000000..0df1048
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java
@@ -0,0 +1,73 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.runner;
+
+import org.antlr.gunit.*;
+import org.antlr.gunit.swingui.model.*;
+
+/**
+ * The gUnit test executer that will respond to the fail/pass event during the
+ * execution.  The executer is passed into gUnit Interp for execution.
+ * @author scai
+ */
+public class NotifiedTestExecuter extends gUnitExecutor {
+
+    private TestSuite testSuite ;
+
+    public NotifiedTestExecuter(GrammarInfo grammarInfo, ClassLoader loader, String testsuiteDir, TestSuite suite) {
+    	super(grammarInfo, loader, testsuiteDir);
+        
+        testSuite = suite;
+    }
+
+    @Override
+    public void onFail(ITestCase failTest) {
+        if(failTest == null) throw new IllegalArgumentException("Null fail test");
+
+        final String ruleName = failTest.getTestedRuleName();
+        if(ruleName == null) throw new NullPointerException("Null rule name");
+
+        final Rule rule = testSuite.getRule(ruleName);
+        final TestCase failCase = (TestCase) rule.getElementAt(failTest.getTestCaseIndex());
+        failCase.setPass(false);
+        //System.out.println(String.format("[FAIL] %s (%d) ", failTest.getTestedRuleName(), failTest.getTestCaseIndex()));
+    }
+
+    @Override
+    public void onPass(ITestCase passTest) {
+        if(passTest == null) throw new IllegalArgumentException("Null pass test");
+
+        final String ruleName = passTest.getTestedRuleName();
+        if(ruleName == null) throw new NullPointerException("Null rule name");
+        
+        final Rule rule = testSuite.getRule(ruleName);
+        final TestCase passCase = (TestCase) rule.getElementAt(passTest.getTestCaseIndex());
+        passCase.setPass(true);
+        //System.out.println(String.format("[PASS] %s (%d) ", passTest.getTestedRuleName(), passTest.getTestCaseIndex()));
+    }
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java
new file mode 100644
index 0000000..23f5aa0
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java
@@ -0,0 +1,124 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.runner;
+
+import java.io.*;
+import java.util.HashMap;
+
+/**
+ * Class loader for parser & lexer generated by antlr.
+ * @author Shaoting
+ */
+public class ParserLoader extends ClassLoader {
+
+    private HashMap<String, Class> classList;
+    private String grammar;
+
+    /**
+     * Create a class loader for antlr parser/lexer.
+     * @param grammarName
+     * @param classDir
+     */
+    public ParserLoader(String grammarName, String classDir) throws IOException, ClassNotFoundException {
+
+        final String lexerName = grammarName + "Lexer";
+
+        // load all the class files in the "classDir" related to the grammarName
+        File dir = new File(classDir);
+        if(dir.isDirectory()) {
+            classList = new HashMap<String, Class>();
+            grammar = grammarName;
+            File[] files = dir.listFiles(new ClassFilenameFilter(grammarName));
+            for(File f : files) {
+
+                // load class data
+                final InputStream in = new BufferedInputStream(new FileInputStream(f));
+                final byte[] classData = new byte[in.available()];
+                in.read(classData);
+                in.close();
+
+                // define class
+                final Class newClass = defineClass(null, classData, 0, classData.length);
+                assert(newClass != null);
+                resolveClass(newClass);
+
+                // save to hashtable
+                final String fileName = f.getName();
+                final String className = fileName.substring(0, fileName.lastIndexOf("."));
+                classList.put(className, newClass);
+                //System.out.println("adding: " + className);
+            }
+        } else {
+            throw new IOException(classDir + " is not a directory.");
+        }
+
+        if(classList.isEmpty() || !classList.containsKey(lexerName)) {
+            throw new ClassNotFoundException(lexerName + " not found.");
+        }
+
+    }
+
+
+
+    @Override
+    public synchronized Class loadClass(String name, boolean resolve) throws ClassNotFoundException {
+        //System.out.print("loading: " + name);
+        if(name.startsWith(grammar)) {
+            if(classList.containsKey(name)) {
+                //System.out.println(" .... found");
+                return classList.get(name);
+            } else {
+                //System.out.println(" .... not found");
+                throw new ClassNotFoundException(name);
+            }
+            
+        } else {
+            final Class c = findSystemClass(name);
+            //System.out.println(" .... system found " + c.getName());
+            return c;
+        }
+    }
+
+    /**
+     * Accepts grammarname...($...)?.class
+     */
+    protected static class ClassFilenameFilter implements FilenameFilter {
+
+        private String grammarName;
+
+        protected ClassFilenameFilter(String name) {
+            grammarName = name;
+        }
+
+        public boolean accept(File dir, String name) {
+            return name.startsWith(grammarName) && name.endsWith(".class");
+        }
+
+    }
+
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java
new file mode 100644
index 0000000..5c7d951
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java
@@ -0,0 +1,104 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.runner;
+
+import org.antlr.gunit.swingui.model.*;
+
+/**
+ * Adapter that saves information reported by the gunit parser into a TestSuite.
+ * @author Shaoting
+ */
+public class TestSuiteAdapter {
+
+    private TestSuite model ; // destination test suite being populated
+    private Rule currentRule; // rule currently open between startRule() and endRule(); null otherwise
+
+    public TestSuiteAdapter(TestSuite testSuite) {
+        model = testSuite;
+    }
+
+    public void setGrammarName(String name) {
+        model.setGrammarName(name);
+    }
+
+    public void startRule(String name) { // begin collecting test cases for a new rule
+        currentRule = new Rule(name);
+    }
+
+    public void endRule() { // commit the finished rule to the suite
+        model.addRule(currentRule);
+        currentRule = null;
+    }
+
+    public void addTestCase(ITestCaseInput in, ITestCaseOutput out) { // requires an open rule: NPE if called outside startRule/endRule
+        TestCase testCase = new TestCase(in, out);
+        currentRule.addTestCase(testCase);
+    }
+
+    private static String trimChars(String text, int numOfChars) { // strip numOfChars chars from BOTH ends; throws if text is shorter than 2*numOfChars
+        return text.substring(numOfChars, text.length() - numOfChars);
+    }
+
+    public static ITestCaseInput createFileInput(String fileName) {
+        if(fileName == null) throw new IllegalArgumentException("null");
+        return new TestCaseInputFile(fileName);
+    }
+
+    public static ITestCaseInput createStringInput(String line) {
+        if(line == null) throw new IllegalArgumentException("null");
+        // trim double quotes (one char off each end)
+        return new TestCaseInputString(trimChars(line, 1));
+    }
+
+    public static ITestCaseInput createMultiInput(String text) {
+        if(text == null) throw new IllegalArgumentException("null");
+        // trim << and >> (two chars off each end)
+        return new TestCaseInputMultiString(trimChars(text, 2));
+    }
+
+    public static ITestCaseOutput createBoolOutput(boolean bool) {
+        return new TestCaseOutputResult(bool);
+    }
+
+    public static ITestCaseOutput createAstOutput(String ast) {
+        if(ast == null) throw new IllegalArgumentException("null");
+        return new TestCaseOutputAST(ast);
+    }
+
+    public static ITestCaseOutput createStdOutput(String text) {
+        if(text == null) throw new IllegalArgumentException("null");
+        // trim double quotes (one char off each end)
+        return new TestCaseOutputStdOut(trimChars(text, 1));
+    }
+
+    public static ITestCaseOutput createReturnOutput(String text) {
+        if(text == null) throw new IllegalArgumentException("null");
+        // trim square brackets (one char off each end)
+        return new TestCaseOutputReturn(trimChars(text, 1));
+    }    
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java
new file mode 100644
index 0000000..dad626d
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java
@@ -0,0 +1,80 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.gunit.swingui.runner;
+
+import java.io.File;
+import java.io.IOException;
+import org.antlr.runtime.*;
+import org.antlr.runtime.CharStream;
+import org.antlr.gunit.*;
+import org.antlr.gunit.swingui.model.TestSuite;
+
+/**
+ * Adapter between gUnitEditor Swing GUI and gUnit command-line tool.
+ * @author scai
+ */
+public class gUnitAdapter {
+
+    private ParserLoader loader ; // loads the grammar's generated lexer/parser classes from the suite's directory
+    private TestSuite testSuite;  // suite to parse and execute in run()
+
+    public gUnitAdapter(TestSuite suite) throws IOException, ClassNotFoundException {
+        // (removed leftover debug statement "int i = 3;" — the local was never used)
+        loader = new ParserLoader(suite.getGrammarName(), 
+                                  suite.getTestSuiteFile().getParent());
+        testSuite = suite;
+    }
+
+    public void run() {
+        if (testSuite == null)
+            throw new IllegalArgumentException("Null testsuite.");
+        
+        
+        try {
+
+            // Parse gUnit test suite file
+            final CharStream input = new ANTLRFileStream(testSuite.getTestSuiteFile().getCanonicalPath());
+            final gUnitLexer lexer = new gUnitLexer(input);
+            final CommonTokenStream tokens = new CommonTokenStream(lexer);
+            final GrammarInfo grammarInfo = new GrammarInfo();
+            final gUnitParser parser = new gUnitParser(tokens, grammarInfo);
+            parser.gUnitDef();	// parse gunit script and save elements to grammarInfo
+
+            // Execute test suite
+            final gUnitExecutor executer = new NotifiedTestExecuter(
+                    grammarInfo, loader, 
+                    testSuite.getTestSuiteFile().getParent(), testSuite);
+            executer.execTest();
+            
+        } catch (Exception e) {
+            e.printStackTrace(); // best-effort: report the failure without killing the GUI
+        }
+    }
+
+}
diff --git a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg b/gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg
similarity index 69%
copy from src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
copy to gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg
index 5dc1610..40aec57 100644
--- a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
+++ b/gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg
@@ -1,44 +1,49 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : C# by code generator.
- */
-group ASTDbg;
-
-parserMembers() ::= <<
-protected ITreeAdaptor adaptor = new DebugTreeAdaptor(dbg, new CommonTreeAdaptor());
-public ITreeAdaptor TreeAdaptor
-{
-	get { return this.adaptor; }
-	set { this.adaptor = new DebugTreeAdaptor(dbg, value); }
-}<\n>
->>
-
- at rewriteElement.pregen() ::= "dbg.Location(<e.line>,<e.pos>);"
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon, Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group gUnitTestResult;
+
+testResult(title, num_of_test, num_of_failure, failure, has_invalid, num_of_invalid, invalid) ::= <<
+-----------------------------------------------------------------------
+<title> with <num_of_test> tests
+-----------------------------------------------------------------------
+<num_of_failure> failures found:
+<failure:{<it.header>
+expected: <it.expectedResult>
+actual: <it.actualResult>
+
+}>
+<if(has_invalid)>
+<num_of_invalid> invalid inputs found:
+<invalid:{<it.header>
+invalid input: <it.actual>
+}>
+<endif>
+
+Tests run: <num_of_test>, Failures: <num_of_failure>
+
+>>
diff --git a/gunit/src/main/resources/org/antlr/gunit/junit.stg b/gunit/src/main/resources/org/antlr/gunit/junit.stg
new file mode 100644
index 0000000..d2c162a
--- /dev/null
+++ b/gunit/src/main/resources/org/antlr/gunit/junit.stg
@@ -0,0 +1,83 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon, Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group junit;
+
+classHeader(header,junitFileName,hasPackage,packagePath,lexerPath,parserPath,treeParserPath,isTreeGrammar) ::= <<
+<header>
+
+import org.antlr.gunit.gUnitBaseTest;
+
+public class <junitFileName> extends gUnitBaseTest {
+	
+	public void setUp() {
+		<if(hasPackage)><\t><\t>this.packagePath = "<packagePath>";<endif>
+		this.lexerPath = "<lexerPath>";
+		this.parserPath = "<parserPath>";
+		<if(isTreeGrammar)><\t><\t>this.treeParserPath = "<treeParserPath>";<endif>
+	}<\n><\n>
+>>
+
+testTreeRuleMethod(methodName,testTreeRuleName,testRuleName,testInput,isFile,tokenType,expecting) ::= <<
+	public void <methodName>() throws Exception {
+		// test input: <testInput>
+		Object retval = execTreeParser(<testTreeRuleName>, <testRuleName>, <testInput>, <isFile>);
+		Object actual = examineExecResult(<tokenType>, retval);
+		Object expecting = <expecting>;
+		
+		assertEquals("testing rule "+<testTreeRuleName>, expecting, actual);
+	}<\n><\n>
+>>
+
+testTreeRuleMethod2(methodName,testTreeRuleName,testRuleName,testInput,isFile,returnType,expecting) ::= <<
+	public void <methodName>() throws Exception {
+		// test input: <testInput>
+		<returnType> retval = (<returnType>)execTreeParser(<testTreeRuleName>, <testRuleName>, <testInput>, <isFile>);
+		
+		assertTrue("testing rule "+<testTreeRuleName>, <expecting>);
+	}<\n><\n>
+>>
+
+testRuleMethod(isLexicalRule,methodName,testRuleName,testInput,isFile,tokenType,expecting) ::= <<
+	public void <methodName>() throws Exception {
+		// test input: <testInput>
+		Object retval = <if(isLexicalRule)>execLexer<else>execParser<endif>(<testRuleName>, <testInput>, <isFile>);
+		Object actual = examineExecResult(<tokenType>, retval);
+		Object expecting = <expecting>;
+
+		assertEquals("testing rule "+<testRuleName>, expecting, actual);
+	}<\n><\n>
+>>
+
+testRuleMethod2(methodName,testRuleName,testInput,isFile,returnType,expecting) ::= <<
+	public void <methodName>() throws Exception {
+		// test input: <testInput>
+		<returnType> retval = (<returnType>)execParser(<testRuleName>, <testInput>, <isFile>);
+
+		assertTrue("testing rule "+<testRuleName>, <expecting>);
+	}<\n><\n>
+>>
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg b/gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg
new file mode 100644
index 0000000..c17178c
--- /dev/null
+++ b/gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg
@@ -0,0 +1,19 @@
+group gunit;
+
+gUnitFile(testSuite) ::= <<gunit <testSuite.grammarName>;
+
+<testSuite.rulesForStringTemplate:testGroup()>
+>>
+
+testGroup() ::= <<
+<if(it.notEmpty)>
+
+//------------------- <it.name>
+<it.name>:
+
+<it.testCases: testCase(); separator="\n\n">
+
+<endif>
+>>
+
+testCase() ::= "<it.input> <it.output>"
\ No newline at end of file
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png
new file mode 100644
index 0000000..e7d91e5
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png
new file mode 100644
index 0000000..b6f090b
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png
new file mode 100644
index 0000000..408b6b4
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png
new file mode 100644
index 0000000..bf852ba
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png
new file mode 100644
index 0000000..2e82c2e
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png
new file mode 100644
index 0000000..3c252de
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png
new file mode 100644
index 0000000..05f7f1f
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png
new file mode 100644
index 0000000..2e13243
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png
new file mode 100644
index 0000000..e8a2d99
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png
new file mode 100644
index 0000000..1a46470
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png
new file mode 100644
index 0000000..179998b
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png
new file mode 100644
index 0000000..fcdfb43
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png
new file mode 100644
index 0000000..1258622
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png
new file mode 100644
index 0000000..8eea6a9
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png
new file mode 100644
index 0000000..2683e98
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png
new file mode 100644
index 0000000..186f17f
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png
new file mode 100644
index 0000000..34c3893
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png
new file mode 100644
index 0000000..bfe8802
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png
new file mode 100644
index 0000000..65aaaa7
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png
new file mode 100644
index 0000000..1d63fa4
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png
new file mode 100644
index 0000000..a85fe14
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png
new file mode 100644
index 0000000..c37328e
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png
new file mode 100644
index 0000000..a8a67db
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png
new file mode 100644
index 0000000..3f0c5e5
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png differ
diff --git a/gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png
new file mode 100644
index 0000000..c595f9b
Binary files /dev/null and b/gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png differ
diff --git a/gunit/src/test/java/org/antlr/gunit/GunitTest.java b/gunit/src/test/java/org/antlr/gunit/GunitTest.java
new file mode 100644
index 0000000..3df86d0
--- /dev/null
+++ b/gunit/src/test/java/org/antlr/gunit/GunitTest.java
@@ -0,0 +1,38 @@
+package org.antlr.gunit;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+/**
+ * Unit test for gUnit itself....
+ */
+public class GunitTest
+    extends TestCase
+{
+    /**
+     * Create the test case
+     *
+     * @param testName name of the test case
+     */
+    public GunitTest( String testName )
+    {
+        super( testName );
+    }
+
+    /**
+     * @return the suite of tests being tested
+     */
+    public static Test suite()
+    {
+        return new TestSuite( GunitTest.class );
+    }
+
+    /**
+     * Rigorous Test :-)
+     */
+    public void testGunitTest()
+    {
+        assertTrue( true );
+    }
+}
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..c586de7
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,250 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr-master</artifactId>
+    <packaging>pom</packaging>
+    <version>3.2</version>
+    <name>ANTLR Master build control POM</name>
+    <url>http://maven.apache.org</url>
+
+  <!--
+    What version of ANTLR are we building? This sets the 
+    the version number for all other things that are built
+    as part of an ANTLR release, unless they override or
+    ignore it. We do this via a properties file for this
+    pom.
+    -->
+
+  <!--
+     This is the master pom for building the ANTLR
+     toolset and runtime (Java) at the specific level
+     defined above. Hence we specify here the modules that
+     this pom will build when we build this pom
+    -->
+
+    <modules>
+
+        <module>runtime/Java</module>
+        <module>tool</module>
+        <module>antlr3-maven-plugin</module>
+        <module>gunit</module>
+        <module>gunit-maven-plugin</module>
+    </modules>
+  <!--
+
+    Define where the ANTLR related jars are deployed both for
+    the main ANTLR repository, which syncs with the maven main
+    repository, and the snapshot repository, which can be
+    used by developers that need the latest development version of
+    something, but is used here to show maven where to deploy
+    snapshots and releases.
+    -->
+    <distributionManagement>
+
+        <repository>
+            <id>antlr-repo</id>
+            <name>ANTLR Testing repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-repo</url>
+        </repository>
+      
+        <snapshotRepository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-snapshot</url>
+        </snapshotRepository>
+
+    </distributionManagement>
+  
+  <!--
+  
+    Inform Maven of the ANTLR snapshot repository, which it will
+    need to consult to get the latest snapshot build of the runtime
+    if it was not built and installed locally.
+    -->
+    <repositories>
+
+      <!--
+        This is the ANTLR repository.
+        -->
+        <repository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>http://antlr.org/antlr-snapshot</url>
+            <snapshots>
+                <enabled>true</enabled>
+                <updatePolicy>always</updatePolicy>
+            </snapshots>
+        </repository>
+      
+    </repositories>
+
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build, run and test the ANTLR jars.
+    This is the master pom, and so it only contains those
+    dependencies that are common to all the modules below
+    or are just included for test
+    -->
+    <dependencyManagement>
+
+        <dependencies>
+
+            <dependency>
+                <groupId>junit</groupId>
+                <artifactId>junit</artifactId>
+                <version>4.5</version>
+                <scope>test</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>antlr</groupId>
+                <artifactId>antlr</artifactId>
+                <version>2.7.7</version>
+                <scope>compile</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>org.antlr</groupId>
+                <artifactId>stringtemplate</artifactId>
+                <version>3.2.1</version>
+                <scope>compile</scope>
+            </dependency>
+
+        </dependencies>
+        
+    </dependencyManagement>
+
+    <build>
+      
+        <defaultGoal>install</defaultGoal>
+
+        <!--
+            The following filter definition means that both the master
+            project and the sub projects will read in a file in the same
+            directory as the pom.xml is located and set any properties
+            that are defined there in the standard x=y format. These
+            properties can then be referenced via ${x} in any resource
+            file specified in any pom. So, there is a master antlr.config
+            file in the same location as this pom.xml file and here you can
+            define anything that is relevant to all the modules that we
+            build here. However each module also has an antlr.config file
+            where you can override property values from the master file or
+            define things that are only relevant to that module. 
+          -->
+        <filters>
+            <filter>antlr.config</filter>
+        </filters>
+
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+        
+        <plugins>
+
+             <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>buildnumber-maven-plugin</artifactId>
+                <configuration>
+                  <format>{0,date,MMM dd, yyyy} {0,time,kk:mm:ss}</format>
+                  <items>
+                    <item>timestamp</item>
+                  </items>
+                </configuration>
+                <executions>
+                  <execution>
+                    <phase>validate</phase>
+                    <goals>
+                      <goal>create</goal>
+                    </goals>
+                  </execution>
+                </executions>
+             </plugin>
+
+            <plugin>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>1.5</source>
+                    <target>jsr14</target>
+                    <sourceDirectory>src</sourceDirectory>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <artifactId>maven-surefire-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>findbugs-maven-plugin</artifactId>
+                <configuration>
+                    <findbugsXmlOutput>true</findbugsXmlOutput>
+                    <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
+                    <xmlOutput>true</xmlOutput>
+                </configuration>
+            </plugin>
+
+            <plugin>
+
+                <!--
+
+                    Build an uber-jar for the ANTLR Tool that is packaged with all the other dependencies,
+                    such as the antlr-runtime and stringtemplate etc. This will be useful
+                    for developers, who then do not need to download anything else or
+                    remember that they need stringtemplate.jar in their CLASSPATH and so
+                    on.
+
+                    This does not preclude any of the module generated jars from
+                    being used on their own of course.
+
+                    Here, we also build a master source jar as I was unable to persuade
+                    this plugin to use multiple configurations and not have the thing
+                    screw up because of multiple modules :-(
+                    
+                  -->
+
+                <artifactId>maven-assembly-plugin</artifactId>
+
+                <!-- 
+                    Do not make the child modules build an assembly
+                  -->
+                <inherited>false</inherited>
+           
+                <configuration>
+                    <descriptors>
+                        <descriptor>antlrjar.xml</descriptor>
+                        <descriptor>antlrsources.xml</descriptor>
+                    </descriptors>
+                        <!--
+
+                            Specify that we want the resulting jar to be executable
+                            via java -jar, which we do by modifying the manifest
+                            of course.
+                          -->
+                    <archive>
+                        <manifest>
+                            <mainClass>org.antlr.Tool</mainClass>
+                        </manifest>
+                    </archive>
+                </configuration>
+
+
+
+            </plugin>
+
+        </plugins>
+
+        <extensions>
+            <extension>
+                <groupId>org.apache.maven.wagon</groupId>
+                <artifactId>wagon-ssh-external</artifactId>
+                <version>1.0-beta-2</version>
+            </extension>
+        </extensions>
+
+    </build>
+</project>
diff --git a/runtime/Java/antlr.config b/runtime/Java/antlr.config
new file mode 100644
index 0000000..e69de29
diff --git a/runtime/Java/doxyfile b/runtime/Java/doxyfile
index 7237472..c539907 100644
--- a/runtime/Java/doxyfile
+++ b/runtime/Java/doxyfile
@@ -5,7 +5,7 @@
 #---------------------------------------------------------------------------
 DOXYFILE_ENCODING      = UTF-8
 PROJECT_NAME           = "ANTLR API"
-PROJECT_NUMBER         = 3.0
+PROJECT_NUMBER         = 3.1.2
 OUTPUT_DIRECTORY       = api
 CREATE_SUBDIRS         = NO
 OUTPUT_LANGUAGE        = English
diff --git a/runtime/Java/pom.xml b/runtime/Java/pom.xml
new file mode 100644
index 0000000..60e7c9f
--- /dev/null
+++ b/runtime/Java/pom.xml
@@ -0,0 +1,119 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.antlr</groupId>
+  <artifactId>antlr-runtime</artifactId>
+  <packaging>jar</packaging>
+  
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+  <parent>
+      <groupId>org.antlr</groupId>
+      <artifactId>antlr-master</artifactId>
+      <version>3.2</version>
+  </parent>
+  
+  <name>Antlr 3 Runtime</name>
+  <description>A framework for constructing recognizers, compilers, and translators from grammatical descriptions containing Java, C#, C++, or Python actions.</description>
+  <url>http://www.antlr.org</url>
+      <developers>
+        <developer>
+            <name>Terence Parr</name>
+            <organization>USFCA</organization>
+            <organizationUrl>http://www.cs.usfca.edu</organizationUrl>
+            <email>parrt at antlr.org</email>
+            <roles>
+                <role>Project Leader</role>
+                <role>Developer - Java Target</role>
+            </roles>
+            <timezone>PST</timezone>
+        </developer>
+        <developer>
+            <name>Jim Idle</name>
+            <organization>Temporal Wave LLC</organization>
+            <organizationUrl>http://www.temporal-wave.com</organizationUrl>
+            <email>jimi at temporal-wave.com</email>
+            <roles>
+                <role>Developer - Maven stuff</role>
+                <role>Developer - C Target</role>
+            </roles>
+            <timezone>PST</timezone>
+        </developer>
+    </developers>
+    
+  <scm>
+    <url>http://fisheye2.cenqua.com/browse/antlr</url>
+    <connection>http://fisheye2.cenqua.com/browse/antlr</connection>
+  </scm>
+  
+  <!--
+    Definition of the ANTLR repositories. Note that you can only deploy
+    to the repositories via scp, and so the server must already know about
+    your public key. ONly ANTLR developers are allowed to deploy to the 
+    release and snapshot repositories, which are synced with the Maven central
+    repository.
+  -->
+  <distributionManagement>
+
+      <repository>
+          <id>antlr-repo</id>
+          <name>ANTLR Testing repository</name>
+          <url>scpexe://antlr.org/home/mavensync/antlr-repo</url>
+      </repository>
+      
+      <snapshotRepository>
+            <id>antlr-snapshot</id>
+            <name>ANTLR Testing Snapshot Repository</name>
+            <url>scpexe://antlr.org/home/mavensync/antlr-snapshot</url>
+      </snapshotRepository>
+
+  </distributionManagement>
+
+  <dependencies>
+    
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>stringtemplate</artifactId>
+      <version>3.2</version>
+      <scope>compile</scope>
+    </dependency>
+    
+  </dependencies>
+  
+<build>
+    <defaultGoal>install</defaultGoal>
+    <extensions>
+        <extension>
+            <groupId>org.apache.maven.wagon</groupId>
+            <artifactId>wagon-ssh-external</artifactId>
+            <version>1.0-beta-2</version>
+        </extension>
+    </extensions>
+    <plugins>
+        <plugin>
+            <artifactId>maven-compiler-plugin</artifactId>
+            <version>2.0.2</version>
+            <configuration>
+                <source>1.5</source>
+                <target>jsr14</target>
+            </configuration>
+        </plugin>
+
+        <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>findbugs-maven-plugin</artifactId>
+            <configuration>
+                <findbugsXmlOutput>true</findbugsXmlOutput>
+                <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
+                <xmlOutput>true</xmlOutput>
+            </configuration>
+        </plugin>
+    </plugins>
+</build>
+
+</project>
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
similarity index 98%
copy from runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java
copy to runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
index c06c511..155f248 100644
--- a/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java
similarity index 59%
rename from runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java
index c06c511..d2f5325 100644
--- a/runtime/Java/src/org/antlr/runtime/ANTLRFileStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,50 +29,42 @@ package org.antlr.runtime;
 
 import java.io.*;
 
-/** This is a char buffer stream that is loaded from a file
- *  all at once when you construct the object.  This looks very
- *  much like an ANTLReader or ANTLRInputStream, but it's a special case
- *  since we know the exact size of the object to load.  We can avoid lots
- *  of data copying. 
- */
-public class ANTLRFileStream extends ANTLRStringStream {
-	protected String fileName;
+/** A kind of ReaderStream that pulls from an InputStream.
+ *  Useful for reading from stdin and specifying file encodings etc...
+  */
+public class ANTLRInputStream extends ANTLRReaderStream {
+	public ANTLRInputStream() {
+	}
+
+	public ANTLRInputStream(InputStream input) throws IOException {
+		this(input, null);
+	}
+
+	public ANTLRInputStream(InputStream input, int size) throws IOException {
+		this(input, size, null);
+	}
 
-	public ANTLRFileStream(String fileName) throws IOException {
-		this(fileName, null);
+	public ANTLRInputStream(InputStream input, String encoding) throws IOException {
+		this(input, INITIAL_BUFFER_SIZE, encoding);
 	}
 
-	public ANTLRFileStream(String fileName, String encoding) throws IOException {
-		this.fileName = fileName;
-		load(fileName, encoding);
+	public ANTLRInputStream(InputStream input, int size, String encoding) throws IOException {
+		this(input, size, READ_BUFFER_SIZE, encoding);
 	}
 
-	public void load(String fileName, String encoding)
+	public ANTLRInputStream(InputStream input,
+							int size,
+							int readBufferSize,
+							String encoding)
 		throws IOException
 	{
-		if ( fileName==null ) {
-			return;
-		}
-		File f = new File(fileName);
-		int size = (int)f.length();
 		InputStreamReader isr;
-		FileInputStream fis = new FileInputStream(fileName);
 		if ( encoding!=null ) {
-			isr = new InputStreamReader(fis, encoding);
+			isr = new InputStreamReader(input, encoding);
 		}
 		else {
-			isr = new InputStreamReader(fis);
-		}
-		try {
-			data = new char[size];
-			super.n = isr.read(data);
-		}
-		finally {
-			isr.close();
+			isr = new InputStreamReader(input);
 		}
-	}
-
-	public String getSourceName() {
-		return fileName;
+		load(isr, size, readBufferSize);
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRReaderStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
similarity index 55%
rename from runtime/Java/src/org/antlr/runtime/ANTLRReaderStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
index d9aa85b..d53ebd6 100644
--- a/runtime/Java/src/org/antlr/runtime/ANTLRReaderStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime;
 
 import java.io.*;
@@ -36,7 +63,7 @@ public class ANTLRReaderStream extends ANTLRStringStream {
 			size = INITIAL_BUFFER_SIZE;
 		}
 		if ( readChunkSize<=0 ) {
-			size = READ_BUFFER_SIZE;
+			readChunkSize = READ_BUFFER_SIZE;
 		}
 		// System.out.println("load "+size+" in chunks of "+readChunkSize);
 		try {
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRStringStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
similarity index 97%
rename from runtime/Java/src/org/antlr/runtime/ANTLRStringStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
index 2d9fd62..eb4df2e 100644
--- a/runtime/Java/src/org/antlr/runtime/ANTLRStringStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,9 @@ public class ANTLRStringStream implements CharStream {
 	/** Track the last mark() call result value for use in rewind(). */
 	protected int lastMarker;
 
+	/** What is name or source of this char stream? */
+	public String name;
+
 	public ANTLRStringStream() {
 	}
 
@@ -218,4 +221,8 @@ public class ANTLRStringStream implements CharStream {
 	public void setCharPositionInLine(int pos) {
 		this.charPositionInLine = pos;
 	}
+
+	public String getSourceName() {
+		return name;
+	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/BaseRecognizer.java b/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
similarity index 64%
rename from runtime/Java/src/org/antlr/runtime/BaseRecognizer.java
rename to runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
index 3d98b03..fa71917 100644
--- a/runtime/Java/src/org/antlr/runtime/BaseRecognizer.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime;
 
 import java.util.ArrayList;
@@ -15,108 +42,120 @@ public abstract class BaseRecognizer {
 	public static final int MEMO_RULE_UNKNOWN = -1;
 	public static final int INITIAL_FOLLOW_STACK_SIZE = 100;
 
-	public static final Integer MEMO_RULE_FAILED_I = new Integer(MEMO_RULE_FAILED);
-
 	// copies from Token object for convenience in actions
 	public static final int DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
 	public static final int HIDDEN = Token.HIDDEN_CHANNEL;
 
 	public static final String NEXT_TOKEN_RULE_NAME = "nextToken";
 
-	/** Track the set of token types that can follow any rule invocation.
-	 *  Stack grows upwards.  When it hits the max, it grows 2x in size
-	 *  and keeps going.
-	 */
-	protected BitSet[] following = new BitSet[INITIAL_FOLLOW_STACK_SIZE];
-	protected int _fsp = -1;
-
-	/** This is true when we see an error and before having successfully
-	 *  matched a token.  Prevents generation of more than one error message
-	 *  per error.
-	 */
-	protected boolean errorRecovery = false;
-
-	/** The index into the input stream where the last error occurred.
-	 * 	This is used to prevent infinite loops where an error is found
-	 *  but no token is consumed during recovery...another error is found,
-	 *  ad naseum.  This is a failsafe mechanism to guarantee that at least
-	 *  one token/tree node is consumed for two errors.
-	 */
-	protected int lastErrorIndex = -1;
-
-	/** In lieu of a return value, this indicates that a rule or token
-	 *  has failed to match.  Reset to false upon valid token match.
+	/** State of a lexer, parser, or tree parser are collected into a state
+	 *  object so the state can be shared.  This sharing is needed to
+	 *  have one grammar import others and share same error variables
+	 *  and other state variables.  It's a kind of explicit multiple
+	 *  inheritance via delegation of methods and shared state.
 	 */
-	protected boolean failed = false;
+	protected RecognizerSharedState state;
 
-	/** If 0, no backtracking is going on.  Safe to exec actions etc...
-	 *  If >0 then it's the level of backtracking.
-	 */
-	protected int backtracking = 0;
+	public BaseRecognizer() {
+		state = new RecognizerSharedState();
+	}
 
-	/** An array[size num rules] of Map<Integer,Integer> that tracks
-	 *  the stop token index for each rule.  ruleMemo[ruleIndex] is
-	 *  the memoization table for ruleIndex.  For key ruleStartIndex, you
-	 *  get back the stop token for associated rule or MEMO_RULE_FAILED.
-	 *
-	 *  This is only used if rule memoization is on (which it is by default).
-	 */
-	protected Map[] ruleMemo;
+	public BaseRecognizer(RecognizerSharedState state) {
+		if ( state==null ) {
+			state = new RecognizerSharedState();
+		}
+		this.state = state;
+	}
 
 	/** reset the parser's state; subclasses must rewinds the input stream */
 	public void reset() {
 		// wack everything related to error recovery
-		_fsp = -1;
-		errorRecovery = false;
-		lastErrorIndex = -1;
-		failed = false;
+		if ( state==null ) {
+			return; // no shared state work to do
+		}
+		state._fsp = -1;
+		state.errorRecovery = false;
+		state.lastErrorIndex = -1;
+		state.failed = false;
+		state.syntaxErrors = 0;
 		// wack everything related to backtracking and memoization
-		backtracking = 0;
-		for (int i = 0; ruleMemo!=null && i < ruleMemo.length; i++) { // wipe cache
-			ruleMemo[i] = null;
+		state.backtracking = 0;
+		for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) { // wipe cache
+			state.ruleMemo[i] = null;
 		}
 	}
 
-	/** Match current input symbol against ttype.  Upon error, do one token
-	 *  insertion or deletion if possible.  You can override to not recover
-	 *  here and bail out of the current production to the normal error
-	 *  exception catch (at the end of the method) by just throwing
-	 *  MismatchedTokenException upon input.LA(1)!=ttype.
+
+	/** Match current input symbol against ttype.  Attempt
+	 *  single token insertion or deletion error recovery.  If
+	 *  that fails, throw MismatchedTokenException.
+	 *
+	 *  To turn off single token insertion or deletion error
+	 *  recovery, override recoverFromMismatchedToken() and have it
+     *  throw an exception. See TreeParser.recoverFromMismatchedToken().
+     *  This way any error in a rule will cause an exception and
+     *  immediate exit from rule.  Rule would recover by resynchronizing
+     *  to the set of symbols that can follow rule ref.
 	 */
-	public void match(IntStream input, int ttype, BitSet follow)
+	public Object match(IntStream input, int ttype, BitSet follow)
 		throws RecognitionException
 	{
+		//System.out.println("match "+((TokenStream)input).LT(1));
+		Object matchedSymbol = getCurrentInputSymbol(input);
 		if ( input.LA(1)==ttype ) {
 			input.consume();
-			errorRecovery = false;
-			failed = false;
-			return;
+			state.errorRecovery = false;
+			state.failed = false;
+			return matchedSymbol;
 		}
-		if ( backtracking>0 ) {
-			failed = true;
-			return;
+		if ( state.backtracking>0 ) {
+			state.failed = true;
+			return matchedSymbol;
 		}
-		mismatch(input, ttype, follow);
-		return;
+		matchedSymbol = recoverFromMismatchedToken(input, ttype, follow);
+		return matchedSymbol;
 	}
 
+	/** Match the wildcard: in a symbol */
 	public void matchAny(IntStream input) {
-		errorRecovery = false;
-		failed = false;
+		state.errorRecovery = false;
+		state.failed = false;
 		input.consume();
 	}
 
-	/** factor out what to do upon token mismatch so tree parsers can behave
-	 *  differently.  Override this method in your parser to do things
-	 *  like bailing out after the first error; just throw the mte object
-	 *  instead of calling the recovery method.
-	 */
-	protected void mismatch(IntStream input, int ttype, BitSet follow)
-		throws RecognitionException
-	{
-		MismatchedTokenException mte =
-			new MismatchedTokenException(ttype, input);
-		recoverFromMismatchedToken(input, mte, ttype, follow);
+	public boolean mismatchIsUnwantedToken(IntStream input, int ttype) {
+		return input.LA(2)==ttype;
+	}
+
+	public boolean mismatchIsMissingToken(IntStream input, BitSet follow) {
+		if ( follow==null ) {
+			// we have no information about the follow; we can only consume
+			// a single token and hope for the best
+			return false;
+		}
+		// compute what can follow this grammar element reference
+		if ( follow.member(Token.EOR_TOKEN_TYPE) ) {
+			BitSet viableTokensFollowingThisRule = computeContextSensitiveRuleFOLLOW();
+			follow = follow.or(viableTokensFollowingThisRule);
+            if ( state._fsp>=0 ) { // remove EOR if we're not the start symbol
+                follow.remove(Token.EOR_TOKEN_TYPE);
+            }
+		}
+		// if current token is consistent with what could come after set
+		// then we know we're missing a token; error recovery is free to
+		// "insert" the missing token
+
+		//System.out.println("viable tokens="+follow.toString(getTokenNames()));
+		//System.out.println("LT(1)="+((TokenStream)input).LT(1));
+
+		// BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
+		// in follow set to indicate that the fall of the start symbol is
+		// in the set (EOF can follow).
+		if ( follow.member(input.LA(1)) || follow.member(Token.EOR_TOKEN_TYPE) ) {
+			//System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
+			return true;
+		}
+		return false;
 	}
 
 	/** Report a recognition problem.
@@ -131,15 +170,18 @@ public abstract class BaseRecognizer {
 	 * 		3. consume until token found in resynch set
 	 * 		4. try to resume parsing
 	 * 		5. next match() will reset errorRecovery mode
+	 *
+	 *  If you override, make sure to update syntaxErrors if you care about that.
 	 */
 	public void reportError(RecognitionException e) {
 		// if we've already reported an error and have not matched a token
 		// yet successfully, don't report any errors.
-		if ( errorRecovery ) {
+		if ( state.errorRecovery ) {
 			//System.err.print("[SPURIOUS] ");
 			return;
 		}
-		errorRecovery = true;
+		state.syntaxErrors++; // don't count spurious
+		state.errorRecovery = true;
 
 		displayRecognitionError(this.getTokenNames(), e);
 	}
@@ -175,8 +217,31 @@ public abstract class BaseRecognizer {
 	 *  exception types.
 	 */
 	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
-		String msg = null;
-		if ( e instanceof MismatchedTokenException ) {
+		String msg = e.getMessage();
+		if ( e instanceof UnwantedTokenException ) {
+			UnwantedTokenException ute = (UnwantedTokenException)e;
+			String tokenName="<unknown>";
+			if ( ute.expecting== Token.EOF ) {
+				tokenName = "EOF";
+			}
+			else {
+				tokenName = tokenNames[ute.expecting];
+			}
+			msg = "extraneous input "+getTokenErrorDisplay(ute.getUnexpectedToken())+
+				" expecting "+tokenName;
+		}
+		else if ( e instanceof MissingTokenException ) {
+			MissingTokenException mte = (MissingTokenException)e;
+			String tokenName="<unknown>";
+			if ( mte.expecting== Token.EOF ) {
+				tokenName = "EOF";
+			}
+			else {
+				tokenName = tokenNames[mte.expecting];
+			}
+			msg = "missing "+tokenName+" at "+getTokenErrorDisplay(e.token);
+		}
+		else if ( e instanceof MismatchedTokenException ) {
 			MismatchedTokenException mte = (MismatchedTokenException)e;
 			String tokenName="<unknown>";
 			if ( mte.expecting== Token.EOF ) {
@@ -201,14 +266,14 @@ public abstract class BaseRecognizer {
 				" expecting "+tokenName;
 		}
 		else if ( e instanceof NoViableAltException ) {
-			NoViableAltException nvae = (NoViableAltException)e;
+			//NoViableAltException nvae = (NoViableAltException)e;
 			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
 			// and "(decision="+nvae.decisionNumber+") and
 			// "state "+nvae.stateNumber
 			msg = "no viable alternative at input "+getTokenErrorDisplay(e.token);
 		}
 		else if ( e instanceof EarlyExitException ) {
-			EarlyExitException eee = (EarlyExitException)e;
+			//EarlyExitException eee = (EarlyExitException)e;
 			// for development, can add "(decision="+eee.decisionNumber+")"
 			msg = "required (...)+ loop did not match anything at input "+
 				getTokenErrorDisplay(e.token);
@@ -231,6 +296,17 @@ public abstract class BaseRecognizer {
 		return msg;
 	}
 
+	/** Get number of recognition errors (lexer, parser, tree parser).  Each
+	 *  recognizer tracks its own number.  So parser and lexer each have
+	 *  separate count.  Does not count the spurious errors found between
+	 *  an error and next valid token match
+	 *
+	 *  See also reportError()
+	 */
+	public int getNumberOfSyntaxErrors() {
+		return state.syntaxErrors;
+	}
+
 	/** What is the error header, normally line/character position information? */
 	public String getErrorHeader(RecognitionException e) {
 		return "line "+e.line+":"+e.charPositionInLine;
@@ -265,19 +341,21 @@ public abstract class BaseRecognizer {
 		System.err.println(msg);
 	}
 
-	/** Recover from an error found on the input stream.  Mostly this is
-	 *  NoViableAlt exceptions, but could be a mismatched token that
-	 *  the match() routine could not recover from.
+	/** Recover from an error found on the input stream.  This is
+	 *  for NoViableAlt and mismatched symbol exceptions.  If you enable
+	 *  single token insertion and deletion, this will usually not
+	 *  handle mismatched symbol exceptions but there could be a mismatched
+	 *  token that the match() routine could not recover from.
 	 */
 	public void recover(IntStream input, RecognitionException re) {
-		if ( lastErrorIndex==input.index() ) {
+		if ( state.lastErrorIndex==input.index() ) {
 			// uh oh, another error at same token index; must be a case
 			// where LT(1) is in the recovery token set so nothing is
 			// consumed; consume a single token so at least to prevent
 			// an infinite loop; this is a failsafe.
 			input.consume();
 		}
-		lastErrorIndex = input.index();
+		state.lastErrorIndex = input.index();
 		BitSet followSet = computeErrorRecoverySet();
 		beginResync();
 		consumeUntil(input, followSet);
@@ -445,20 +523,29 @@ public abstract class BaseRecognizer {
 	}
 
 	protected BitSet combineFollows(boolean exact) {
-		int top = _fsp;
+		int top = state._fsp;
 		BitSet followSet = new BitSet();
 		for (int i=top; i>=0; i--) {
-			BitSet localFollowSet = (BitSet) following[i];
+			BitSet localFollowSet = (BitSet)state.following[i];
 			/*
 			System.out.println("local follow depth "+i+"="+
 							   localFollowSet.toString(getTokenNames())+")");
-			*/
+			 */
 			followSet.orInPlace(localFollowSet);
-			if ( exact && !localFollowSet.member(Token.EOR_TOKEN_TYPE) ) {
-				break;
+			if ( exact ) {
+				// can we see end of rule?
+				if ( localFollowSet.member(Token.EOR_TOKEN_TYPE) ) {
+					// Only leave EOR in set if at top (start rule); this lets
+					// us know if have to include follow(start rule); i.e., EOF
+					if ( i>0 ) {
+						followSet.remove(Token.EOR_TOKEN_TYPE);
+					}
+				}
+				else { // can't see end of rule, quit
+					break;
+				}
 			}
 		}
-		followSet.remove(Token.EOR_TOKEN_TYPE);
 		return followSet;
 	}
 
@@ -491,74 +578,91 @@ public abstract class BaseRecognizer {
 	 *  is in the set of tokens that can follow the ')' token
 	 *  reference in rule atom.  It can assume that you forgot the ')'.
 	 */
-	public void recoverFromMismatchedToken(IntStream input,
-										   RecognitionException e,
-										   int ttype,
-										   BitSet follow)
+	protected Object recoverFromMismatchedToken(IntStream input, int ttype, BitSet follow)
 		throws RecognitionException
 	{
-		System.err.println("BR.recoverFromMismatchedToken");		
+		RecognitionException e = null;
 		// if next token is what we are looking for then "delete" this token
-		if ( input.LA(2)==ttype ) {
-			reportError(e);
+		if ( mismatchIsUnwantedToken(input, ttype) ) {
+			e = new UnwantedTokenException(ttype, input);
 			/*
-			System.err.println("recoverFromMismatchedToken deleting "+input.LT(1)+
-							   " since "+input.LT(2)+" is what we want");
-			*/
+			System.err.println("recoverFromMismatchedToken deleting "+
+							   ((TokenStream)input).LT(1)+
+							   " since "+((TokenStream)input).LT(2)+" is what we want");
+			 */
 			beginResync();
 			input.consume(); // simply delete extra token
 			endResync();
+			reportError(e);  // report after consuming so AW sees the token in the exception
+			// we want to return the token we're actually matching
+			Object matchedSymbol = getCurrentInputSymbol(input);
 			input.consume(); // move past ttype token as if all were ok
-			return;
+			return matchedSymbol;
 		}
-		if ( !recoverFromMismatchedElement(input,e,follow) ) {
-			throw e;
+		// can't recover with single token deletion, try insertion
+		if ( mismatchIsMissingToken(input, follow) ) {
+			Object inserted = getMissingSymbol(input, e, ttype, follow);
+			e = new MissingTokenException(ttype, input, inserted);
+			reportError(e);  // report after inserting so AW sees the token in the exception
+			return inserted;
 		}
+		// even that didn't work; must throw the exception
+		e = new MismatchedTokenException(ttype, input);
+		throw e;
 	}
 
-	public void recoverFromMismatchedSet(IntStream input,
-										 RecognitionException e,
-										 BitSet follow)
+	/** Not currently used */
+	public Object recoverFromMismatchedSet(IntStream input,
+										   RecognitionException e,
+										   BitSet follow)
 		throws RecognitionException
 	{
-		// TODO do single token deletion like above for Token mismatch
-		if ( !recoverFromMismatchedElement(input,e,follow) ) {
-			throw e;
+		if ( mismatchIsMissingToken(input, follow) ) {
+			// System.out.println("missing token");
+			reportError(e);
+			// we don't know how to conjure up a token for sets yet
+			return getMissingSymbol(input, e, Token.INVALID_TOKEN_TYPE, follow);
 		}
+		// TODO do single token deletion like above for Token mismatch
+		throw e;
 	}
 
-	/** This code is factored out from mismatched token and mismatched set
-	 *  recovery.  It handles "single token insertion" error recovery for
-	 *  both.  No tokens are consumed to recover from insertions.  Return
-	 *  true if recovery was possible else return false.
+	/** Match needs to return the current input symbol, which gets put
+	 *  into the label for the associated token ref; e.g., x=ID.  Token
+	 *  and tree parsers need to return different objects. Rather than test
+	 *  for input stream type or change the IntStream interface, I use
+	 *  a simple method to ask the recognizer to tell me what the current
+	 *  input symbol is.
+	 * 
+	 *  This is ignored for lexers.
+	 */
+	protected Object getCurrentInputSymbol(IntStream input) { return null; }
+
+	/** Conjure up a missing token during error recovery.
+	 *
+	 *  The recognizer attempts to recover from single missing
+	 *  symbols. But, actions might refer to that missing symbol.
+	 *  For example, x=ID {f($x);}. The action clearly assumes
+	 *  that there has been an identifier matched previously and that
+	 *  $x points at that token. If that token is missing, but
+	 *  the next token in the stream is what we want we assume that
+	 *  this token is missing and we keep going. Because we
+	 *  have to return some token to replace the missing token,
+	 *  we have to conjure one up. This method gives the user control
+	 *  over the tokens returned for missing tokens. Mostly,
+	 *  you will want to create something special for identifier
+	 *  tokens. For literals such as '{' and ',', the default
+	 *  action in the parser or tree parser works. It simply creates
+	 *  a CommonToken of the appropriate type. The text will be the token.
+	 *  If you change what tokens must be created by the lexer,
+	 *  override this method to create the appropriate tokens.
 	 */
-	protected boolean recoverFromMismatchedElement(IntStream input,
-												   RecognitionException e,
-												   BitSet follow)
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
 	{
-		if ( follow==null ) {
-			// we have no information about the follow; we can only consume
-			// a single token and hope for the best
-			return false;
-		}
-		//System.out.println("recoverFromMismatchedElement");
-		// compute what can follow this grammar element reference
-		if ( follow.member(Token.EOR_TOKEN_TYPE) ) {
-			BitSet viableTokensFollowingThisRule =
-				computeContextSensitiveRuleFOLLOW();
-			follow = follow.or(viableTokensFollowingThisRule);
-			follow.remove(Token.EOR_TOKEN_TYPE);
-		}
-		// if current token is consistent with what could come after set
-		// then it is ok to "insert" the missing token, else throw exception
-		//System.out.println("viable tokens="+follow.toString(getTokenNames())+")");
-		if ( follow.member(input.LA(1)) ) {
-			//System.out.println("LT(1)=="+input.LT(1)+" is consistent with what follows; inserting...");
-			reportError(e);
-			return true;
-		}
-		//System.err.println("nothing to do; throw exception");
-		return false;
+		return null;
 	}
 
 	public void consumeUntil(IntStream input, int tokenType) {
@@ -583,12 +687,12 @@ public abstract class BaseRecognizer {
 
 	/** Push a rule's follow set using our own hardcoded stack */
 	protected void pushFollow(BitSet fset) {
-		if ( (_fsp +1)>=following.length ) {
-			BitSet[] f = new BitSet[following.length*2];
-			System.arraycopy(following, 0, f, 0, following.length-1);
-			following = f;
+		if ( (state._fsp +1)>=state.following.length ) {
+			BitSet[] f = new BitSet[state.following.length*2];
+			System.arraycopy(state.following, 0, f, 0, state.following.length);
+			state.following = f;
 		}
-		following[++_fsp] = fset;
+		state.following[++state._fsp] = fset;
 	}
 
 	/** Return List<String> of the rules in your parser instance
@@ -633,9 +737,12 @@ public abstract class BaseRecognizer {
 		return rules;
 	}
 
-	public int getBacktrackingLevel() {
-		return backtracking;
-	}
+    public int getBacktrackingLevel() { return state.backtracking; }
+
+    public void setBacktrackingLevel(int n) { state.backtracking = n; }
+
+    /** Return whether or not a backtracking attempt failed. */
+    public boolean failed() { return state.failed; }
 
 	/** Used to print out token names like ID during debugging and
 	 *  error reporting.  The generated parsers implement a method
@@ -652,6 +759,8 @@ public abstract class BaseRecognizer {
 		return null;
 	}
 
+	public abstract String getSourceName();
+
 	/** A convenience method for use most often with template rewrites.
 	 *  Convert a List<Token> to List<String>
 	 */
@@ -664,25 +773,6 @@ public abstract class BaseRecognizer {
 		return strings;
 	}
 
-	/** Convert a List<RuleReturnScope> to List<StringTemplate> by copying
-	 *  out the .st property.  Useful when converting from
-	 *  list labels to template attributes:
-	 *
-	 *    a : ids+=rule -> foo(ids={toTemplates($ids)})
-	 *      ;
-	 *  TJP: this is not needed anymore.  $ids is a List of templates
-	 *  when output=template
-	 * 
-	public List toTemplates(List retvals) {
-		if ( retvals==null ) return null;
-		List strings = new ArrayList(retvals.size());
-		for (int i=0; i<retvals.size(); i++) {
-			strings.add(((RuleReturnScope)retvals.get(i)).getTemplate());
-		}
-		return strings;
-	}
-	 */
-
 	/** Given a rule number and a start token index number, return
 	 *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
 	 *  start index.  If this rule has parsed input starting from the
@@ -694,11 +784,11 @@ public abstract class BaseRecognizer {
 	 *  tosses out data after we commit past input position i.
 	 */
 	public int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
-		if ( ruleMemo[ruleIndex]==null ) {
-			ruleMemo[ruleIndex] = new HashMap();
+		if ( state.ruleMemo[ruleIndex]==null ) {
+			state.ruleMemo[ruleIndex] = new HashMap();
 		}
 		Integer stopIndexI =
-			(Integer)ruleMemo[ruleIndex].get(new Integer(ruleStartIndex));
+			(Integer)state.ruleMemo[ruleIndex].get(new Integer(ruleStartIndex));
 		if ( stopIndexI==null ) {
 			return MEMO_RULE_UNKNOWN;
 		}
@@ -721,10 +811,10 @@ public abstract class BaseRecognizer {
 		}
 		if ( stopIndex==MEMO_RULE_FAILED ) {
 			//System.out.println("rule "+ruleIndex+" will never succeed");
-			failed=true;
+			state.failed=true;
 		}
 		else {
-			//System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+failed);
+			//System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+state.failed);
 			input.seek(stopIndex+1); // jump to one past stop token
 		}
 		return true;
@@ -737,43 +827,27 @@ public abstract class BaseRecognizer {
 						int ruleIndex,
 						int ruleStartIndex)
 	{
-		int stopTokenIndex = failed?MEMO_RULE_FAILED:input.index()-1;
-		if ( ruleMemo[ruleIndex]!=null ) {
-			ruleMemo[ruleIndex].put(
+		int stopTokenIndex = state.failed?MEMO_RULE_FAILED:input.index()-1;
+		if ( state.ruleMemo==null ) {
+			System.err.println("!!!!!!!!! memo array is null for "+ getGrammarFileName());
+		}
+		if ( ruleIndex >= state.ruleMemo.length ) {
+			System.err.println("!!!!!!!!! memo size is "+state.ruleMemo.length+", but rule index is "+ruleIndex);
+		}
+		if ( state.ruleMemo[ruleIndex]!=null ) {
+			state.ruleMemo[ruleIndex].put(
 				new Integer(ruleStartIndex), new Integer(stopTokenIndex)
 			);
 		}
 	}
 
-	/** Assume failure in case a rule bails out with an exception.
-	 *  Reset to rule stop index if successful.
-	public void memoizeFailure(int ruleIndex, int ruleStartIndex) {
-		ruleMemo[ruleIndex].put(
-			new Integer(ruleStartIndex), MEMO_RULE_FAILED_I
-		);
-	}
-	 */
-
-	/** After successful completion of a rule, record success for this
-	 *  rule and that it can skip ahead next time it attempts this
-	 *  rule for this input position.
-	public void memoizeSuccess(IntStream input,
-							   int ruleIndex,
-							   int ruleStartIndex)
-	{
-		ruleMemo[ruleIndex].put(
-			new Integer(ruleStartIndex), new Integer(input.index()-1)
-		);
-	}
-	 */
-
 	/** return how many rule/input-index pairs there are in total.
 	 *  TODO: this includes synpreds. :(
 	 */
 	public int getRuleMemoizationCacheSize() {
 		int n = 0;
-		for (int i = 0; ruleMemo!=null && i < ruleMemo.length; i++) {
-			Map ruleMap = ruleMemo[i];
+		for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) {
+			Map ruleMap = state.ruleMemo[i];
 			if ( ruleMap!=null ) {
 				n += ruleMap.size(); // how many input indexes are recorded?
 			}
@@ -783,11 +857,8 @@ public abstract class BaseRecognizer {
 
 	public void traceIn(String ruleName, int ruleIndex, Object inputSymbol)  {
 		System.out.print("enter "+ruleName+" "+inputSymbol);
-		if ( failed ) {
-			System.out.println(" failed="+failed);
-		}
-		if ( backtracking>0 ) {
-			System.out.print(" backtracking="+backtracking);
+		if ( state.backtracking>0 ) {
+			System.out.print(" backtracking="+state.backtracking);
 		}
 		System.out.println();
 	}
@@ -797,35 +868,12 @@ public abstract class BaseRecognizer {
 						 Object inputSymbol)
 	{
 		System.out.print("exit "+ruleName+" "+inputSymbol);
-		if ( failed ) {
-			System.out.println(" failed="+failed);
-		}
-		if ( backtracking>0 ) {
-			System.out.print(" backtracking="+backtracking);
-		}
+		if ( state.backtracking>0 ) {
+            System.out.print(" backtracking="+state.backtracking);
+            if ( state.failed ) System.out.print(" failed");
+            else System.out.print(" succeeded");
+        }
 		System.out.println();
 	}
 
-	/** A syntactic predicate.  Returns true/false depending on whether
-	 *  the specified grammar fragment matches the current input stream.
-	 *  This resets the failed instance var afterwards.
-	public boolean synpred(IntStream input, GrammarFragmentPtr fragment) {
-		//int i = input.index();
-		//System.out.println("begin backtracking="+backtracking+" @"+i+"="+((CommonTokenStream)input).LT(1));
-		backtracking++;
-		beginBacktrack(backtracking);
-		int start = input.mark();
-		try {fragment.invoke();}
-		catch (RecognitionException re) {
-			System.err.println("impossible: "+re);
-		}
-		boolean success = !failed;
-		input.rewind(start);
-		endBacktrack(backtracking, success);
-		backtracking--;
-		//System.out.println("end backtracking="+backtracking+": "+(failed?"FAILED":"SUCCEEDED")+" @"+input.index()+" should be "+i);
-		failed=false;
-		return success;
-	}
-	 */
 }
diff --git a/runtime/Java/src/org/antlr/runtime/BitSet.java b/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
similarity index 99%
rename from runtime/Java/src/org/antlr/runtime/BitSet.java
rename to runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
index 99bf83b..2a19675 100644
--- a/runtime/Java/src/org/antlr/runtime/BitSet.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -59,6 +59,7 @@ public class BitSet implements Cloneable {
 
 	/** Construction from a list of integers */
 	public BitSet(List items) {
+		this();
 		for (int i = 0; i < items.size(); i++) {
 			Integer v = (Integer) items.get(i);
 			add(v.intValue());
diff --git a/runtime/Java/src/org/antlr/runtime/CharStream.java b/runtime/Java/src/main/java/org/antlr/runtime/CharStream.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/CharStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/CharStream.java
index fe0d406..668d71a 100644
--- a/runtime/Java/src/org/antlr/runtime/CharStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/CharStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/CharStreamState.java b/runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/CharStreamState.java
rename to runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java
index 5bcf116..f8a206e 100644
--- a/runtime/Java/src/org/antlr/runtime/CharStreamState.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/ClassicToken.java b/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
similarity index 60%
rename from runtime/Java/src/org/antlr/runtime/ClassicToken.java
rename to runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
index abda501..78a5f9d 100644
--- a/runtime/Java/src/org/antlr/runtime/ClassicToken.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime;
 
 /** A Token object like we'd use in ANTLR 2.x; has an actual string created
@@ -88,6 +115,13 @@ public class ClassicToken implements Token {
 		this.index = index;
 	}
 
+	public CharStream getInputStream() {
+		return null;
+	}
+
+	public void setInputStream(CharStream input) {
+	}
+	
 	public String toString() {
 		String channelStr = "";
 		if ( channel>0 ) {
diff --git a/runtime/Java/src/org/antlr/runtime/CommonToken.java b/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
similarity index 93%
rename from runtime/Java/src/org/antlr/runtime/CommonToken.java
rename to runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
index 89f63fb..0e252ea 100644
--- a/runtime/Java/src/org/antlr/runtime/CommonToken.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@ public class CommonToken implements Token, Serializable {
 	protected int charPositionInLine = -1; // set to invalid position
 	protected int channel=DEFAULT_CHANNEL;
 	protected transient CharStream input;
+
 	/** We need to be able to change the text once in a while.  If
 	 *  this is non-null, then getText should return this.  Note that
 	 *  start/stop are not affected by changing this.
@@ -75,6 +76,10 @@ public class CommonToken implements Token, Serializable {
 		index = oldToken.getTokenIndex();
 		charPositionInLine = oldToken.getCharPositionInLine();
 		channel = oldToken.getChannel();
+		if ( oldToken instanceof CommonToken ) {
+			start = ((CommonToken)oldToken).start;
+			stop = ((CommonToken)oldToken).stop;
+		}
 	}
 
 	public int getType() {
@@ -153,6 +158,14 @@ public class CommonToken implements Token, Serializable {
 		this.index = index;
 	}
 
+	public CharStream getInputStream() {
+		return input;
+	}
+
+	public void setInputStream(CharStream input) {
+		this.input = input;
+	}
+
 	public String toString() {
 		String channelStr = "";
 		if ( channel>0 ) {
diff --git a/runtime/Java/src/org/antlr/runtime/CommonTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/CommonTokenStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
index bb26ad3..ec85ba9 100644
--- a/runtime/Java/src/org/antlr/runtime/CommonTokenStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -328,6 +328,11 @@ public class CommonTokenStream implements TokenStream {
 		seek(lastMarker);
 	}
 
+	public void reset() {
+		p = 0;
+		lastMarker = 0;
+	}
+	
 	public void seek(int index) {
 		p = index;
 	}
@@ -336,6 +341,10 @@ public class CommonTokenStream implements TokenStream {
 		return tokenSource;
 	}
 
+	public String getSourceName() {
+		return getTokenSource().getSourceName();
+	}
+
 	public String toString() {
 		if ( p == -1 ) {
 			fillBuffer();
diff --git a/runtime/Java/src/org/antlr/runtime/DFA.java b/runtime/Java/src/main/java/org/antlr/runtime/DFA.java
similarity index 74%
rename from runtime/Java/src/org/antlr/runtime/DFA.java
rename to runtime/Java/src/main/java/org/antlr/runtime/DFA.java
index 5abec4b..d4ae12c 100644
--- a/runtime/Java/src/org/antlr/runtime/DFA.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/DFA.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime;
 
 /** A DFA implemented as a set of transition tables.
@@ -35,6 +62,9 @@ public class DFA {
 	public int predict(IntStream input)
 		throws RecognitionException
 	{
+		if ( debug ) {
+			System.err.println("Enter DFA.predict for decision "+decisionNumber);
+		}
 		int mark = input.mark(); // remember where decision started in input
 		int s = 0; // we always start at s0
 		try {
@@ -43,9 +73,19 @@ public class DFA {
 												"), index="+input.index());
 				int specialState = special[s];
 				if ( specialState>=0 ) {
-					if ( debug ) System.err.println("DFA "+decisionNumber+
-						" state "+s+" is special state "+specialState);
+					if ( debug ) {
+						System.err.println("DFA "+decisionNumber+
+							" state "+s+" is special state "+specialState);
+					}
 					s = specialStateTransition(specialState,input);
+					if ( debug ) {
+						System.err.println("DFA "+decisionNumber+
+							" returns from special state "+specialState+" to "+s);
+					}
+					if ( s==-1 ) {
+						noViableAlt(s,input);
+						return 0;
+					}
 					input.consume();
 					continue;
 				}
@@ -111,8 +151,8 @@ public class DFA {
 	}
 
 	protected void noViableAlt(int s, IntStream input) throws NoViableAltException {
-		if (recognizer.backtracking>0) {
-			recognizer.failed=true;
+		if (recognizer.state.backtracking>0) {
+			recognizer.state.failed=true;
 			return;
 		}
 		NoViableAltException nvae =
@@ -181,7 +221,9 @@ public class DFA {
 		return data;
 	}
 
+	/*
 	public int specialTransition(int state, int symbol) {
 		return 0;
 	}
+	*/
 }
diff --git a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java b/runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java
similarity index 97%
copy from runtime/Java/src/org/antlr/runtime/EarlyExitException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java
index 29f0865..535e77a 100644
--- a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/FailedPredicateException.java b/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/FailedPredicateException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
index b90fe0b..b330c49 100644
--- a/runtime/Java/src/org/antlr/runtime/FailedPredicateException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/IntStream.java b/runtime/Java/src/main/java/org/antlr/runtime/IntStream.java
similarity index 95%
rename from runtime/Java/src/org/antlr/runtime/IntStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/IntStream.java
index d6c5ab3..63e2f48 100644
--- a/runtime/Java/src/org/antlr/runtime/IntStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/IntStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -113,4 +113,10 @@ public interface IntStream {
 	 *  value includes a single EOF.
 	 */
 	int size();
+
+	/** Where are you getting symbols from?  Normally, implementations will
+	 *  pass the buck all the way to the lexer who can ask its input stream
+	 *  for the file name or whatever.
+	 */
+	public String getSourceName();
 }
diff --git a/runtime/Java/src/org/antlr/runtime/Lexer.java b/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
similarity index 69%
rename from runtime/Java/src/org/antlr/runtime/Lexer.java
rename to runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
index 9f069a8..6978519 100644
--- a/runtime/Java/src/org/antlr/runtime/Lexer.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -34,40 +34,7 @@ package org.antlr.runtime;
  */
 public abstract class Lexer extends BaseRecognizer implements TokenSource {
 	/** Where is the lexer drawing characters from? */
-    protected CharStream input;
-
-	/** The goal of all lexer rules/methods is to create a token object.
-	 *  This is an instance variable as multiple rules may collaborate to
-	 *  create a single token.  nextToken will return this object after
-	 *  matching lexer rule(s).  If you subclass to allow multiple token
-	 *  emissions, then set this to the last token to be matched or
-	 *  something nonnull so that the auto token emit mechanism will not
-	 *  emit another token.
-	 */
-    protected Token token;
-
-	/** What character index in the stream did the current token start at?
-	 *  Needed, for example, to get the text for current token.  Set at
-	 *  the start of nextToken.
- 	 */
-	protected int tokenStartCharIndex = -1;
-
-	/** The line on which the first character of the token resides */
-	protected int tokenStartLine;
-
-	/** The character position of first character within the line */
-	protected int tokenStartCharPositionInLine;
-
-	/** The channel number for the current token */
-	protected int channel;
-
-	/** The token type for the current token */
-	protected int type;
-
-	/** You can set the text for the current token to override what is in
-	 *  the input char buffer.  Use setText() or can set this instance var.
- 	 */
-	protected String text;
+	protected CharStream input;
 
 	public Lexer() {
 	}
@@ -76,51 +43,63 @@ public abstract class Lexer extends BaseRecognizer implements TokenSource {
 		this.input = input;
 	}
 
+	public Lexer(CharStream input, RecognizerSharedState state) {
+		super(state);
+		this.input = input;
+	}
+
 	public void reset() {
 		super.reset(); // reset all recognizer state variables
 		// wack Lexer state variables
-		token = null;
-		type = Token.INVALID_TOKEN_TYPE;
-		channel = Token.DEFAULT_CHANNEL;
-		tokenStartCharIndex = -1;
-		tokenStartCharPositionInLine = -1;
-		tokenStartLine = -1;
-		text = null;
 		if ( input!=null ) {
 			input.seek(0); // rewind the input
 		}
+		if ( state==null ) {
+			return; // no shared state work to do
+		}
+		state.token = null;
+		state.type = Token.INVALID_TOKEN_TYPE;
+		state.channel = Token.DEFAULT_CHANNEL;
+		state.tokenStartCharIndex = -1;
+		state.tokenStartCharPositionInLine = -1;
+		state.tokenStartLine = -1;
+		state.text = null;
 	}
 
 	/** Return a token from this source; i.e., match a token on the char
 	 *  stream.
 	 */
-    public Token nextToken() {
+	public Token nextToken() {
 		while (true) {
-			token = null;
-			channel = Token.DEFAULT_CHANNEL;
-			tokenStartCharIndex = input.index();
-			tokenStartCharPositionInLine = input.getCharPositionInLine();
-			tokenStartLine = input.getLine();
-			text = null;
+			state.token = null;
+			state.channel = Token.DEFAULT_CHANNEL;
+			state.tokenStartCharIndex = input.index();
+			state.tokenStartCharPositionInLine = input.getCharPositionInLine();
+			state.tokenStartLine = input.getLine();
+			state.text = null;
 			if ( input.LA(1)==CharStream.EOF ) {
-                return Token.EOF_TOKEN;
-            }
-            try {
-                mTokens();
-				if ( token==null ) {
+				return Token.EOF_TOKEN;
+			}
+			try {
+				mTokens();
+				if ( state.token==null ) {
 					emit();
 				}
-				else if ( token==Token.SKIP_TOKEN ) {
+				else if ( state.token==Token.SKIP_TOKEN ) {
 					continue;
 				}
-				return token;
+				return state.token;
 			}
-            catch (RecognitionException re) {
-                reportError(re);
-                recover(re);
-            }
-        }
-    }
+			catch (NoViableAltException nva) {
+				reportError(nva);
+				recover(nva); // throw out current char and try again
+			}
+			catch (RecognitionException re) {
+				reportError(re);
+				// match() routine has already called recover()
+			}
+		}
+	}
 
 	/** Instruct the lexer to skip creating a token for current lexer rule
 	 *  and look for another token.  nextToken() knows to keep looking when
@@ -129,7 +108,7 @@ public abstract class Lexer extends BaseRecognizer implements TokenSource {
 	 *  and emits it.
 	 */
 	public void skip() {
-		token = Token.SKIP_TOKEN;
+		state.token = Token.SKIP_TOKEN;
 	}
 
 	/** This is the lexer entry point that sets instance var 'token' */
@@ -142,13 +121,21 @@ public abstract class Lexer extends BaseRecognizer implements TokenSource {
 		this.input = input;
 	}
 
+	public CharStream getCharStream() {
+		return this.input;
+	}
+
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
 	/** Currently does not support multiple emits per nextToken invocation
 	 *  for efficiency reasons.  Subclass and override this method and
 	 *  nextToken (to push tokens into a list and pull from that list rather
 	 *  than a single variable as this implementation does).
 	 */
 	public void emit(Token token) {
-		this.token = token;
+		state.token = token;
 	}
 
 	/** The standard method called to automatically emit a token at the
@@ -156,78 +143,81 @@ public abstract class Lexer extends BaseRecognizer implements TokenSource {
 	 *  char buffer start..stop.  If there is a text override in 'text',
 	 *  use that to set the token's text.  Override this method to emit
 	 *  custom Token objects.
+	 *
+	 *  If you are building trees, then you should also override
+	 *  Parser or TreeParser.getMissingSymbol().
 	 */
 	public Token emit() {
-		Token t = new CommonToken(input, type, channel, tokenStartCharIndex, getCharIndex()-1);
-		t.setLine(tokenStartLine);
-		t.setText(text);
-		t.setCharPositionInLine(tokenStartCharPositionInLine);
+		Token t = new CommonToken(input, state.type, state.channel, state.tokenStartCharIndex, getCharIndex()-1);
+		t.setLine(state.tokenStartLine);
+		t.setText(state.text);
+		t.setCharPositionInLine(state.tokenStartCharPositionInLine);
 		emit(t);
 		return t;
 	}
 
 	public void match(String s) throws MismatchedTokenException {
-        int i = 0;
-        while ( i<s.length() ) {
-            if ( input.LA(1)!=s.charAt(i) ) {
-				if ( backtracking>0 ) {
-					failed = true;
+		int i = 0;
+		while ( i<s.length() ) {
+			if ( input.LA(1)!=s.charAt(i) ) {
+				if ( state.backtracking>0 ) {
+					state.failed = true;
 					return;
 				}
 				MismatchedTokenException mte =
 					new MismatchedTokenException(s.charAt(i), input);
 				recover(mte);
 				throw mte;
-            }
-            i++;
-            input.consume();
-			failed = false;
-        }
-    }
+			}
+			i++;
+			input.consume();
+			state.failed = false;
+		}
+	}
 
-    public void matchAny() {
-        input.consume();
-    }
+	public void matchAny() {
+		input.consume();
+	}
 
-    public void match(int c) throws MismatchedTokenException {
-        if ( input.LA(1)!=c ) {
-			if ( backtracking>0 ) {
-				failed = true;
+	public void match(int c) throws MismatchedTokenException {
+		if ( input.LA(1)!=c ) {
+			if ( state.backtracking>0 ) {
+				state.failed = true;
 				return;
 			}
 			MismatchedTokenException mte =
 				new MismatchedTokenException(c, input);
-			recover(mte);
+			recover(mte);  // don't really recover; just consume in lexer
 			throw mte;
-        }
-        input.consume();
-		failed = false;
-    }
+		}
+		input.consume();
+		state.failed = false;
+	}
 
-    public void matchRange(int a, int b)
+	public void matchRange(int a, int b)
 		throws MismatchedRangeException
 	{
-        if ( input.LA(1)<a || input.LA(1)>b ) {
-			if ( backtracking>0 ) {
-				failed = true;
+		if ( input.LA(1)<a || input.LA(1)>b ) {
+			if ( state.backtracking>0 ) {
+				state.failed = true;
 				return;
 			}
-            MismatchedRangeException mre =
+			MismatchedRangeException mre =
 				new MismatchedRangeException(a,b,input);
 			recover(mre);
 			throw mre;
-        }
-        input.consume();
-		failed = false;
-    }
+		}
+		input.consume();
+		state.failed = false;
+	}
 
-    public int getLine() {
-        return input.getLine();
-    }
+	public int getLine() {
+		return input.getLine();
+	}
 
-    public int getCharPositionInLine() {
-        return input.getCharPositionInLine();
-    }
+	public int getCharPositionInLine() {
+		return input.getCharPositionInLine();
+	}
 
 	/** What is the index of the current character of lookahead? */
 	public int getCharIndex() {
@@ -238,17 +228,17 @@ public abstract class Lexer extends BaseRecognizer implements TokenSource {
 	 *  text override.
 	 */
 	public String getText() {
-		if ( text!=null ) {
-			return text;
+		if ( state.text!=null ) {
+			return state.text;
 		}
-		return input.substring(tokenStartCharIndex,getCharIndex()-1);
+		return input.substring(state.tokenStartCharIndex,getCharIndex()-1);
 	}
 
 	/** Set the complete text of this token; it wipes any previous
 	 *  changes to the text.
 	 */
 	public void setText(String text) {
-		this.text = text;
+		state.text = text;
 	}
 
 	public void reportError(RecognitionException e) {
@@ -295,7 +285,7 @@ public abstract class Lexer extends BaseRecognizer implements TokenSource {
 		else if ( e instanceof MismatchedRangeException ) {
 			MismatchedRangeException mre = (MismatchedRangeException)e;
 			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+
-				getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b);
+				  getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b);
 		}
 		else {
 			msg = super.getErrorMessage(e, tokenNames);
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
similarity index 97%
copy from runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
index 373b123..694e81a 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
similarity index 93%
copy from runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
index b048aaf..716f51c 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -30,6 +30,9 @@ package org.antlr.runtime;
 public class MismatchedRangeException extends RecognitionException {
 	public int a,b;
 
+	/** Used for remote debugger deserialization */
+	public MismatchedRangeException() {;}
+
 	public MismatchedRangeException(int a, int b, IntStream input) {
 		super(input);
 		this.a = a;
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedSetException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
similarity index 97%
rename from runtime/Java/src/org/antlr/runtime/MismatchedSetException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
index 5794b08..ffd53cd 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedSetException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
similarity index 88%
copy from runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
index 97a7d34..2b3a75b 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,11 +27,12 @@
 */
 package org.antlr.runtime;
 
+/** A mismatched char or Token or tree node */
 public class MismatchedTokenException extends RecognitionException {
-	public int expecting;
+	public int expecting = Token.INVALID_TOKEN_TYPE;
 
-	public MismatchedTokenException() {
-	}
+	/** Used for remote debugger deserialization */
+	public MismatchedTokenException() {;}
 
 	public MismatchedTokenException(int expecting, IntStream input) {
 		super(input);
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
similarity index 79%
copy from runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
index 97a7d34..d6ff998 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,18 +27,23 @@
 */
 package org.antlr.runtime;
 
-public class MismatchedTokenException extends RecognitionException {
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.runtime.tree.Tree;
+
+/**
+ */
+public class MismatchedTreeNodeException extends RecognitionException {
 	public int expecting;
 
-	public MismatchedTokenException() {
+	public MismatchedTreeNodeException() {
 	}
 
-	public MismatchedTokenException(int expecting, IntStream input) {
+	public MismatchedTreeNodeException(int expecting, TreeNodeStream input) {
 		super(input);
 		this.expecting = expecting;
 	}
 
 	public String toString() {
-		return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
+		return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")";
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java b/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
similarity index 68%
copy from runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
index 373b123..d6d45da 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,15 +27,30 @@
 */
 package org.antlr.runtime;
 
-public class MismatchedNotSetException extends MismatchedSetException {
+/** We were expecting a token but it's not found.  The current token
+ *  is actually what we wanted next.  Used for tree node errors too.
+ */
+public class MissingTokenException extends MismatchedTokenException {
+	public Object inserted;
 	/** Used for remote debugger deserialization */
-	public MismatchedNotSetException() {;}
+	public MissingTokenException() {;}
 
-	public MismatchedNotSetException(BitSet expecting, IntStream input) {
+	public MissingTokenException(int expecting, IntStream input, Object inserted) {
 		super(expecting, input);
+		this.inserted = inserted;
+	}
+
+	public int getMissingType() {
+		return expecting;
 	}
 
 	public String toString() {
-		return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")";
+		if ( inserted!=null && token!=null ) {
+			return "MissingTokenException(inserted "+inserted+" at "+token.getText()+")";
+		}
+		if ( token!=null ) {
+			return "MissingTokenException(at "+token.getText()+")";
+		}
+		return "MissingTokenException";
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/NoViableAltException.java b/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
similarity index 88%
rename from runtime/Java/src/org/antlr/runtime/NoViableAltException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
index 02653ae..3074760 100644
--- a/runtime/Java/src/org/antlr/runtime/NoViableAltException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,11 @@ public class NoViableAltException extends RecognitionException {
 	}
 
 	public String toString() {
-		return "NoViableAltException("+getUnexpectedType()+"!=["+grammarDecisionDescription+"])";
+		if ( input instanceof CharStream ) {
+			return "NoViableAltException('"+(char)getUnexpectedType()+"'@["+grammarDecisionDescription+"])";
+		}
+		else {
+			return "NoViableAltException("+getUnexpectedType()+"@["+grammarDecisionDescription+"])";
+		}
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/Parser.java b/runtime/Java/src/main/java/org/antlr/runtime/Parser.java
similarity index 65%
copy from runtime/Java/src/org/antlr/runtime/Parser.java
copy to runtime/Java/src/main/java/org/antlr/runtime/Parser.java
index 1000a52..a8dfa80 100644
--- a/runtime/Java/src/org/antlr/runtime/Parser.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/Parser.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,16 @@ package org.antlr.runtime;
  *  of this.
  */
 public class Parser extends BaseRecognizer {
-    protected TokenStream input;
+	public TokenStream input;
 
 	public Parser(TokenStream input) {
-        setTokenStream(input);
+		super(); // highlight that we go to super to set state object
+		setTokenStream(input);
+    }
+
+	public Parser(TokenStream input, RecognizerSharedState state) {
+		super(state); // share the state object with another parser
+		setTokenStream(input);
     }
 
 	public void reset() {
@@ -44,6 +50,29 @@ public class Parser extends BaseRecognizer {
 		}
 	}
 
+	protected Object getCurrentInputSymbol(IntStream input) {
+		return ((TokenStream)input).LT(1);
+	}
+
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		String tokenText = null;
+		if ( expectedTokenType==Token.EOF ) tokenText = "<missing EOF>";
+		else tokenText = "<missing "+getTokenNames()[expectedTokenType]+">";
+		CommonToken t = new CommonToken(expectedTokenType, tokenText);
+		Token current = ((TokenStream)input).LT(1);
+		if ( current.getType() == Token.EOF ) {
+			current = ((TokenStream)input).LT(-1);
+		}
+		t.line = current.getLine();
+		t.charPositionInLine = current.getCharPositionInLine();
+		t.channel = DEFAULT_TOKEN_CHANNEL;
+		return t;
+	}
+
 	/** Set the token stream and reset the parser */
 	public void setTokenStream(TokenStream input) {
 		this.input = null;
@@ -55,6 +84,10 @@ public class Parser extends BaseRecognizer {
 		return input;
 	}
 
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
 	public void traceIn(String ruleName, int ruleIndex)  {
 		super.traceIn(ruleName, ruleIndex, input.LT(1));
 	}
diff --git a/runtime/Java/src/org/antlr/runtime/ParserRuleReturnScope.java b/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
similarity index 91%
rename from runtime/Java/src/org/antlr/runtime/ParserRuleReturnScope.java
rename to runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
index 9299db9..e61cad8 100644
--- a/runtime/Java/src/org/antlr/runtime/ParserRuleReturnScope.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -42,8 +42,11 @@ package org.antlr.runtime;
  *  input streams.
  *
  *  I do not use getters for fields of objects that are used simply to
- *  group values such as this aggregate.
+ *  group values such as this aggregate.  The getters/setters are there to
+ *  satisfy the superclass interface.
  */
 public class ParserRuleReturnScope extends RuleReturnScope {
 	public Token start, stop;
+	public Object getStart() { return start; }
+	public Object getStop() { return stop; }
 }
diff --git a/runtime/Java/src/org/antlr/runtime/RecognitionException.java b/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
similarity index 99%
rename from runtime/Java/src/org/antlr/runtime/RecognitionException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
index 4ca52ba..fa34907 100644
--- a/runtime/Java/src/org/antlr/runtime/RecognitionException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java b/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java
new file mode 100644
index 0000000..8861512
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java
@@ -0,0 +1,144 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/package org.antlr.runtime;
+
+import java.util.Map;
+
+/** The set of fields needed by an abstract recognizer to recognize input
+ *  and recover from errors etc...  As a separate state object, it can be
+ *  shared among multiple grammars; e.g., when one grammar imports another.
+ *
+ *  These fields are publically visible but the actual state pointer per
+ *  parser is protected.
+ */
+public class RecognizerSharedState {
+	/** Track the set of token types that can follow any rule invocation.
+	 *  Stack grows upwards.  When it hits the max, it grows 2x in size
+	 *  and keeps going.
+	 */
+	public BitSet[] following = new BitSet[BaseRecognizer.INITIAL_FOLLOW_STACK_SIZE];
+	public int _fsp = -1;
+
+	/** This is true when we see an error and before having successfully
+	 *  matched a token.  Prevents generation of more than one error message
+	 *  per error.
+	 */
+	public boolean errorRecovery = false;
+
+	/** The index into the input stream where the last error occurred.
+	 * 	This is used to prevent infinite loops where an error is found
+	 *  but no token is consumed during recovery...another error is found,
+	 *  ad naseum.  This is a failsafe mechanism to guarantee that at least
+	 *  one token/tree node is consumed for two errors.
+	 */
+	public int lastErrorIndex = -1;
+
+	/** In lieu of a return value, this indicates that a rule or token
+	 *  has failed to match.  Reset to false upon valid token match.
+	 */
+	public boolean failed = false;
+
+	/** Did the recognizer encounter a syntax error?  Track how many. */
+	public int syntaxErrors = 0;
+
+	/** If 0, no backtracking is going on.  Safe to exec actions etc...
+	 *  If >0 then it's the level of backtracking.
+	 */
+	public int backtracking = 0;
+
+	/** An array[size num rules] of Map<Integer,Integer> that tracks
+	 *  the stop token index for each rule.  ruleMemo[ruleIndex] is
+	 *  the memoization table for ruleIndex.  For key ruleStartIndex, you
+	 *  get back the stop token for associated rule or MEMO_RULE_FAILED.
+	 *
+	 *  This is only used if rule memoization is on (which it is by default).
+	 */
+	public Map[] ruleMemo;
+
+
+	// LEXER FIELDS (must be in same state object to avoid casting
+	//               constantly in generated code and Lexer object) :(
+
+
+	/** The goal of all lexer rules/methods is to create a token object.
+	 *  This is an instance variable as multiple rules may collaborate to
+	 *  create a single token.  nextToken will return this object after
+	 *  matching lexer rule(s).  If you subclass to allow multiple token
+	 *  emissions, then set this to the last token to be matched or
+	 *  something nonnull so that the auto token emit mechanism will not
+	 *  emit another token.
+	 */
+    public Token token;
+
+	/** What character index in the stream did the current token start at?
+	 *  Needed, for example, to get the text for current token.  Set at
+	 *  the start of nextToken.
+ 	 */
+	public int tokenStartCharIndex = -1;
+
+	/** The line on which the first character of the token resides */
+	public int tokenStartLine;
+
+	/** The character position of first character within the line */
+	public int tokenStartCharPositionInLine;
+
+	/** The channel number for the current token */
+	public int channel;
+
+	/** The token type for the current token */
+	public int type;
+
+	/** You can set the text for the current token to override what is in
+	 *  the input char buffer.  Use setText() or can set this instance var.
+ 	 */
+	public String text;
+
+    public RecognizerSharedState() {;}
+    
+    public RecognizerSharedState(RecognizerSharedState state) {
+        if ( this.following.length < state.following.length ) {
+            this.following = new BitSet[state.following.length];
+        }
+        System.arraycopy(state.following, 0, this.following, 0, state.following.length);
+        this._fsp = state._fsp;
+        this.errorRecovery = state.errorRecovery;
+        this.lastErrorIndex = state.lastErrorIndex;
+        this.failed = state.failed;
+        this.syntaxErrors = state.syntaxErrors;
+        this.backtracking = state.backtracking;
+        if ( state.ruleMemo!=null ) {
+            this.ruleMemo = new Map[state.ruleMemo.length];
+            System.arraycopy(state.ruleMemo, 0, this.ruleMemo, 0, state.ruleMemo.length);
+        }
+        this.token = state.token;
+        this.tokenStartCharIndex = state.tokenStartCharIndex;
+        this.tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
+        this.channel = state.channel;
+        this.type = state.type;
+        this.text = state.text;
+    }
+}
diff --git a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java b/runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java
similarity index 71%
copy from runtime/Java/src/org/antlr/runtime/EarlyExitException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java
index 29f0865..5b80b85 100644
--- a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,15 +27,16 @@
 */
 package org.antlr.runtime;
 
-/**  The recognizer did not match anything for a (..)+ loop. */
-public class EarlyExitException extends RecognitionException {
-	public int decisionNumber;
-
-	/** Used for remote debugger deserialization */
-	public EarlyExitException() {;}
-	
-	public EarlyExitException(int decisionNumber, IntStream input) {
-		super(input);
-		this.decisionNumber = decisionNumber;
-	}
+/** Rules can return start/stop info as well as possible trees and templates */
+public class RuleReturnScope {
+	/** Return the start token or tree */
+	public Object getStart() { return null; }
+	/** Return the stop token or tree */
+	public Object getStop() { return null; }
+	/** Has a value potentially if output=AST; */
+	public Object getTree() { return null; }
+	/** Has a value potentially if output=template; Don't use StringTemplate
+	 *  type as it then causes a dependency with ST lib.
+	 */
+	public Object getTemplate() { return null; }
 }
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java b/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java
new file mode 100644
index 0000000..53522f9
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java
@@ -0,0 +1,170 @@
+package org.antlr.runtime;
+
+import java.io.IOException;
+import java.io.FileInputStream;
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.util.List;
+import java.util.ArrayList;
+
+public class SerializedGrammar {
+    public static final String COOKIE = "$ANTLR";
+    public static final int FORMAT_VERSION = 1;
+    //public static org.antlr.tool.Grammar gr; // TESTING ONLY; remove later
+
+    public String name;
+    public char type; // in {l, p, t, c}
+    public List rules;
+
+    class Rule {
+        String name;
+        Block block;
+        public Rule(String name, Block block) {
+            this.name = name;
+            this.block = block;
+        }
+        public String toString() {
+            return name+":"+block;
+        }
+    }
+
+    class Block {
+        List[] alts;
+        public Block(List[] alts) {
+            this.alts = alts;
+        }
+        public String toString() {
+            StringBuffer buf = new StringBuffer();
+            buf.append("(");
+            for (int i = 0; i < alts.length; i++) {
+                List alt = alts[i];
+                if ( i>0 ) buf.append("|");
+                buf.append(alt.toString());
+            }
+            buf.append(")");
+            return buf.toString();
+        }
+    }
+
+    class TokenRef {
+        int ttype;
+        public TokenRef(int ttype) { this.ttype = ttype; }
+        public String toString() { return String.valueOf(ttype); }
+    }
+
+    class RuleRef {
+        int ruleIndex;
+        public RuleRef(int ruleIndex) { this.ruleIndex = ruleIndex; }
+        public String toString() { return String.valueOf(ruleIndex); }
+    }
+
+    public SerializedGrammar(String filename) throws IOException {
+        System.out.println("loading "+filename);
+        FileInputStream fis = new FileInputStream(filename);
+        BufferedInputStream bos = new BufferedInputStream(fis);
+        DataInputStream in = new DataInputStream(bos);
+        readFile(in);
+        in.close();
+    }
+
+    protected void readFile(DataInputStream in) throws IOException {
+        String cookie = readString(in); // get $ANTLR
+        if ( !cookie.equals(COOKIE) ) throw new IOException("not a serialized grammar file");
+        int version = in.readByte();
+        char grammarType = (char)in.readByte();
+        this.type = grammarType;
+        String grammarName = readString(in);
+        this.name = grammarName;
+        System.out.println(grammarType+" grammar "+grammarName);
+        int numRules = in.readShort();
+        System.out.println("num rules = "+numRules);
+        rules = readRules(in, numRules);
+    }
+
+    protected List readRules(DataInputStream in, int numRules) throws IOException {
+        List rules = new ArrayList();
+        for (int i=0; i<numRules; i++) {
+            Rule r = readRule(in);
+            rules.add(r);
+        }
+        return rules;
+    }
+
+    protected Rule readRule(DataInputStream in) throws IOException {
+        byte R = in.readByte();
+        if ( R!='R' ) throw new IOException("missing R on start of rule");
+        String name = readString(in);
+        System.out.println("rule: "+name);
+        byte B = in.readByte();
+        Block b = readBlock(in);
+        byte period = in.readByte();
+        if ( period!='.' ) throw new IOException("missing . on end of rule");
+        return new Rule(name, b);
+    }
+
+    protected Block readBlock(DataInputStream in) throws IOException {
+        int nalts = in.readShort();
+        List[] alts = new List[nalts];
+        //System.out.println("enter block n="+nalts);
+        for (int i=0; i<nalts; i++) {
+            List alt = readAlt(in);
+            alts[i] = alt;
+        }
+        //System.out.println("exit block");
+        return new Block(alts);
+    }
+
+    protected List readAlt(DataInputStream in) throws IOException {
+        List alt = new ArrayList();
+        byte A = in.readByte();
+        if ( A!='A' ) throw new IOException("missing A on start of alt");
+        byte cmd = in.readByte();
+        while ( cmd!=';' ) {
+            switch (cmd) {
+                case 't' :
+                    int ttype = in.readShort();
+                    alt.add(new TokenRef(ttype));
+                    //System.out.println("read token "+gr.getTokenDisplayName(ttype));
+                    break;
+                case 'r' :
+                    int ruleIndex = in.readShort();
+                    alt.add(new RuleRef(ruleIndex));
+                    //System.out.println("read rule "+gr.getRuleName(ruleIndex));
+                    break;
+                case '.' : // wildcard
+                    break;
+                case '-' : // range
+                    int from = in.readChar();
+                    int to = in.readChar();
+                    break;
+                case '~' : // not
+                    int notThisTokenType = in.readShort();
+                    break;
+                case 'B' : // nested block
+                    Block b = readBlock(in);
+                    alt.add(b);
+                    break;
+            }
+            cmd = in.readByte();
+        }
+        //System.out.println("exit alt");
+        return alt;
+    }
+
+    protected String readString(DataInputStream in) throws IOException {
+        byte c = in.readByte();
+        StringBuffer buf = new StringBuffer();
+        while ( c!=';' ) {
+            buf.append((char)c);
+            c = in.readByte();
+        }
+        return buf.toString();
+    }
+
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        buf.append(type+" grammar "+name);
+        buf.append(rules);
+        return buf.toString();
+    }
+}
diff --git a/runtime/Java/src/org/antlr/runtime/Token.java b/runtime/Java/src/main/java/org/antlr/runtime/Token.java
similarity index 80%
rename from runtime/Java/src/org/antlr/runtime/Token.java
rename to runtime/Java/src/main/java/org/antlr/runtime/Token.java
index d765459..e469e31 100644
--- a/runtime/Java/src/org/antlr/runtime/Token.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/Token.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2007 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -60,25 +60,32 @@ public interface Token {
 	public static final int HIDDEN_CHANNEL = 99;
 
 	/** Get the text of the token */
-	public abstract String getText();
-	public abstract void setText(String text);
+	public String getText();
+	public void setText(String text);
 
-	public abstract int getType();
-	public abstract void setType(int ttype);
+	public int getType();
+	public void setType(int ttype);
 	/**  The line number on which this token was matched; line=1..n */
-	public abstract int getLine();
-    public abstract void setLine(int line);
+	public int getLine();
+    public void setLine(int line);
 
 	/** The index of the first character relative to the beginning of the line 0..n-1 */
-	public abstract int getCharPositionInLine();
-	public abstract void setCharPositionInLine(int pos);
+	public int getCharPositionInLine();
+	public void setCharPositionInLine(int pos);
 
-	public abstract int getChannel();
-	public abstract void setChannel(int channel);
+	public int getChannel();
+	public void setChannel(int channel);
 
 	/** An index from 0..n-1 of the token object in the input stream.
 	 *  This must be valid in order to use the ANTLRWorks debugger.
 	 */
-	public abstract int getTokenIndex();
-	public abstract void setTokenIndex(int index);
+	public int getTokenIndex();
+	public void setTokenIndex(int index);
+
+	/** From what character stream was this token created?  You don't have to
+	 *  implement but it's nice to know where a Token comes from if you have
+	 *  include files etc... on the input.
+	 */
+	public CharStream getInputStream();
+	public void setInputStream(CharStream input);
 }
diff --git a/runtime/Java/src/org/antlr/runtime/TokenRewriteStream.java b/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
similarity index 56%
rename from runtime/Java/src/org/antlr/runtime/TokenRewriteStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
index 55de81c..1e3bc2b 100644
--- a/runtime/Java/src/org/antlr/runtime/TokenRewriteStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -87,8 +87,11 @@ public class TokenRewriteStream extends CommonTokenStream {
 
 	// Define the rewrite operation hierarchy
 
-	static class RewriteOperation {
-		protected int index;
+	class RewriteOperation {
+        /** What index into rewrites List are we? */
+        protected int instructionIndex;
+        /** Token buffer index. */
+        protected int index;
 		protected Object text;
 		protected RewriteOperation(int index, Object text) {
 			this.index = index;
@@ -104,32 +107,25 @@ public class TokenRewriteStream extends CommonTokenStream {
 			String opName = getClass().getName();
 			int $index = opName.indexOf('$');
 			opName = opName.substring($index+1, opName.length());
-			return opName+"@"+index+'"'+text+'"';
+			return "<"+opName+"@"+index+":\""+text+"\">";			
 		}
 	}
 
-	static class InsertBeforeOp extends RewriteOperation {
+	class InsertBeforeOp extends RewriteOperation {
 		public InsertBeforeOp(int index, Object text) {
 			super(index,text);
 		}
 		public int execute(StringBuffer buf) {
 			buf.append(text);
-			return index;
-		}
-	}
-
-	/** TODO: make insertAfters append after each other.
-	static class InsertAfterOp extends InsertBeforeOp {
-		public InsertAfterOp(int index, String text) {
-			super(index,text);
+			buf.append(((Token)tokens.get(index)).getText());			
+			return index+1;
 		}
 	}
-	 */
 
 	/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
 	 *  instructions.
 	 */
-	static class ReplaceOp extends RewriteOperation {
+	class ReplaceOp extends RewriteOperation {
 		protected int lastIndex;
 		public ReplaceOp(int from, int to, Object text) {
 			super(from,text);
@@ -141,12 +137,18 @@ public class TokenRewriteStream extends CommonTokenStream {
 			}
 			return lastIndex+1;
 		}
+		public String toString() {
+			return "<ReplaceOp@"+index+".."+lastIndex+":\""+text+"\">";
+		}
 	}
 
-	static class DeleteOp extends ReplaceOp {
+	class DeleteOp extends ReplaceOp {
 		public DeleteOp(int from, int to) {
 			super(from, to, null);
 		}
+		public String toString() {
+			return "<DeleteOp@"+index+".."+lastIndex+">";
+		}
 	}
 
 	/** You may have multiple, named streams of rewrite operations.
@@ -202,92 +204,6 @@ public class TokenRewriteStream extends CommonTokenStream {
 		rollback(programName, MIN_TOKEN_INDEX);
 	}
 
-	/** If op.index > lastRewriteTokenIndexes, just add to the end.
-	 *  Otherwise, do linear */
-	protected void addToSortedRewriteList(RewriteOperation op) {
-		addToSortedRewriteList(DEFAULT_PROGRAM_NAME, op);
-	}
-
-	/** Add an instruction to the rewrite instruction list ordered by
-	 *  the instruction number (use a binary search for efficiency).
-	 *  The list is ordered so that toString() can be done efficiently.
-	 *
-	 *  When there are multiple instructions at the same index, the instructions
-	 *  must be ordered to ensure proper behavior.  For example, a delete at
-	 *  index i must kill any replace operation at i.  Insert-before operations
-	 *  must come before any replace / delete instructions.  If there are
-	 *  multiple insert instructions for a single index, they are done in
-	 *  reverse insertion order so that "insert foo" then "insert bar" yields
-	 *  "foobar" in front rather than "barfoo".  This is convenient because
-	 *  I can insert new InsertOp instructions at the index returned by
-	 *  the binary search.  A ReplaceOp kills any previous replace op.  Since
-	 *  delete is the same as replace with null text, i can check for
-	 *  ReplaceOp and cover DeleteOp at same time. :)
-	 */
-	protected void addToSortedRewriteList(String programName, RewriteOperation op) {
-		List rewrites = getProgram(programName);
-		//System.out.println("### add "+op+"; rewrites="+rewrites);
-		Comparator comparator = new Comparator() {
-			public int compare(Object o, Object o1) {
-				RewriteOperation a = (RewriteOperation)o;
-				RewriteOperation b = (RewriteOperation)o1;
-				if ( a.index<b.index ) return -1;
-				if ( a.index>b.index ) return 1;
-				return 0;
-			}
-		};
-        int pos = Collections.binarySearch(rewrites, op, comparator);
-		//System.out.println("bin search returns: pos="+pos);
-
-		if ( pos>=0 ) {
-			// binarySearch does not guarantee first element when multiple
-			// are found.  I must seach backwards for first op with op.index
-			for (; pos>=0; pos--) {
-				RewriteOperation prevOp = (RewriteOperation)rewrites.get(pos);
-				if ( prevOp.index<op.index ) {
-					break;
-				}
-			}
-			pos++; // pos points at first op before ops with op.index; go back up one
-			// now pos is the index in rewrites of first op with op.index
-			//System.out.println("first op with op.index: pos="+pos);
-
-			// an instruction operating already on that index was found;
-			// make this one happen after all the others
-			//System.out.println("found instr for index="+op.index);
-			if ( op instanceof ReplaceOp ) {
-				boolean replaced = false;
-				int i;
-				// look for an existing replace
-				for (i=pos; i<rewrites.size(); i++) {
-					RewriteOperation prevOp = (RewriteOperation)rewrites.get(pos);
-					if ( prevOp.index!=op.index ) {
-						break;
-					}
-					if ( prevOp instanceof ReplaceOp ) {
-						rewrites.set(pos, op); // replace old with new
-						replaced=true;
-						break;
-					}
-					// keep going; must be an insert
-				}
-				if ( !replaced ) {
-					// add replace op to the end of all the inserts
-					rewrites.add(i, op);
-				}
-			}
-			else {
-				// inserts are added in front of existing inserts
-				rewrites.add(pos, op);
-			}
-		}
-		else {
-			//System.out.println("no instruction at pos=="+pos);
-			rewrites.add(-pos-1, op);
-		}
-		//System.out.println("after, rewrites="+rewrites);
-	}
-
 	public void insertAfter(Token t, Object text) {
 		insertAfter(DEFAULT_PROGRAM_NAME, t, text);
 	}
@@ -319,7 +235,11 @@ public class TokenRewriteStream extends CommonTokenStream {
 	}
 
 	public void insertBefore(String programName, int index, Object text) {
-		addToSortedRewriteList(programName, new InsertBeforeOp(index,text));
+		//addToSortedRewriteList(programName, new InsertBeforeOp(index,text));
+		RewriteOperation op = new InsertBeforeOp(index,text);
+		List rewrites = getProgram(programName);
+        op.instructionIndex = rewrites.size();
+        rewrites.add(op);		
 	}
 
 	public void replace(int index, Object text) {
@@ -339,18 +259,13 @@ public class TokenRewriteStream extends CommonTokenStream {
 	}
 
 	public void replace(String programName, int from, int to, Object text) {
-		if ( from > to || from<0 || to<0 ) {
-			return;
-		}
-		addToSortedRewriteList(programName, new ReplaceOp(from, to, text));
-		/*
-		// replace from..to by deleting from..to-1 and then do a replace
-		// on last index
-		for (int i=from; i<to; i++) {
-			addToSortedRewriteList(new DeleteOp(i,i));
+		if ( from > to || from<0 || to<0 || to >= tokens.size() ) {
+			throw new IllegalArgumentException("replace: range invalid: "+from+".."+to+"(size="+tokens.size()+")");
 		}
-		addToSortedRewriteList(new ReplaceOp(to, to, text));
-		*/
+		RewriteOperation op = new ReplaceOp(from, to, text);
+		List rewrites = getProgram(programName);
+        op.instructionIndex = rewrites.size();
+        rewrites.add(op);
 	}
 
 	public void replace(String programName, Token from, Token to, Object text) {
@@ -440,62 +355,201 @@ public class TokenRewriteStream extends CommonTokenStream {
 
 	public String toString(String programName, int start, int end) {
 		List rewrites = (List)programs.get(programName);
-		if ( rewrites==null || rewrites.size()==0 ) {
+
+        // ensure start/end are in range
+        if ( end>tokens.size()-1 ) end = tokens.size()-1;
+        if ( start<0 ) start = 0;
+
+        if ( rewrites==null || rewrites.size()==0 ) {
 			return toOriginalString(start,end); // no instructions to execute
 		}
 		StringBuffer buf = new StringBuffer();
 
-		/// Index of first rewrite we have not done
-		int rewriteOpIndex = 0;
-
-		int tokenCursor=start;
-		while ( tokenCursor>=MIN_TOKEN_INDEX &&
-				tokenCursor<=end &&
-				tokenCursor<tokens.size() )
-		{
-			//System.out.println("tokenCursor="+tokenCursor);
-			// execute instructions associated with this token index
-			if ( rewriteOpIndex<rewrites.size() ) {
-				RewriteOperation op =
-						(RewriteOperation)rewrites.get(rewriteOpIndex);
-
-				// skip all ops at lower index
-				while ( op.index<tokenCursor && rewriteOpIndex<rewrites.size() ) {
-					rewriteOpIndex++;
-					if ( rewriteOpIndex<rewrites.size() ) {
-						op = (RewriteOperation)rewrites.get(rewriteOpIndex);
-					}
+		// First, optimize instruction stream
+		Map indexToOp = reduceToSingleOperationPerIndex(rewrites);
+
+        // Walk buffer, executing instructions and emitting tokens
+        int i = start;
+        while ( i <= end && i < tokens.size() ) {
+			RewriteOperation op = (RewriteOperation)indexToOp.get(new Integer(i));
+			indexToOp.remove(new Integer(i)); // remove so any left have index size-1
+			Token t = (Token) tokens.get(i);
+			if ( op==null ) {
+				// no operation at that index, just dump token
+				buf.append(t.getText());
+				i++; // move to next token
+			}
+			else {
+				i = op.execute(buf); // execute operation and skip
+			}
+		}
+
+        // include stuff after end if it's last index in buffer
+        // So, if they did an insertAfter(lastValidIndex, "foo"), include
+        // foo if end==lastValidIndex.
+        if ( end==tokens.size()-1 ) {
+            // Scan any remaining operations after last token
+            // should be included (they will be inserts).
+            Iterator it = indexToOp.values().iterator();
+            while (it.hasNext()) {
+                RewriteOperation op = (RewriteOperation)it.next();
+                if ( op.index >= tokens.size()-1 ) buf.append(op.text);
+            }
+        }
+        return buf.toString();
+	}
+
+	/** We need to combine operations and report invalid operations (like
+	 *  overlapping replaces that are not completed nested).  Inserts to
+	 *  same index need to be combined etc...   Here are the cases:
+	 *
+	 *  I.i.u I.j.v								leave alone, nonoverlapping
+	 *  I.i.u I.i.v								combine: Iivu
+	 *
+	 *  R.i-j.u R.x-y.v	| i-j in x-y			delete first R
+	 *  R.i-j.u R.i-j.v							delete first R
+	 *  R.i-j.u R.x-y.v	| x-y in i-j			ERROR
+	 *  R.i-j.u R.x-y.v	| boundaries overlap	ERROR
+	 *
+	 *  I.i.u R.x-y.v | i in x-y				delete I
+	 *  I.i.u R.x-y.v | i not in x-y			leave alone, nonoverlapping
+	 *  R.x-y.v I.i.u | i in x-y				ERROR
+	 *  R.x-y.v I.x.u 							R.x-y.uv (combine, delete I)
+	 *  R.x-y.v I.i.u | i not in x-y			leave alone, nonoverlapping
+	 *
+	 *  I.i.u = insert u before op @ index i
+	 *  R.x-y.u = replace x-y indexed tokens with u
+	 *
+	 *  First we need to examine replaces.  For any replace op:
+	 *
+	 * 		1. wipe out any insertions before op within that range.
+	 *		2. Drop any replace op before that is contained completely within
+	 *         that range.
+	 *		3. Throw exception upon boundary overlap with any previous replace.
+	 *
+	 *  Then we can deal with inserts:
+	 *
+	 * 		1. for any inserts to same index, combine even if not adjacent.
+	 * 		2. for any prior replace with same left boundary, combine this
+	 *         insert with replace and delete this replace.
+	 * 		3. throw exception if index in same range as previous replace
+	 *
+	 *  Don't actually delete; make op null in list. Easier to walk list.
+	 *  Later we can throw as we add to index -> op map.
+	 *
+	 *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+	 *  inserted stuff would be before the replace range.  But, if you
+	 *  add tokens in front of a method body '{' and then delete the method
+	 *  body, I think the stuff before the '{' you added should disappear too.
+	 *
+	 *  Return a map from token index to operation.
+	 */
+	protected Map reduceToSingleOperationPerIndex(List rewrites) {
+		//System.out.println("rewrites="+rewrites);
+
+		// WALK REPLACES
+		for (int i = 0; i < rewrites.size(); i++) {
+			RewriteOperation op = (RewriteOperation)rewrites.get(i);
+			if ( op==null ) continue;
+			if ( !(op instanceof ReplaceOp) ) continue;
+			ReplaceOp rop = (ReplaceOp)rewrites.get(i);
+			// Wipe prior inserts within range
+			List inserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
+			for (int j = 0; j < inserts.size(); j++) {
+				InsertBeforeOp iop = (InsertBeforeOp) inserts.get(j);
+				if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
+                    // delete insert as it's a no-op.
+                    rewrites.set(iop.instructionIndex, null);
+				}
+			}
+			// Drop any prior replaces contained within
+			List prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
+			for (int j = 0; j < prevReplaces.size(); j++) {
+				ReplaceOp prevRop = (ReplaceOp) prevReplaces.get(j);
+				if ( prevRop.index>=rop.index && prevRop.lastIndex <= rop.lastIndex ) {
+                    // delete replace as it's a no-op.
+                    rewrites.set(prevRop.instructionIndex, null);
+					continue;
 				}
+				// throw exception unless disjoint or identical
+				boolean disjoint =
+					prevRop.lastIndex<rop.index || prevRop.index > rop.lastIndex;
+				boolean same =
+					prevRop.index==rop.index && prevRop.lastIndex==rop.lastIndex;
+				if ( !disjoint && !same ) {
+					throw new IllegalArgumentException("replace op boundaries of "+rop+
+													   " overlap with previous "+prevRop);
+				}
+			}
+		}
 
-				// while we have ops for this token index, exec them
-				while ( tokenCursor==op.index && rewriteOpIndex<rewrites.size() ) {
-					//System.out.println("execute "+op+" at instruction "+rewriteOpIndex);
-					tokenCursor = op.execute(buf);
-					//System.out.println("after execute tokenCursor = "+tokenCursor);
-					rewriteOpIndex++;
-					if ( rewriteOpIndex<rewrites.size() ) {
-						op = (RewriteOperation)rewrites.get(rewriteOpIndex);
-					}
+		// WALK INSERTS
+		for (int i = 0; i < rewrites.size(); i++) {
+			RewriteOperation op = (RewriteOperation)rewrites.get(i);
+			if ( op==null ) continue;
+			if ( !(op instanceof InsertBeforeOp) ) continue;
+			InsertBeforeOp iop = (InsertBeforeOp)rewrites.get(i);
+			// combine current insert with prior if any at same index
+			List prevInserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
+			for (int j = 0; j < prevInserts.size(); j++) {
+				InsertBeforeOp prevIop = (InsertBeforeOp) prevInserts.get(j);
+				if ( prevIop.index == iop.index ) { // combine objects
+					// convert to strings...we're in process of toString'ing
+					// whole token buffer so no lazy eval issue with any templates
+					iop.text = catOpText(iop.text,prevIop.text);
+                    // delete redundant prior insert
+                    rewrites.set(prevIop.instructionIndex, null);
 				}
 			}
-			// dump the token at this index
-			if ( tokenCursor<=end ) {
-				buf.append(get(tokenCursor).getText());
-				tokenCursor++;
+			// look for replaces where iop.index is in range; error
+			List prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
+			for (int j = 0; j < prevReplaces.size(); j++) {
+				ReplaceOp rop = (ReplaceOp) prevReplaces.get(j);
+				if ( iop.index == rop.index ) {
+					rop.text = catOpText(iop.text,rop.text);
+					rewrites.set(i, null);  // delete current insert
+					continue;
+				}
+				if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
+					throw new IllegalArgumentException("insert op "+iop+
+													   " within boundaries of previous "+rop);
+				}
 			}
 		}
-		// now see if there are operations (append) beyond last token index
-		for (int opi=rewriteOpIndex; opi<rewrites.size(); opi++) {
-			RewriteOperation op =
-					(RewriteOperation)rewrites.get(opi);
-			if ( op.index>=size() ) {
-				op.execute(buf); // must be insertions if after last token
+		// System.out.println("rewrites after="+rewrites);
+		Map m = new HashMap();
+		for (int i = 0; i < rewrites.size(); i++) {
+			RewriteOperation op = (RewriteOperation)rewrites.get(i);
+			if ( op==null ) continue; // ignore deleted ops
+			if ( m.get(new Integer(op.index))!=null ) {
+				throw new Error("should only be one op per index");
 			}
-			//System.out.println("execute "+op+" at "+opi);
-			//op.execute(buf); // must be insertions if after last token
+			m.put(new Integer(op.index), op);
 		}
-
-		return buf.toString();
+		//System.out.println("index to op: "+m);
+		return m;
+	}
+
+	protected String catOpText(Object a, Object b) {
+		String x = "";
+		String y = "";
+		if ( a!=null ) x = a.toString();
+		if ( b!=null ) y = b.toString();
+		return x+y;
+	}
+	protected List getKindOfOps(List rewrites, Class kind) {
+		return getKindOfOps(rewrites, kind, rewrites.size());
+	}
+
+    /** Get all operations before an index of a particular kind */
+    protected List getKindOfOps(List rewrites, Class kind, int before) {
+		List ops = new ArrayList();
+		for (int i=0; i<before && i<rewrites.size(); i++) {
+			RewriteOperation op = (RewriteOperation)rewrites.get(i);
+			if ( op==null ) continue; // ignore deleted
+			if ( op.getClass() == kind ) ops.add(op);
+		}		
+		return ops;
 	}
 
 	public String toDebugString() {
diff --git a/runtime/Java/src/org/antlr/runtime/TokenSource.java b/runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java
similarity index 93%
rename from runtime/Java/src/org/antlr/runtime/TokenSource.java
rename to runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java
index 225c594..71f3ab8 100644
--- a/runtime/Java/src/org/antlr/runtime/TokenSource.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -46,4 +46,9 @@ public interface TokenSource {
 	 *  until you get a good one; errors are not passed through to the parser.
 	 */
 	public Token nextToken();
+
+	/** Where are you getting tokens from? normally the implication will simply
+	 *  ask lexers input stream.
+	 */
+	public String getSourceName();
 }
diff --git a/runtime/Java/src/org/antlr/runtime/TokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/TokenStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
index b7c5903..9cfac81 100644
--- a/runtime/Java/src/org/antlr/runtime/TokenStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java b/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
similarity index 73%
rename from runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
index 373b123..44e0b1f 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedNotSetException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,15 +27,27 @@
 */
 package org.antlr.runtime;
 
-public class MismatchedNotSetException extends MismatchedSetException {
+/** An extra token while parsing a TokenStream */
+public class UnwantedTokenException extends MismatchedTokenException {
 	/** Used for remote debugger deserialization */
-	public MismatchedNotSetException() {;}
+	public UnwantedTokenException() {;}
 
-	public MismatchedNotSetException(BitSet expecting, IntStream input) {
+	public UnwantedTokenException(int expecting, IntStream input) {
 		super(expecting, input);
 	}
 
+	public Token getUnexpectedToken() {
+		return token;
+	}
+
 	public String toString() {
-		return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")";
+		String exp = ", expected "+expecting;
+		if ( expecting==Token.INVALID_TOKEN_TYPE ) {
+			exp = "";
+		}
+		if ( token==null ) {
+			return "UnwantedTokenException(found="+null+exp+")";
+		}
+		return "UnwantedTokenException(found="+token.getText()+exp+")";
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/debug/BlankDebugEventListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
old mode 100755
new mode 100644
similarity index 93%
rename from runtime/Java/src/org/antlr/runtime/debug/BlankDebugEventListener.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
index 05b6d28..073c8a3
--- a/runtime/Java/src/org/antlr/runtime/debug/BlankDebugEventListener.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -35,8 +35,8 @@ import org.antlr.runtime.Token;
  *  sensitive to updates to debug interface.
  */
 public class BlankDebugEventListener implements DebugEventListener {
-	public void enterRule(String ruleName) {}
-	public void exitRule(String ruleName) {}
+	public void enterRule(String grammarFileName, String ruleName) {}
+	public void exitRule(String grammarFileName, String ruleName) {}
 	public void enterAlt(int alt) {}
 	public void enterSubRule(int decisionNumber) {}
 	public void exitSubRule(int decisionNumber) {}
@@ -66,7 +66,7 @@ public class BlankDebugEventListener implements DebugEventListener {
 	// AST Stuff
 
 	public void nilNode(Object t) {}
-	//public void setSubTreeRoot(String name, int ID) {}
+	public void errorNode(Object t) {}
 	public void createNode(Object t) {}
 	public void createNode(Object node, Token token) {}
 	public void becomeRoot(Object newRoot, Object oldRoot) {}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventHub.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
similarity index 79%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugEventHub.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
index 90ee112..3b9b1e9 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugEventHub.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.debug;
 
 import org.antlr.runtime.Token;
@@ -28,7 +55,7 @@ public class DebugEventHub implements DebugEventListener {
 	 *  Don't add events in one thread while parser fires events in another.
 	 */
 	public void addListener(DebugEventListener listener) {
-		listeners.add(listeners);
+		listeners.add(listener);
 	}
 	
 	/* To avoid a mess like this:
@@ -40,17 +67,17 @@ public class DebugEventHub implements DebugEventListener {
 		I am dup'ing the for-loop in each.  Where are Java closures!? blech!
 	 */
 
-	public void enterRule(String ruleName) {
+	public void enterRule(String grammarFileName, String ruleName) {
 		for (int i = 0; i < listeners.size(); i++) {
 			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.enterRule(ruleName);
+			listener.enterRule(grammarFileName,ruleName);
 		}
 	}
 
-	public void exitRule(String ruleName) {
+	public void exitRule(String grammarFileName, String ruleName) {
 		for (int i = 0; i < listeners.size(); i++) {
 			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.exitRule(ruleName);
+			listener.exitRule(grammarFileName, ruleName);
 		}
 	}
 
@@ -221,6 +248,13 @@ public class DebugEventHub implements DebugEventListener {
 		}
 	}
 
+	public void errorNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = (DebugEventListener)listeners.get(i);
+			listener.errorNode(t);
+		}
+	}
+
 	public void createNode(Object t) {
 		for (int i = 0; i < listeners.size(); i++) {
 			DebugEventListener listener = (DebugEventListener)listeners.get(i);
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
similarity index 94%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugEventListener.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
index 0cd76d8..56f605a 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugEventListener.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -40,7 +40,8 @@ import org.antlr.runtime.Token;
  *  for future. 4/26/2006.
  */
 public interface DebugEventListener {
-	public static final String PROTOCOL_VERSION = "1";
+	/** Moved to version 2 for v3.1: added grammar name to enter/exit Rule */
+	public static final String PROTOCOL_VERSION = "2";
 	
 	/** serialized version of true */
 	public static final int TRUE = 1;
@@ -49,8 +50,10 @@ public interface DebugEventListener {
 	/** The parser has just entered a rule.  No decision has been made about
 	 *  which alt is predicted.  This is fired AFTER init actions have been
 	 *  executed.  Attributes are defined and available etc...
+	 *  The grammarFileName allows composite grammars to jump around among
+	 *  multiple grammar files.
 	 */
-	public void enterRule(String ruleName);
+	public void enterRule(String grammarFileName, String ruleName);
 
 	/** Because rules can have lots of alternatives, it is very useful to
 	 *  know which alt you are entering.  This is 1..n for n alts.
@@ -61,8 +64,10 @@ public interface DebugEventListener {
 	 *  executed even if an exception is thrown.  This is triggered after
 	 *  error reporting and recovery have occurred (unless the exception is
 	 *  not caught in this rule).  This implies an "exitAlt" event.
+	 *  The grammarFileName allows composite grammars to jump around among
+	 *  multiple grammar files.
 	 */
-	public void exitRule(String ruleName);
+	public void exitRule(String grammarFileName, String ruleName);
 
 	/** Track entry into any (...) subrule other EBNF construct */
 	public void enterSubRule(int decisionNumber);
@@ -260,6 +265,12 @@ public interface DebugEventListener {
 	 */
 	public void nilNode(Object t);
 
+	/** Upon syntax error, recognizers bracket the error with an error node
+	 *  if they are building ASTs.
+	 * @param t
+	 */
+	public void errorNode(Object t);
+
 	/** Announce a new node built from token elements such as type etc...
 	 * 
 	 *  If you are receiving this event over a socket via
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventRepeater.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
similarity index 61%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugEventRepeater.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
index fd74691..4a97685 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugEventRepeater.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.debug;
 
 import org.antlr.runtime.Token;
@@ -10,7 +37,7 @@ import org.antlr.runtime.RecognitionException;
  *  the method in this class so the event will continue on to the original
  *  recipient.
  *
- *  @see also DebugEventHub
+ *  @see DebugEventHub
  */
 public class DebugEventRepeater implements DebugEventListener {
 	protected DebugEventListener listener;
@@ -19,8 +46,8 @@ public class DebugEventRepeater implements DebugEventListener {
 		this.listener = listener;
 	}
 	
-	public void enterRule(String ruleName) { listener.enterRule(ruleName); }
-	public void exitRule(String ruleName) { listener.exitRule(ruleName); }
+	public void enterRule(String grammarFileName, String ruleName) { listener.enterRule(grammarFileName, ruleName); }
+	public void exitRule(String grammarFileName, String ruleName) { listener.exitRule(grammarFileName, ruleName); }
 	public void enterAlt(int alt) { listener.enterAlt(alt); }
 	public void enterSubRule(int decisionNumber) { listener.enterSubRule(decisionNumber); }
 	public void exitSubRule(int decisionNumber) { listener.exitSubRule(decisionNumber); }
@@ -50,6 +77,7 @@ public class DebugEventRepeater implements DebugEventListener {
 	// AST Stuff
 
 	public void nilNode(Object t) { listener.nilNode(t); }
+	public void errorNode(Object t) { listener.errorNode(t); }
 	public void createNode(Object t) { listener.createNode(t); }
 	public void createNode(Object node, Token token) { listener.createNode(node, token); }
 	public void becomeRoot(Object newRoot, Object oldRoot) { listener.becomeRoot(newRoot, oldRoot); }
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugEventSocketProxy.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
similarity index 77%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugEventSocketProxy.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
index 90fc868..6787594 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugEventSocketProxy.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@ import java.net.Socket;
  *  be kept in sync.  New events must be handled on both sides of socket.
  */
 public class DebugEventSocketProxy extends BlankDebugEventListener {
-	public static final int DEFAULT_DEBUGGER_PORT = 0xC001;
+	public static final int DEFAULT_DEBUGGER_PORT = 49100; // was 49153
 	protected int port = DEFAULT_DEBUGGER_PORT;
 	protected ServerSocket serverSocket;
 	protected Socket socket;
@@ -84,6 +84,7 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 			out.println("ANTLR "+ DebugEventListener.PROTOCOL_VERSION);
 			out.println("grammar \""+ grammarFileName);
 			out.flush();
+			ack();			
 		}
 	}
 
@@ -109,7 +110,6 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 		catch (IOException ioe) {
 			ioe.printStackTrace(System.err);
 		}
-
 	}
 
 	protected void transmit(String event) {
@@ -118,55 +118,55 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 		ack();
 	}
 
-	public void enterRule(String ruleName) {
-		transmit("enterRule "+ruleName);
+	public void enterRule(String grammarFileName, String ruleName) {
+		transmit("enterRule\t"+grammarFileName+"\t"+ruleName);
 	}
 
 	public void enterAlt(int alt) {
-		transmit("enterAlt "+alt);
+		transmit("enterAlt\t"+alt);
 	}
 
-	public void exitRule(String ruleName) {
-		transmit("exitRule "+ruleName);
+	public void exitRule(String grammarFileName, String ruleName) {
+		transmit("exitRule\t"+grammarFileName+"\t"+ruleName);
 	}
 
 	public void enterSubRule(int decisionNumber) {
-		transmit("enterSubRule "+decisionNumber);
+		transmit("enterSubRule\t"+decisionNumber);
 	}
 
 	public void exitSubRule(int decisionNumber) {
-		transmit("exitSubRule "+decisionNumber);
+		transmit("exitSubRule\t"+decisionNumber);
 	}
 
 	public void enterDecision(int decisionNumber) {
-		transmit("enterDecision "+decisionNumber);
+		transmit("enterDecision\t"+decisionNumber);
 	}
 
 	public void exitDecision(int decisionNumber) {
-		transmit("exitDecision "+decisionNumber);
+		transmit("exitDecision\t"+decisionNumber);
 	}
 
 	public void consumeToken(Token t) {
 		String buf = serializeToken(t);
-		transmit("consumeToken "+buf);
+		transmit("consumeToken\t"+buf);
 	}
 
 	public void consumeHiddenToken(Token t) {
 		String buf = serializeToken(t);
-		transmit("consumeHiddenToken "+buf);
+		transmit("consumeHiddenToken\t"+buf);
 	}
 
 	public void LT(int i, Token t) {
         if(t != null)
-            transmit("LT "+i+" "+serializeToken(t));
+            transmit("LT\t"+i+"\t"+serializeToken(t));
 	}
 
 	public void mark(int i) {
-		transmit("mark "+i);
+		transmit("mark\t"+i);
 	}
 
 	public void rewind(int i) {
-		transmit("rewind "+i);
+		transmit("rewind\t"+i);
 	}
 
 	public void rewind() {
@@ -174,27 +174,27 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 	}
 
 	public void beginBacktrack(int level) {
-		transmit("beginBacktrack "+level);
+		transmit("beginBacktrack\t"+level);
 	}
 
 	public void endBacktrack(int level, boolean successful) {
-		transmit("endBacktrack "+level+" "+(successful?TRUE:FALSE));
+		transmit("endBacktrack\t"+level+"\t"+(successful?TRUE:FALSE));
 	}
 
 	public void location(int line, int pos) {
-		transmit("location "+line+" "+pos);
+		transmit("location\t"+line+"\t"+pos);
 	}
 
 	public void recognitionException(RecognitionException e) {
 		StringBuffer buf = new StringBuffer(50);
-		buf.append("exception ");
+		buf.append("exception\t");
 		buf.append(e.getClass().getName());
 		// dump only the data common to all exceptions for now
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(e.index);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(e.line);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(e.charPositionInLine);
 		transmit(buf.toString());
 	}
@@ -209,7 +209,7 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 
 	public void semanticPredicate(boolean result, String predicate) {
 		StringBuffer buf = new StringBuffer(50);
-		buf.append("semanticPredicate ");
+		buf.append("semanticPredicate\t");
 		buf.append(result);
 		serializeText(buf, predicate);
 		transmit(buf.toString());
@@ -229,7 +229,7 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 		String text = adaptor.getText(t);
 		int type = adaptor.getType(t);
 		StringBuffer buf = new StringBuffer(50);
-		buf.append("LN "); // lookahead node; distinguish from LT in protocol
+		buf.append("LN\t"); // lookahead node; distinguish from LT in protocol
 		buf.append(i);
 		serializeNode(buf, t);
 		transmit(buf.toString());
@@ -239,9 +239,9 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 		int ID = adaptor.getUniqueID(t);
 		String text = adaptor.getText(t);
 		int type = adaptor.getType(t);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(ID);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(type);
 		Token token = adaptor.getToken(t);
 		int line = -1;
@@ -250,12 +250,12 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 			line = token.getLine();
 			pos = token.getCharPositionInLine();
 		}
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(line);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(pos);
 		int tokenIndex = adaptor.getTokenStartIndex(t);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(tokenIndex);
 		serializeText(buf, text);
 	}
@@ -265,7 +265,19 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 
 	public void nilNode(Object t) {
 		int ID = adaptor.getUniqueID(t);
-		transmit("nilNode "+ID);
+		transmit("nilNode\t"+ID);
+	}
+
+	public void errorNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = t.toString();
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("errorNode\t");
+		buf.append(ID);
+		buf.append("\t");
+		buf.append(Token.INVALID_TOKEN_TYPE);
+		serializeText(buf, text);
+		transmit(buf.toString());
 	}
 
 	public void createNode(Object t) {
@@ -273,9 +285,9 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 		String text = adaptor.getText(t);
 		int type = adaptor.getType(t);
 		StringBuffer buf = new StringBuffer(50);
-		buf.append("createNodeFromTokenElements ");
+		buf.append("createNodeFromTokenElements\t");
 		buf.append(ID);
-		buf.append(" ");
+		buf.append("\t");
 		buf.append(type);
 		serializeText(buf, text);
 		transmit(buf.toString());
@@ -284,41 +296,45 @@ public class DebugEventSocketProxy extends BlankDebugEventListener {
 	public void createNode(Object node, Token token) {
 		int ID = adaptor.getUniqueID(node);
 		int tokenIndex = token.getTokenIndex();
-		transmit("createNode "+ID+" "+tokenIndex);
+		transmit("createNode\t"+ID+"\t"+tokenIndex);
 	}
 
 	public void becomeRoot(Object newRoot, Object oldRoot) {
 		int newRootID = adaptor.getUniqueID(newRoot);
 		int oldRootID = adaptor.getUniqueID(oldRoot);
-		transmit("becomeRoot "+newRootID+" "+oldRootID);
+		transmit("becomeRoot\t"+newRootID+"\t"+oldRootID);
 	}
 
 	public void addChild(Object root, Object child) {
 		int rootID = adaptor.getUniqueID(root);
 		int childID = adaptor.getUniqueID(child);
-		transmit("addChild "+rootID+" "+childID);
+		transmit("addChild\t"+rootID+"\t"+childID);
 	}
 
 	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
 		int ID = adaptor.getUniqueID(t);
-		transmit("setTokenBoundaries "+ID+" "+tokenStartIndex+" "+tokenStopIndex);
+		transmit("setTokenBoundaries\t"+ID+"\t"+tokenStartIndex+"\t"+tokenStopIndex);
 	}
 
-	// support
 
-	protected String serializeToken(Token t) {
-		StringBuffer buf = new StringBuffer(50);
-		buf.append(t.getTokenIndex()); buf.append(' ');
-		buf.append(t.getType()); buf.append(' ');
-		buf.append(t.getChannel()); buf.append(' ');
-		buf.append(t.getLine()); buf.append(' ');
+    // support
+
+    public void setTreeAdaptor(TreeAdaptor adaptor) { this.adaptor = adaptor; }
+    public TreeAdaptor getTreeAdaptor() { return adaptor; }
+
+    protected String serializeToken(Token t) {
+        StringBuffer buf = new StringBuffer(50);
+        buf.append(t.getTokenIndex()); buf.append('\t');
+        buf.append(t.getType()); buf.append('\t');
+        buf.append(t.getChannel()); buf.append('\t');
+        buf.append(t.getLine()); buf.append('\t');
 		buf.append(t.getCharPositionInLine());
 		serializeText(buf, t.getText());
 		return buf.toString();
 	}
 
 	protected void serializeText(StringBuffer buf, String text) {
-		buf.append(" \"");
+		buf.append("\t\"");
 		if ( text==null ) {
 			text = "";
 		}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugParser.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
similarity index 76%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugParser.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
index 9fff7b3..5e827b9 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugParser.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -43,17 +43,17 @@ public class DebugParser extends Parser {
 	/** Create a normal parser except wrap the token stream in a debug
 	 *  proxy that fires consume events.
 	 */
-	public DebugParser(TokenStream input, DebugEventListener dbg) {
-		super(new DebugTokenStream(input,dbg));
+	public DebugParser(TokenStream input, DebugEventListener dbg, RecognizerSharedState state) {
+		super(input instanceof DebugTokenStream?input:new DebugTokenStream(input,dbg), state);
 		setDebugListener(dbg);
 	}
 
-	public DebugParser(TokenStream input) {
-		this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT);
+	public DebugParser(TokenStream input, RecognizerSharedState state) {
+		super(input instanceof DebugTokenStream?input:new DebugTokenStream(input,null), state);
 	}
 
-	public DebugParser(TokenStream input, int port) {
-		super(new DebugTokenStream(input,null));
+	public DebugParser(TokenStream input, DebugEventListener dbg) {
+		this(input instanceof DebugTokenStream?input:new DebugTokenStream(input,dbg), dbg, null);
 	}
 
 	/** Provide a new debug event listener for this parser.  Notify the
@@ -91,23 +91,7 @@ public class DebugParser extends Parser {
 		dbg.endBacktrack(level,successful);		
 	}
 
-	public void recoverFromMismatchedToken(IntStream input,
-										   RecognitionException mte,
-										   int ttype,
-										   BitSet follow)
-		throws RecognitionException
-	{
-		System.err.println("recoverFromMismatchedToken");
-		dbg.recognitionException(mte);
-		super.recoverFromMismatchedToken(input,mte,ttype,follow);
-	}
-
-	public void recoverFromMismatchedSet(IntStream input,
-										 RecognitionException mte,
-										 BitSet follow)
-		throws RecognitionException
-	{
-		dbg.recognitionException(mte);
-		super.recoverFromMismatchedSet(input,mte,follow);
+	public void reportError(RecognitionException e) {
+		dbg.recognitionException(e);
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
similarity index 97%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugTokenStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
index adee23a..fbab814 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugTokenStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -132,6 +132,10 @@ public class DebugTokenStream implements TokenStream {
 		return input.getTokenSource();
 	}
 
+	public String getSourceName() {
+		return getTokenSource().getSourceName();
+	}
+
 	public String toString() {
 		return input.toString();
 	}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
similarity index 53%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugTreeAdaptor.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
index 644c2e3..fefd899 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeAdaptor.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
@@ -1,6 +1,35 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.debug;
 
 import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.RecognitionException;
 import org.antlr.runtime.tree.TreeAdaptor;
 
 /** A TreeAdaptor proxy that fires debugging events to a DebugEventListener
@@ -25,19 +54,49 @@ public class DebugTreeAdaptor implements TreeAdaptor {
 	}
 
 	public Object create(Token payload) {
+		if ( payload.getTokenIndex() < 0 ) {
+			// could be token conjured up during error recovery
+			return create(payload.getType(), payload.getText());
+		}
 		Object node = adaptor.create(payload);
 		dbg.createNode(node, payload);
 		return node;
 	}
 
+	public Object errorNode(TokenStream input, Token start, Token stop,
+							RecognitionException e)
+	{
+		Object node = adaptor.errorNode(input, start, stop, e);
+		if ( node!=null ) {
+			dbg.errorNode(node);
+		}
+		return node;
+	}
+
 	public Object dupTree(Object tree) {
-		// TODO: do these need to be sent to dbg?
-		return adaptor.dupTree(tree);
+		Object t = adaptor.dupTree(tree);
+		// walk the tree and emit create and add child events
+		// to simulate what dupTree has done. dupTree does not call this debug
+		// adapter so I must simulate.
+		simulateTreeConstruction(t);
+		return t;
+	}
+
+	/** ^(A B C): emit create A, create B, add child, ...*/
+	protected void simulateTreeConstruction(Object t) {
+		dbg.createNode(t);
+		int n = adaptor.getChildCount(t);
+		for (int i=0; i<n; i++) {
+			Object child = adaptor.getChild(t, i);
+			simulateTreeConstruction(child);
+			dbg.addChild(t, child);
+		}
 	}
 
 	public Object dupNode(Object treeNode) {
-		// TODO: do these need to be sent to dbg?
-		return adaptor.dupNode(treeNode);
+		Object d = adaptor.dupNode(treeNode);
+		dbg.createNode(d);
+		return d;
 	}
 
 	public Object nil() {
@@ -139,6 +198,14 @@ public class DebugTreeAdaptor implements TreeAdaptor {
 		return adaptor.getChild(t, i);
 	}
 
+	public void setChild(Object t, int i, Object child) {
+		adaptor.setChild(t, i, child);
+	}
+
+	public Object deleteChild(Object t, int i) {
+		return deleteChild(t, i);
+	}
+
 	public int getChildCount(Object t) {
 		return adaptor.getChildCount(t);
 	}
@@ -147,14 +214,33 @@ public class DebugTreeAdaptor implements TreeAdaptor {
 		return adaptor.getUniqueID(node);
 	}
 
-	
+	public Object getParent(Object t) {
+		return adaptor.getParent(t);
+	}
+
+	public int getChildIndex(Object t) {
+		return adaptor.getChildIndex(t);
+	}
+
+	public void setParent(Object t, Object parent) {
+		adaptor.setParent(t, parent);
+	}
+
+	public void setChildIndex(Object t, int index) {
+		adaptor.setChildIndex(t, index);
+	}
+
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+	}
+
 	// support
 
-	public DebugEventListener getDebugEventListener() {
+	public DebugEventListener getDebugListener() {
 		return dbg;
 	}
 
-	public void setDebugEventListener(DebugEventListener dbg) {
+	public void setDebugListener(DebugEventListener dbg) {
 		this.dbg = dbg;
 	}
 
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
similarity index 91%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugTreeNodeStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
index aec3c99..b20b860 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeNodeStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -122,10 +122,16 @@ public class DebugTreeNodeStream implements TreeNodeStream {
 		return input.size();
 	}
 
-	public Object getTreeSource() {
+    public void reset() { ; }
+
+    public Object getTreeSource() {
 		return input;
 	}
 
+	public String getSourceName() {
+		return getTokenStream().getSourceName();
+	}
+
 	public TokenStream getTokenStream() {
 		return input.getTokenStream();
 	}
@@ -139,6 +145,10 @@ public class DebugTreeNodeStream implements TreeNodeStream {
 		input.setUniqueNavigationNodes(uniqueNavigationNodes);
 	}
 
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		input.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+	}
+
 	public String toString(Object start, Object stop) {
 		return input.toString(start,stop);
 	}
diff --git a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeParser.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
similarity index 77%
rename from runtime/Java/src/org/antlr/runtime/debug/DebugTreeParser.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
index 7444e16..4091909 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/DebugTreeParser.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -28,9 +28,8 @@
 package org.antlr.runtime.debug;
 
 import org.antlr.runtime.*;
-import org.antlr.runtime.tree.TreeParser;
 import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeParser;
 
 import java.io.IOException;
 
@@ -46,17 +45,17 @@ public class DebugTreeParser extends TreeParser {
 	/** Create a normal parser except wrap the token stream in a debug
 	 *  proxy that fires consume events.
 	 */
-	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg) {
-		super(new DebugTreeNodeStream(input,dbg));
+	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg, RecognizerSharedState state) {
+		super(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,dbg), state);
 		setDebugListener(dbg);
 	}
 
-	public DebugTreeParser(TreeNodeStream input) {
-		this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT);
+	public DebugTreeParser(TreeNodeStream input, RecognizerSharedState state) {
+		super(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,null), state);
 	}
 
-	public DebugTreeParser(TreeNodeStream input, int port) {
-		super(new DebugTreeNodeStream(input,null));
+	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg) {
+		this(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,dbg), dbg, null);
 	}
 
 	/** Provide a new debug event listener for this parser.  Notify the
@@ -78,6 +77,20 @@ public class DebugTreeParser extends TreeParser {
 		e.printStackTrace(System.err);
 	}
 
+	public void reportError(RecognitionException e) {
+		dbg.recognitionException(e);
+	}
+
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		Object o = super.getMissingSymbol(input, e, expectedTokenType, follow);
+		dbg.consumeNode(o);
+		return o;
+	}
+
 	public void beginResync() {
 		dbg.beginResync();
 	}
@@ -93,23 +106,4 @@ public class DebugTreeParser extends TreeParser {
 	public void endBacktrack(int level, boolean successful) {
 		dbg.endBacktrack(level,successful);		
 	}
-
-	public void recoverFromMismatchedToken(IntStream input,
-										   RecognitionException mte,
-										   int ttype,
-										   BitSet follow)
-		throws RecognitionException
-	{
-		dbg.recognitionException(mte);
-		super.recoverFromMismatchedToken(input,mte,ttype,follow);
-	}
-
-	public void recoverFromMismatchedSet(IntStream input,
-										 RecognitionException mte,
-										 org.antlr.runtime.BitSet follow)
-		throws RecognitionException
-	{
-		dbg.recognitionException(mte);
-		super.recoverFromMismatchedSet(input,mte,follow);
-	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/debug/ParseTreeBuilder.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
similarity index 72%
rename from runtime/Java/src/org/antlr/runtime/debug/ParseTreeBuilder.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
index ff915ea..6306ed4 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/ParseTreeBuilder.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -32,12 +32,18 @@ import org.antlr.runtime.Token;
 import org.antlr.runtime.tree.ParseTree;
 
 import java.util.Stack;
+import java.util.ArrayList;
+import java.util.List;
 
 /** This parser listener tracks rule entry/exit and token matches
  *  to build a simple parse tree using ParseTree nodes.
  */
 public class ParseTreeBuilder extends BlankDebugEventListener {
+	public static final String EPSILON_PAYLOAD = "<epsilon>";
+	
 	Stack callStack = new Stack();
+	List hiddenTokens = new ArrayList();
+	int backtracking = 0;
 
 	public ParseTreeBuilder(String grammarName) {
 		ParseTree root = create("<grammar "+grammarName+">");
@@ -55,24 +61,47 @@ public class ParseTreeBuilder extends BlankDebugEventListener {
 		return new ParseTree(payload);
 	}
 
-	public void enterRule(String ruleName) {
+	public ParseTree epsilonNode() {
+		return create(EPSILON_PAYLOAD);
+	}
+
+	/** Backtracking or cyclic DFA, don't want to add nodes to tree */
+	public void enterDecision(int d) { backtracking++; }
+	public void exitDecision(int i) { backtracking--; }
+
+	public void enterRule(String filename, String ruleName) {
+		if ( backtracking>0 ) return;
 		ParseTree parentRuleNode = (ParseTree)callStack.peek();
 		ParseTree ruleNode = create(ruleName);
 		parentRuleNode.addChild(ruleNode);
 		callStack.push(ruleNode);
 	}
 
-	public void exitRule(String ruleName) {
-		callStack.pop();
+	public void exitRule(String filename, String ruleName) {
+		if ( backtracking>0 ) return;
+		ParseTree ruleNode = (ParseTree)callStack.peek();
+		if ( ruleNode.getChildCount()==0 ) {
+			ruleNode.addChild(epsilonNode());
+		}
+		callStack.pop();		
 	}
 
 	public void consumeToken(Token token) {
+		if ( backtracking>0 ) return;
 		ParseTree ruleNode = (ParseTree)callStack.peek();
 		ParseTree elementNode = create(token);
+		elementNode.hiddenTokens = this.hiddenTokens;
+		this.hiddenTokens = new ArrayList();
 		ruleNode.addChild(elementNode);
 	}
 
+	public void consumeHiddenToken(Token token) {
+		if ( backtracking>0 ) return;
+		hiddenTokens.add(token);
+	}
+
 	public void recognitionException(RecognitionException e) {
+		if ( backtracking>0 ) return;
 		ParseTree ruleNode = (ParseTree)callStack.peek();
 		ParseTree errorNode = create(e);
 		ruleNode.addChild(errorNode);
diff --git a/runtime/Java/src/org/antlr/runtime/debug/Profiler.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/debug/Profiler.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
index 80c30e7..12b878b 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/Profiler.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -83,7 +83,7 @@ public class Profiler extends BlankDebugEventListener {
 		this.parser = parser;
 	}
 
-	public void enterRule(String ruleName) {
+	public void enterRule(String grammarFileName, String ruleName) {
 		//System.out.println("enterRule "+ruleName);
 		ruleLevel++;
 		numRuleInvocations++;
@@ -125,7 +125,7 @@ public class Profiler extends BlankDebugEventListener {
 		numMemoizationCacheEntries++;
 	}
 
-	public void exitRule(String ruleName) {
+	public void exitRule(String grammarFileName, String ruleName) {
 		ruleLevel--;
 	}
 
diff --git a/runtime/Java/src/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
similarity index 90%
rename from runtime/Java/src/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
index 0c0fbb5..86f3276 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,7 @@ package org.antlr.runtime.debug;
 
 import org.antlr.runtime.RecognitionException;
 import org.antlr.runtime.Token;
+import org.antlr.runtime.CharStream;
 import org.antlr.runtime.tree.BaseTree;
 import org.antlr.runtime.tree.Tree;
 
@@ -109,6 +110,11 @@ public class RemoteDebugEventSocketListener implements Runnable {
 		public void setTokenIndex(int index) {
 			this.index = index;
 		}
+		public CharStream getInputStream() {
+			return null;
+		}
+		public void setInputStream(CharStream input) {
+		}
 		public String toString() {
 			String channelStr = "";
 			if ( channel!=Token.DEFAULT_CHANNEL ) {
@@ -244,16 +250,17 @@ public class RemoteDebugEventSocketListener implements Runnable {
 	}
 
 	protected void dispatch(String line) {
-		String[] elements = getEventElements(line);
+        //System.out.println("event: "+line);
+        String[] elements = getEventElements(line);
 		if ( elements==null || elements[0]==null ) {
 			System.err.println("unknown debug event: "+line);
 			return;
 		}
 		if ( elements[0].equals("enterRule") ) {
-			listener.enterRule(elements[1]);
+			listener.enterRule(elements[1], elements[2]);
 		}
 		else if ( elements[0].equals("exitRule") ) {
-			listener.exitRule(elements[1]);
+			listener.exitRule(elements[1], elements[2]);
 		}
 		else if ( elements[0].equals("enterAlt") ) {
 			listener.enterAlt(Integer.parseInt(elements[1]));
@@ -362,47 +369,56 @@ public class RemoteDebugEventSocketListener implements Runnable {
 			listener.consumeNode(node);
 		}
 		else if ( elements[0].equals("LN") ) {
-			int i = Integer.valueOf(elements[1]);
+			int i = Integer.parseInt(elements[1]);
 			ProxyTree node = deserializeNode(elements, 2);
 			listener.LT(i, node);
 		}
 		else if ( elements[0].equals("createNodeFromTokenElements") ) {
-			int ID = Integer.valueOf(elements[1]);
-			int type = Integer.valueOf(elements[2]);
+			int ID = Integer.parseInt(elements[1]);
+			int type = Integer.parseInt(elements[2]);
 			String text = elements[3];
 			text = unEscapeNewlines(text);
 			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
 			listener.createNode(node);
 		}
 		else if ( elements[0].equals("createNode") ) {
-			int ID = Integer.valueOf(elements[1]);
-			int tokenIndex = Integer.valueOf(elements[2]);
+			int ID = Integer.parseInt(elements[1]);
+			int tokenIndex = Integer.parseInt(elements[2]);
 			// create dummy node/token filled with ID, tokenIndex
 			ProxyTree node = new ProxyTree(ID);
 			ProxyToken token = new ProxyToken(tokenIndex);
 			listener.createNode(node, token);
 		}
 		else if ( elements[0].equals("nilNode") ) {
-			int ID = Integer.valueOf(elements[1]);
+			int ID = Integer.parseInt(elements[1]);
 			ProxyTree node = new ProxyTree(ID);
 			listener.nilNode(node);
 		}
+		else if ( elements[0].equals("errorNode") ) {
+			// TODO: do we need a special tree here?
+			int ID = Integer.parseInt(elements[1]);
+			int type = Integer.parseInt(elements[2]);
+			String text = elements[3];
+			text = unEscapeNewlines(text);
+			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
+			listener.errorNode(node);
+		}
 		else if ( elements[0].equals("becomeRoot") ) {
-			int newRootID = Integer.valueOf(elements[1]);
-			int oldRootID = Integer.valueOf(elements[2]);
+			int newRootID = Integer.parseInt(elements[1]);
+			int oldRootID = Integer.parseInt(elements[2]);
 			ProxyTree newRoot = new ProxyTree(newRootID);
 			ProxyTree oldRoot = new ProxyTree(oldRootID);
 			listener.becomeRoot(newRoot, oldRoot);
 		}
 		else if ( elements[0].equals("addChild") ) {
-			int rootID = Integer.valueOf(elements[1]);
-			int childID = Integer.valueOf(elements[2]);
+			int rootID = Integer.parseInt(elements[1]);
+			int childID = Integer.parseInt(elements[2]);
 			ProxyTree root = new ProxyTree(rootID);
 			ProxyTree child = new ProxyTree(childID);
 			listener.addChild(root, child);
 		}
 		else if ( elements[0].equals("setTokenBoundaries") ) {
-			int ID = Integer.valueOf(elements[1]);
+			int ID = Integer.parseInt(elements[1]);
 			ProxyTree node = new ProxyTree(ID);
 			listener.setTokenBoundaries(
 				node,
@@ -415,11 +431,11 @@ public class RemoteDebugEventSocketListener implements Runnable {
 	}
 
 	protected ProxyTree deserializeNode(String[] elements, int offset) {
-		int ID = Integer.valueOf(elements[offset+0]);
-		int type = Integer.valueOf(elements[offset+1]);
-		int tokenLine = Integer.valueOf(elements[offset+2]);
-		int charPositionInLine = Integer.valueOf(elements[offset+3]);
-		int tokenIndex = Integer.valueOf(elements[offset+4]);
+		int ID = Integer.parseInt(elements[offset+0]);
+		int type = Integer.parseInt(elements[offset+1]);
+		int tokenLine = Integer.parseInt(elements[offset+2]);
+		int charPositionInLine = Integer.parseInt(elements[offset+3]);
+		int tokenIndex = Integer.parseInt(elements[offset+4]);
 		String text = elements[offset+5];
 		text = unEscapeNewlines(text);
 		return new ProxyTree(ID, type, tokenLine, charPositionInLine, tokenIndex, text);
@@ -474,7 +490,7 @@ public class RemoteDebugEventSocketListener implements Runnable {
 				str = event.substring(firstQuoteIndex+1,event.length());
 				event = eventWithoutString;
 			}
-			StringTokenizer st = new StringTokenizer(event, " \t", false);
+			StringTokenizer st = new StringTokenizer(event, "\t", false);
 			int i = 0;
 			while ( st.hasMoreTokens() ) {
 				if ( i>=MAX_EVENT_ELEMENTS ) {
diff --git a/runtime/Java/src/org/antlr/runtime/debug/TraceDebugEventListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
similarity index 61%
rename from runtime/Java/src/org/antlr/runtime/debug/TraceDebugEventListener.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
index 99e17c7..0c8d7d2 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/TraceDebugEventListener.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.debug;
 
 import org.antlr.runtime.Token;
diff --git a/runtime/Java/src/org/antlr/runtime/debug/Tracer.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/debug/Tracer.java
rename to runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
index dbc663e..2630cb5 100644
--- a/runtime/Java/src/org/antlr/runtime/debug/Tracer.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java
new file mode 100644
index 0000000..25f34d6
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java
@@ -0,0 +1,93 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.misc;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.NoSuchElementException;
+
+/** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
+ *  A linked list is fast at dequeue but slow at get(i).  An array is
+ *  the reverse.  This is O(1) for both operations.
+ *
+ *  List grows until you dequeue last element at end of buffer. Then
+ *  it resets to start filling at 0 again.  If adds/removes are balanced, the
+ *  buffer will not grow too large.
+ *
+ *  No iterator stuff as that's not how we'll use it.
+ */
+public class FastQueue<T> {
+    /** dynamically-sized buffer of elements */
+    protected List<T> data = new ArrayList<T>();
+    /** index of next element to fill */
+    protected int p = 0;
+
+    public void reset() { p = 0; data.clear(); }
+
+    /** Get and remove first element in queue */
+    public T remove() {
+        T o = get(0);
+        p++;
+        // have we hit end of buffer?
+        if ( p == data.size() ) {
+            // if so, it's an opportunity to start filling at index 0 again
+            clear(); // size goes to 0, but retains memory
+        }
+        return o;
+    }
+
+    public void add(T o) { data.add(o); }
+
+    public int size() { return data.size() - p; }
+
+    public T head() { return get(0); }
+
+    /** Return element i elements ahead of current element.  i==0 gets
+     *  current element.  This is not an absolute index into the data list
+     *  since p defines the start of the real list.
+     */
+    public T get(int i) {
+        if ( p+i >= data.size() ) {
+            throw new NoSuchElementException("queue index "+(p+i)+" > size "+data.size());
+        }
+        return data.get(p+i);
+    }
+
+    public void clear() { p = 0; data.clear(); }
+
+    /** Return string of current buffer contents; non-destructive */
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        int n = size();
+        for (int i=0; i<n; i++) {
+            buf.append(get(i));
+            if ( (i+1)<n ) buf.append(" ");
+        }
+        return buf.toString();
+    }
+}
\ No newline at end of file
diff --git a/src/org/antlr/analysis/LookaheadSet.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java
similarity index 50%
copy from src/org/antlr/analysis/LookaheadSet.java
copy to runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java
index 8239e06..a075770 100644
--- a/src/org/antlr/analysis/LookaheadSet.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,68 +25,63 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.analysis;
+package org.antlr.runtime.misc;
 
-import org.antlr.misc.IntervalSet;
-import org.antlr.misc.IntSet;
-import org.antlr.tool.Grammar;
-
-/** An LL(1) lookahead set; contains a set of token types and a "hasEOF"
- *  condition when the set contains EOF.  Since EOF is -1 everywhere and -1
- *  cannot be stored in my BitSet, I set a condition here.  There may be other
- *  reasons in the future to abstract a LookaheadSet over a raw BitSet.
+/** A dynamic array that uses int not Integer objects. In principle this
+ *  is more efficient in time, but certainly in space.
+ *
+ *  This is simple enough that you can access the data array directly,
+ *  but make sure that you append elements only with add() so that you
+ *  get dynamic sizing.  Make sure to call ensureCapacity() when you are
+ *  manually adding new elements.
+ *
+ *  Doesn't impl List because it doesn't return objects and I mean this
+ *  really as just an array not a List per se.  Manipulate the elements
+ *  at will.  This has stack methods too.
+ *
+ *  When runtime can be 1.5, I'll make this generic.
  */
-public class LookaheadSet {
-	public IntSet tokenTypeSet;
-	public boolean hasEOF;
+public class IntArray {
+	public static final int INITIAL_SIZE = 10;
+	public int[] data;
+	protected int p = -1;
 
-	public LookaheadSet() {
-		tokenTypeSet = new IntervalSet();
+	public void add(int v) {
+		ensureCapacity(p+1);
+		data[++p] = v;
 	}
 
-	public LookaheadSet(IntSet s) {
-		this();
-		tokenTypeSet.addAll(s);
+	public void push(int v) {
+		add(v);
 	}
 
-	public LookaheadSet(int atom) {
-		tokenTypeSet = IntervalSet.of(atom);
+	public int pop() {
+		int v = data[p];
+		p--;
+		return v;
 	}
 
-	public void orInPlace(LookaheadSet other) {
-		this.tokenTypeSet.addAll(other.tokenTypeSet);
-		this.hasEOF = this.hasEOF || other.hasEOF;
+	/** This only tracks elements added via push/add. */
+	public int size() {
+		return p;
 	}
 
-	public boolean member(int a) {
-		return tokenTypeSet.member(a);
-	}
+    public void clear() {
+        p = -1;
+    }
 
-	public void remove(int a) {
-		tokenTypeSet = tokenTypeSet.subtract(IntervalSet.of(a));
-	}
-
-	public String toString(Grammar g) {
-		if ( tokenTypeSet==null ) {
-			if ( hasEOF ) {
-				return "EOF";
-			}
-			return "";
+    public void ensureCapacity(int index) {
+		if ( data==null ) {
+			data = new int[INITIAL_SIZE];
 		}
-		String r = tokenTypeSet.toString(g);
-		if ( hasEOF ) {
-			return r+"+EOF";
+		else if ( (index+1)>=data.length ) {
+			int newSize = data.length*2;
+			if ( index>newSize ) {
+				newSize = index+1;
+			}
+			int[] newData = new int[newSize];
+			System.arraycopy(data, 0, newData, 0, data.length);
+			data = newData;
 		}
-		return r;
-	}
-
-	public static LookaheadSet EOF() {
-		LookaheadSet eof = new LookaheadSet();
-		eof.hasEOF = true;
-		return eof;
-	}
-
-	public String toString() {
-		return toString(null);
 	}
 }
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java
new file mode 100644
index 0000000..097d7a9
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java
@@ -0,0 +1,163 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.misc;
+
+import java.util.List;
+import java.util.ArrayList;
+
+/** A lookahead queue that knows how to mark/release locations
+ *  in the buffer for backtracking purposes. Any markers force the FastQueue
+ *  superclass to keep all tokens until no more markers; then can reset
+ *  to avoid growing a huge buffer.
+ */
+public abstract class LookaheadStream<T> extends FastQueue<T> {
+    public static final int UNINITIALIZED_EOF_ELEMENT_INDEX = Integer.MAX_VALUE;
+
+    /** Set to buffer index of eof when nextElement returns eof */
+    protected int eofElementIndex = UNINITIALIZED_EOF_ELEMENT_INDEX;
+
+    /** Returned by nextElement upon end of stream; we add to buffer also */
+    public T eof = null;
+
+    /** Track the last mark() call result value for use in rewind(). */
+    protected int lastMarker;
+
+    /** tracks how deep mark() calls are nested */
+    protected int markDepth = 0;    
+
+    public LookaheadStream(T eof) {
+        this.eof = eof;
+    }
+
+    public void reset() {
+        eofElementIndex = UNINITIALIZED_EOF_ELEMENT_INDEX;
+        super.reset();
+    }
+    
+    /** Implement nextElement to supply a stream of elements to this
+     *  lookahead buffer.  Return eof upon end of the stream we're pulling from.
+     */
+    public abstract T nextElement();
+
+    /** Get and remove first element in queue; override FastQueue.remove() */
+    public T remove() {
+        T o = get(0);
+        p++;
+        // have we hit end of buffer and not backtracking?
+        if ( p == data.size() && markDepth==0 ) {
+            // if so, it's an opportunity to start filling at index 0 again
+            clear(); // size goes to 0, but retains memory
+        }
+        return o;
+    }
+
+    /** Make sure we have at least one element to remove, even if EOF */
+    public void consume() { sync(1); remove(); }
+
+    /** Make sure we have 'need' elements from current position p. Last valid
+     *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
+     *  ahead.  If we need 1 element, (p+1-1)==p must be < data.size().
+     */
+    public void sync(int need) {
+        int n = (p+need-1) - data.size() + 1; // how many more elements we need?
+        if ( n > 0 ) fill(n);                 // out of elements?
+    }
+
+    /** add n elements to buffer */
+    public void fill(int n) {
+        for (int i=1; i<=n; i++) {
+            T o = nextElement();
+            if ( o==eof ) {
+                data.add(eof);
+                eofElementIndex = data.size()-1;
+            }
+            else data.add(o);
+        }
+    }
+
+    //public boolean hasNext() { return eofElementIndex!=UNINITIALIZED_EOF_ELEMENT_INDEX; }
+    
+    /** Size of entire stream is unknown; we only know buffer size from FastQueue */
+    public int size() { throw new UnsupportedOperationException("streams are of unknown size"); }
+
+    public Object LT(int k) {
+		if ( k==0 ) {
+			return null;
+		}
+		if ( k<0 ) {
+			return LB(-k);
+		}
+		//System.out.print("LT(p="+p+","+k+")=");
+		if ( (p+k-1) >= eofElementIndex ) { // move to super.LT
+			return eof;
+		}
+        sync(k);
+        return get(k-1);
+	}
+
+	/** Look backwards k nodes */
+	protected Object LB(int k) {
+		if ( k==0 ) {
+			return null;
+		}
+		if ( (p-k)<0 ) {
+			return null;
+		}
+		return get(-k);
+	}
+
+    public Object getCurrentSymbol() { return LT(1); }
+
+    public int index() { return p; }
+
+	public int mark() {
+        markDepth++;
+        lastMarker = index();
+        return lastMarker;
+	}
+
+	public void release(int marker) {
+		// no resources to release
+	}
+
+	public void rewind(int marker) {
+        markDepth--;
+        seek(marker); // assume marker is top
+        // release(marker); // waste of call; it does nothing in this class
+    }
+
+	public void rewind() {
+        seek(lastMarker); // rewind but do not release marker
+    }
+
+    /** Seek to a 0-indexed position within data buffer.  Can't handle
+     *  case where you seek beyond end of existing buffer.  Normally used
+     *  to seek backwards in the buffer. Does not force loading of nodes.
+     */
+    public void seek(int index) { p = index; }
+}
\ No newline at end of file
diff --git a/runtime/Java/src/org/antlr/runtime/misc/Stats.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java
similarity index 64%
rename from runtime/Java/src/org/antlr/runtime/misc/Stats.java
rename to runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java
index 9d52bc6..1d73772 100644
--- a/runtime/Java/src/org/antlr/runtime/misc/Stats.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.misc;
 
 import java.io.*;
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java
new file mode 100644
index 0000000..991768d
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java
@@ -0,0 +1,349 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A generic tree implementation with no payload.  You must subclass to
+ *  actually have any user data.  ANTLR v3 uses a list of children approach
+ *  instead of the child-sibling approach in v2.  A flat tree (a list) is
+ *  an empty node whose children represent the list.  An empty, but
+ *  non-null node is called "nil".
+ */
+public abstract class BaseTree implements Tree {
+	protected List children;
+
+	public BaseTree() {
+	}
+
+	/** Create a new node from an existing node does nothing for BaseTree
+	 *  as there are no fields other than the children list, which cannot
+	 *  be copied as the children are not considered part of this node. 
+	 */
+	public BaseTree(Tree node) {
+	}
+
+	public Tree getChild(int i) {
+		if ( children==null || i>=children.size() ) {
+			return null;
+		}
+		return (Tree)children.get(i);
+	}
+
+	/** Get the children internal List; note that if you directly mess with
+	 *  the list, do so at your own risk.
+	 */
+	public List getChildren() {
+		return children;
+	}
+
+	public Tree getFirstChildWithType(int type) {
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			Tree t = (Tree) children.get(i);
+			if ( t.getType()==type ) {
+				return t;
+			}
+		}	
+		return null;
+	}
+
+	public int getChildCount() {
+		if ( children==null ) {
+			return 0;
+		}
+		return children.size();
+	}
+
+	/** Add t as child of this node.
+	 *
+	 *  Warning: if t has no children, but child does
+	 *  and child isNil then this routine moves children to t via
+	 *  t.children = child.children; i.e., without copying the array.
+	 */
+	public void addChild(Tree t) {
+		//System.out.println("add child "+t.toStringTree()+" "+this.toStringTree());
+		//System.out.println("existing children: "+children);
+		if ( t==null ) {
+			return; // do nothing upon addChild(null)
+		}
+		BaseTree childTree = (BaseTree)t;
+		if ( childTree.isNil() ) { // t is an empty node possibly with children
+			if ( this.children!=null && this.children == childTree.children ) {
+				throw new RuntimeException("attempt to add child list to itself");
+			}
+			// just add all of childTree's children to this
+			if ( childTree.children!=null ) {
+				if ( this.children!=null ) { // must copy, this has children already
+					int n = childTree.children.size();
+					for (int i = 0; i < n; i++) {
+						Tree c = (Tree)childTree.children.get(i);
+						this.children.add(c);
+						// handle double-link stuff for each child of nil root
+						c.setParent(this);
+						c.setChildIndex(children.size()-1);
+					}
+				}
+				else {
+					// no children for this but t has children; just set pointer
+					// call general freshener routine
+					this.children = childTree.children;
+					this.freshenParentAndChildIndexes();
+				}
+			}
+		}
+		else { // child is not nil (don't care about children)
+			if ( children==null ) {
+				children = createChildrenList(); // create children list on demand
+			}
+			children.add(t);
+			childTree.setParent(this);
+			childTree.setChildIndex(children.size()-1);
+		}
+		// System.out.println("now children are: "+children);
+	}
+
+	/** Add all elements of kids list as children of this node */
+	public void addChildren(List kids) {
+		for (int i = 0; i < kids.size(); i++) {
+			Tree t = (Tree) kids.get(i);
+			addChild(t);
+		}
+	}
+
+	public void setChild(int i, Tree t) {
+		if ( t==null ) {
+			return;
+		}
+		if ( t.isNil() ) {
+			throw new IllegalArgumentException("Can't set single child to a list");
+		}
+		if ( children==null ) {
+			children = createChildrenList();
+		}
+		children.set(i, t);
+		t.setParent(this);
+		t.setChildIndex(i);
+	}
+	
+	public Object deleteChild(int i) {
+		if ( children==null ) {
+			return null;
+		}
+		Tree killed = (Tree)children.remove(i);
+		// walk rest and decrement their child indexes
+		this.freshenParentAndChildIndexes(i);
+		return killed;
+	}
+
+	/** Delete children from start to stop and replace with t even if t is
+	 *  a list (nil-root tree).  num of children can increase or decrease.
+	 *  For huge child lists, inserting children can force walking rest of
+	 *  children to set their childindex; could be slow.
+	 */
+	public void replaceChildren(int startChildIndex, int stopChildIndex, Object t) {
+		/*
+		System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
+						   " with "+((BaseTree)t).toStringTree());
+		System.out.println("in="+toStringTree());
+		*/
+		if ( children==null ) {
+			throw new IllegalArgumentException("indexes invalid; no children in list");
+		}
+		int replacingHowMany = stopChildIndex - startChildIndex + 1;
+		int replacingWithHowMany;
+		BaseTree newTree = (BaseTree)t;
+		List newChildren = null;
+		// normalize to a list of children to add: newChildren
+		if ( newTree.isNil() ) {
+			newChildren = newTree.children;
+		}
+		else {
+			newChildren = new ArrayList(1);
+			newChildren.add(newTree);
+		}
+		replacingWithHowMany = newChildren.size();
+		int numNewChildren = newChildren.size();
+		int delta = replacingHowMany - replacingWithHowMany;
+		// if same number of nodes, do direct replace
+		if ( delta == 0 ) {
+			int j = 0; // index into new children
+			for (int i=startChildIndex; i<=stopChildIndex; i++) {
+				BaseTree child = (BaseTree)newChildren.get(j);
+				children.set(i, child);
+				child.setParent(this);
+				child.setChildIndex(i);
+                j++;
+            }
+		}
+		else if ( delta > 0 ) { // fewer new nodes than there were
+			// set children and then delete extra
+			for (int j=0; j<numNewChildren; j++) {
+				children.set(startChildIndex+j, newChildren.get(j));
+			}
+			int indexToDelete = startChildIndex+numNewChildren;
+			for (int c=indexToDelete; c<=stopChildIndex; c++) {
+				// delete same index, shifting everybody down each time
+				children.remove(indexToDelete);
+			}
+			freshenParentAndChildIndexes(startChildIndex);
+		}
+		else { // more new nodes than were there before
+			// fill in as many children as we can (replacingHowMany) w/o moving data
+			for (int j=0; j<replacingHowMany; j++) {
+				children.set(startChildIndex+j, newChildren.get(j));
+			}
+			int numToInsert = replacingWithHowMany-replacingHowMany;
+			for (int j=replacingHowMany; j<replacingWithHowMany; j++) {
+				children.add(startChildIndex+j, newChildren.get(j));
+			}
+			freshenParentAndChildIndexes(startChildIndex);
+		}
+		//System.out.println("out="+toStringTree());
+	}
+
+	/** Override in a subclass to change the impl of children list */
+	protected List createChildrenList() {
+		return new ArrayList();
+	}
+
+	public boolean isNil() {
+		return false;
+	}
+
+	/** Set the parent and child index values for all child of t */
+	public void freshenParentAndChildIndexes() {
+		freshenParentAndChildIndexes(0);
+	}
+
+	public void freshenParentAndChildIndexes(int offset) {
+		int n = getChildCount();
+		for (int c = offset; c < n; c++) {
+			Tree child = (Tree)getChild(c);
+			child.setChildIndex(c);
+			child.setParent(this);
+		}
+	}
+
+	public void sanityCheckParentAndChildIndexes() {
+		sanityCheckParentAndChildIndexes(null, -1);
+	}
+
+	public void sanityCheckParentAndChildIndexes(Tree parent, int i) {
+		if ( parent!=this.getParent() ) {
+			throw new IllegalStateException("parents don't match; expected "+parent+" found "+this.getParent());
+		}
+		if ( i!=this.getChildIndex() ) {
+			throw new IllegalStateException("child indexes don't match; expected "+i+" found "+this.getChildIndex());
+		}
+		int n = this.getChildCount();
+		for (int c = 0; c < n; c++) {
+			CommonTree child = (CommonTree)this.getChild(c);
+			child.sanityCheckParentAndChildIndexes(this, c);
+		}
+	}
+
+	/** BaseTree doesn't track child indexes. */
+	public int getChildIndex() {
+		return 0;
+	}
+	public void setChildIndex(int index) {
+	}
+
+	/** BaseTree doesn't track parent pointers. */
+	public Tree getParent() {
+		return null;
+	}
+
+    public void setParent(Tree t) {
+	}
+
+    /** Walk upwards looking for ancestor with this token type. */
+    public boolean hasAncestor(int ttype) { return getAncestor(ttype)!=null; }
+
+    /** Walk upwards and get first ancestor with this token type. */
+    public Tree getAncestor(int ttype) {
+        Tree t = this;
+        t = t.getParent();
+        while ( t!=null ) {
+            if ( t.getType()==ttype ) return t;
+            t = t.getParent();
+        }
+        return null;
+    }
+
+    /** Return a list of all ancestors of this node.  The first node of
+     *  list is the root and the last is the parent of this node.
+     */
+    public List getAncestors() {
+        if ( getParent()==null ) return null;
+        List ancestors = new ArrayList();
+        Tree t = this;
+        t = t.getParent();
+        while ( t!=null ) {
+            ancestors.add(0, t); // insert at start
+            t = t.getParent();
+        }
+        return ancestors;
+    }
+
+    /** Print out a whole tree not just a node */
+    public String toStringTree() {
+		if ( children==null || children.size()==0 ) {
+			return this.toString();
+		}
+		StringBuffer buf = new StringBuffer();
+		if ( !isNil() ) {
+			buf.append("(");
+			buf.append(this.toString());
+			buf.append(' ');
+		}
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			Tree t = (Tree)children.get(i);
+			if ( i>0 ) {
+				buf.append(' ');
+			}
+			buf.append(t.toStringTree());
+		}
+		if ( !isNil() ) {
+			buf.append(")");
+		}
+		return buf.toString();
+	}
+
+    public int getLine() {
+		return 0;
+	}
+
+	public int getCharPositionInLine() {
+		return 0;
+	}
+
+	/** Override to say how a node (not a tree) should look as text */
+	public abstract String toString();
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/BaseTreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
similarity index 58%
rename from runtime/Java/src/org/antlr/runtime/tree/BaseTreeAdaptor.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
index 4432ef4..6b1a853 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/BaseTreeAdaptor.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
@@ -1,12 +1,42 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.tree;
 
 import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.RecognitionException;
 
-import java.util.Map;
 import java.util.HashMap;
+import java.util.Map;
 
+/** A TreeAdaptor that works with any Tree implementation. */
 public abstract class BaseTreeAdaptor implements TreeAdaptor {
-	/** System.identityHashCode() is not always unique due to GC; we have to
+	/** System.identityHashCode() is not always unique; we have to
 	 *  track ourselves.  That's ok, it's only for debugging, though it's
 	 *  expensive: we have to create a hashtable with all tree nodes in it.
 	 */
@@ -17,12 +47,52 @@ public abstract class BaseTreeAdaptor implements TreeAdaptor {
 		return create(null);
 	}
 
+	/** create tree node that holds the start and stop tokens associated
+	 *  with an error.
+	 *
+	 *  If you specify your own kind of tree nodes, you will likely have to
+	 *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
+	 *  if no token payload but you might have to set token type for diff
+	 *  node type.
+     *
+     *  You don't have to subclass CommonErrorNode; you will likely need to
+     *  subclass your own tree node class to avoid class cast exception.
+	 */
+	public Object errorNode(TokenStream input, Token start, Token stop,
+							RecognitionException e)
+	{
+		CommonErrorNode t = new CommonErrorNode(input, start, stop, e);
+		//System.out.println("returning error node '"+t+"' @index="+input.index());
+		return t;
+	}
+
 	public boolean isNil(Object tree) {
 		return ((Tree)tree).isNil();
 	}
 
 	public Object dupTree(Object tree) {
-		return ((Tree)tree).dupTree();
+		return dupTree(tree, null);
+	}
+
+	/** This is generic in the sense that it will work with any kind of
+	 *  tree (not just Tree interface).  It invokes the adaptor routines
+	 *  not the tree node routines to do the construction.  
+	 */
+	public Object dupTree(Object t, Object parent) {
+		if ( t==null ) {
+			return null;
+		}
+		Object newTree = dupNode(t);
+		// ensure new subtree root has parent/child index set
+		setChildIndex(newTree, getChildIndex(t)); // same index in new tree
+		setParent(newTree, parent);
+		int n = getChildCount(t);
+		for (int i = 0; i < n; i++) {
+			Object child = getChild(t, i);
+			Object newSubTree = dupTree(child, t);
+			addChild(newTree, newSubTree);
+		}
+		return newTree;
 	}
 
 	/** Add a child to the tree t.  If child is a flat tree (a list), make all
@@ -65,19 +135,21 @@ public abstract class BaseTreeAdaptor implements TreeAdaptor {
 	 *  efficiency.
 	 */
 	public Object becomeRoot(Object newRoot, Object oldRoot) {
-		Tree newRootTree = (Tree)newRoot;
+        //System.out.println("becomeroot new "+newRoot.toString()+" old "+oldRoot);
+        Tree newRootTree = (Tree)newRoot;
 		Tree oldRootTree = (Tree)oldRoot;
 		if ( oldRoot==null ) {
 			return newRoot;
 		}
 		// handle ^(nil real-node)
 		if ( newRootTree.isNil() ) {
-			if ( newRootTree.getChildCount()>1 ) {
+            int nc = newRootTree.getChildCount();
+            if ( nc==1 ) newRootTree = (Tree)newRootTree.getChild(0);
+            else if ( nc >1 ) {
 				// TODO: make tree run time exceptions hierarchy
 				throw new RuntimeException("more than one node as root (TODO: make exception hierarchy)");
 			}
-			newRootTree = (Tree)newRootTree.getChild(0);
-		}
+        }
 		// add oldRoot to newRoot; addChild takes care of case where oldRoot
 		// is a flat list (i.e., nil-rooted tree).  All children of oldRoot
 		// are added to newRoot.
@@ -85,11 +157,20 @@ public abstract class BaseTreeAdaptor implements TreeAdaptor {
 		return newRootTree;
 	}
 
-	/** Transform ^(nil x) to x */
+	/** Transform ^(nil x) to x and nil to null */
 	public Object rulePostProcessing(Object root) {
+		//System.out.println("rulePostProcessing: "+((Tree)root).toStringTree());
 		Tree r = (Tree)root;
-		if ( r!=null && r.isNil() && r.getChildCount()==1 ) {
-			r = (Tree)r.getChild(0);
+		if ( r!=null && r.isNil() ) {
+			if ( r.getChildCount()==0 ) {
+				r = null;
+			}
+			else if ( r.getChildCount()==1 ) {
+				r = (Tree)r.getChild(0);
+				// whoever invokes rule will set parent and child index
+				r.setParent(null);
+				r.setChildIndex(-1);
+			}
 		}
 		return r;
 	}
@@ -121,8 +202,7 @@ public abstract class BaseTreeAdaptor implements TreeAdaptor {
 	}
 
 	public int getType(Object t) {
-		((Tree)t).getType();
-		return 0;
+		return ((Tree)t).getType();
 	}
 
 	public void setType(Object t, int type) {
@@ -141,6 +221,14 @@ public abstract class BaseTreeAdaptor implements TreeAdaptor {
 		return ((Tree)t).getChild(i);
 	}
 
+	public void setChild(Object t, int i, Object child) {
+		((Tree)t).setChild(i, (Tree)child);
+	}
+
+	public Object deleteChild(Object t, int i) {
+		return ((Tree)t).deleteChild(i);
+	}
+
 	public int getChildCount(Object t) {
 		return ((Tree)t).getChildCount();
 	}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/CommonTreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
similarity index 64%
rename from runtime/Java/src/org/antlr/runtime/tree/CommonTreeNodeStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
index a645b16..8a1bacb 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/CommonTreeNodeStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
@@ -1,6 +1,6 @@
 /*
 [The "BSD licence"]
-Copyright (c) 2005-2006 Terence Parr
+Copyright (c) 2005-2008 Terence Parr
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -29,7 +29,7 @@ package org.antlr.runtime.tree;
 
 import org.antlr.runtime.Token;
 import org.antlr.runtime.TokenStream;
-
+import org.antlr.runtime.misc.IntArray;
 import java.util.*;
 
 /** A buffered stream of tree nodes.  Nodes can be from a tree of ANY kind.
@@ -46,13 +46,19 @@ import java.util.*;
  *  There is some duplicated functionality here with UnBufferedTreeNodeStream
  *  but just in bookkeeping, not tree walking etc...
  *
- *  @see UnBufferedTreeNodeStream
+ *  TARGET DEVELOPERS:
+ *
+ *  This is the old CommonTreeNodeStream that buffered up entire node stream.
+ *  No need to implement really as new CommonTreeNodeStream is much better
+ *  and covers what we need.
+ *
+ *  @see CommonTreeNodeStream
  */
-public class CommonTreeNodeStream implements TreeNodeStream {
+public class BufferedTreeNodeStream implements TreeNodeStream {
 	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
 	public static final int INITIAL_CALL_STACK_SIZE = 10;
 
-	protected class StreamIterator implements Iterator {
+    protected class StreamIterator implements Iterator {
 		int i = 0;
 		public boolean hasNext() {
 			return i<nodes.size();
@@ -111,43 +117,17 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 	protected int lastMarker;
 
 	/** Stack of indexes used for push/pop calls */
-	protected int[] calls;
-
-	/** Stack pointer for stack of indexes; -1 indicates empty.  Points
-	 *  at next location to push a value.
-	 */
-	protected int _sp = -1;
-
-	/** During fillBuffer(), we can make a reverse index from a set
-	 *  of token types of interest to the list of indexes into the
-	 *  node stream.  This lets us convert a node pointer to a
-	 *  stream index semi-efficiently for a list of interesting
-	 *  nodes such as function definition nodes (you'll want to seek
-	 *  to their bodies for an interpreter).  Also useful for doing
-	 *  dynamic searches; i.e., go find me all PLUS nodes.
-	 */
-	protected Map tokenTypeToStreamIndexesMap;
-
-	/** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
-	 *  occurs for all token types.
-	 */
-	public static final Set INDEX_ALL = new HashSet();
+	protected IntArray calls;
 
-	/** A set of token types user would like to index for faster lookup.
-	 *  If this is INDEX_ALL, then all token types are tracked.  If null,
-	 *  then none are indexed.
-	 */
-	protected Set tokenTypesToReverseIndex = null;
-
-	public CommonTreeNodeStream(Object tree) {
+	public BufferedTreeNodeStream(Object tree) {
 		this(new CommonTreeAdaptor(), tree);
 	}
 
-	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree) {
+	public BufferedTreeNodeStream(TreeAdaptor adaptor, Object tree) {
 		this(adaptor, tree, DEFAULT_INITIAL_BUFFER_SIZE);
 	}
 
-	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree, int initialBufferSize) {
+	public BufferedTreeNodeStream(TreeAdaptor adaptor, Object tree, int initialBufferSize) {
 		this.root = tree;
 		this.adaptor = adaptor;
 		nodes = new ArrayList(initialBufferSize);
@@ -165,11 +145,10 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		p = 0; // buffer of nodes intialized now
 	}
 
-	protected void fillBuffer(Object t) {
+	public void fillBuffer(Object t) {
 		boolean nil = adaptor.isNil(t);
 		if ( !nil ) {
 			nodes.add(t); // add this node
-			fillReverseIndex(t, nodes.size()-1);
 		}
 		// add DOWN node if t has children
 		int n = adaptor.getChildCount(t);
@@ -187,106 +166,10 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		}
 	}
 
-	/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
-	 *  You can override this method to alter how indexing occurs.  The
-	 *  default is to create a
-	 *
-	 *    Map<Integer token type,ArrayList<Integer stream index>>
-	 *
-	 *  This data structure allows you to find all nodes with type INT in order.
-	 *
-	 *  If you really need to find a node of type, say, FUNC quickly then perhaps
-	 *
-	 *    Map<Integertoken type,Map<Object tree node,Integer stream index>>
-	 *
-	 *  would be better for you.  The interior maps map a tree node to
-	 *  the index so you don't have to search linearly for a specific node.
-	 *
-	 *  If you change this method, you will likely need to change
-	 *  getNodeIndex(), which extracts information.
+	/** What is the stream index for node? 0..n-1
+	 *  Return -1 if node not found.
 	 */
-	protected void fillReverseIndex(Object node, int streamIndex) {
-		//System.out.println("revIndex "+node+"@"+streamIndex);
-		if ( tokenTypesToReverseIndex==null ) {
-			return; // no indexing if this is empty (nothing of interest)
-		}
-		if ( tokenTypeToStreamIndexesMap==null ) {
-			tokenTypeToStreamIndexesMap = new HashMap(); // first indexing op
-		}
-		int tokenType = adaptor.getType(node);
-		Integer tokenTypeI = new Integer(tokenType);
-		if ( !(tokenTypesToReverseIndex==INDEX_ALL ||
-			   tokenTypesToReverseIndex.contains(tokenTypeI)) )
-		{
-			return; // tokenType not of interest
-		}
-		Integer streamIndexI = new Integer(streamIndex);
-		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
-		if ( indexes==null ) {
-			indexes = new ArrayList(); // no list yet for this token type
-			indexes.add(streamIndexI); // not there yet, add
-			tokenTypeToStreamIndexesMap.put(tokenTypeI, indexes);
-		}
-		else {
-			if ( !indexes.contains(streamIndexI) ) {
-				indexes.add(streamIndexI); // not there yet, add
-			}
-		}
-	}
-
-	/** Track the indicated token type in the reverse index.  Call this
-	 *  repeatedly for each type or use variant with Set argument to
-	 *  set all at once.
-	 * @param tokenType
-	 */
-	public void reverseIndex(int tokenType) {
-		if ( tokenTypesToReverseIndex==null ) {
-			tokenTypesToReverseIndex = new HashSet();
-		}
-		else if ( tokenTypesToReverseIndex==INDEX_ALL ) {
-			return;
-		}
-		tokenTypesToReverseIndex.add(new Integer(tokenType));
-	}
-
-	/** Track the indicated token types in the reverse index. Set
-	 *  to INDEX_ALL to track all token types.
-	 */
-	public void reverseIndex(Set tokenTypes) {
-		tokenTypesToReverseIndex = tokenTypes;
-	}
-
-	/** Given a node pointer, return its index into the node stream.
-	 *  This is not its Token stream index.  If there is no reverse map
-	 *  from node to stream index or the map does not contain entries
-	 *  for node's token type, a linear search of entire stream is used.
-	 *
-	 *  Return -1 if exact node pointer not in stream.
-	 */
-	public int getNodeIndex(Object node) {
-		//System.out.println("get "+node);
-		if ( tokenTypeToStreamIndexesMap==null ) {
-			return getNodeIndexLinearly(node);
-		}
-		int tokenType = adaptor.getType(node);
-		Integer tokenTypeI = new Integer(tokenType);
-		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
-		if ( indexes==null ) {
-			//System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
-			return getNodeIndexLinearly(node);
-		}
-		for (int i = 0; i < indexes.size(); i++) {
-			Integer streamIndexI = (Integer)indexes.get(i);
-			Object n = get(streamIndexI.intValue());
-			if ( n==node ) {
-				//System.out.println("found in index; stream index = "+streamIndexI);
-				return streamIndexI.intValue(); // found it!
-			}
-		}
-		return -1;
-	}
-
-	protected int getNodeIndexLinearly(Object node) {
+	protected int getNodeIndex(Object node) {
 		if ( p==-1 ) {
 			fillBuffer();
 		}
@@ -348,6 +231,8 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		return nodes.get(p+k-1);
 	}
 
+	public Object getCurrentSymbol() { return LT(1); }
+
 /*
 	public Object getLastTreeNode() {
 		int i = index();
@@ -382,6 +267,10 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		return root;
 	}
 
+	public String getSourceName() {
+		return getTokenStream().getSourceName();
+	}
+
 	public TokenStream getTokenStream() {
 		return tokens;
 	}
@@ -394,6 +283,10 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		return adaptor;
 	}
 
+	public void setTreeAdaptor(TreeAdaptor adaptor) {
+		this.adaptor = adaptor;
+	}
+
 	public boolean hasUniqueNavigationNodes() {
 		return uniqueNavigationNodes;
 	}
@@ -445,19 +338,13 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 	}
 
 	/** Make stream jump to a new location, saving old location.
-	 *  Switch back with pop().  I manage dyanmic array manually
-	 *  to avoid creating Integer objects all over the place.
+	 *  Switch back with pop().
 	 */
 	public void push(int index) {
 		if ( calls==null ) {
-			calls = new int[INITIAL_CALL_STACK_SIZE];
-		}
-		else if ( (_sp+1)>=calls.length ) {
-			int[] newStack = new int[calls.length*2];
-			System.arraycopy(calls, 0, newStack, 0, calls.length);
-			calls = newStack;
+			calls = new IntArray();
 		}
-		calls[++_sp] = p; // save current index
+		calls.push(p); // save current index
 		seek(index);
 	}
 
@@ -465,11 +352,19 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 	 *  Return top of stack (return index).
 	 */
 	public int pop() {
-		int ret = calls[_sp--];
+		int ret = calls.pop();
 		seek(ret);
 		return ret;
 	}
 
+	public void reset() {
+		p = 0;
+		lastMarker = 0;
+        if (calls != null) {
+            calls.clear();
+        }
+    }
+
 	public int size() {
 		if ( p==-1 ) {
 			fillBuffer();
@@ -484,8 +379,16 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		return new StreamIterator();
 	}
 
+	// TREE REWRITE INTERFACE
+
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		if ( parent!=null ) {
+			adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+		}
+	}
+
 	/** Used for testing, just return the token type stream */
-	public String toString() {
+	public String toTokenTypeString() {
 		if ( p==-1 ) {
 			fillBuffer();
 		}
@@ -498,14 +401,29 @@ public class CommonTreeNodeStream implements TreeNodeStream {
 		return buf.toString();
 	}
 
+	/** Debugging */
+	public String toTokenString(int start, int stop) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		StringBuffer buf = new StringBuffer();
+		for (int i = start; i < nodes.size() && i <= stop; i++) {
+			Object t = (Object) nodes.get(i);
+			buf.append(" ");
+			buf.append(adaptor.getToken(t));
+		}
+		return buf.toString();
+	}
+
 	public String toString(Object start, Object stop) {
+		System.out.println("toString");
 		if ( start==null || stop==null ) {
 			return null;
 		}
 		if ( p==-1 ) {
 			fillBuffer();
 		}
-		System.out.println("stop: "+stop);
+		//System.out.println("stop: "+stop);
 		if ( start instanceof CommonTree )
 			System.out.print("toString: "+((CommonTree)start).getToken()+", ");
 		else
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java
new file mode 100644
index 0000000..5884495
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java
@@ -0,0 +1,108 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.*;
+
+/** A node representing erroneous token range in token stream */
+public class CommonErrorNode extends CommonTree {
+	public IntStream input;
+	public Token start;
+	public Token stop;
+	public RecognitionException trappedException;
+
+	public CommonErrorNode(TokenStream input, Token start, Token stop,
+						   RecognitionException e)
+	{
+		//System.out.println("start: "+start+", stop: "+stop);
+		if ( stop==null ||
+			 (stop.getTokenIndex() < start.getTokenIndex() &&
+			  stop.getType()!=Token.EOF) )
+		{
+			// sometimes resync does not consume a token (when LT(1) is
+			// in follow set.  So, stop will be 1 to left to start. adjust.
+			// Also handle case where start is the first token and no token
+			// is consumed during recovery; LT(-1) will return null.
+			stop = start;
+		}
+		this.input = input;
+		this.start = start;
+		this.stop = stop;
+		this.trappedException = e;
+	}
+
+	public boolean isNil() {
+		return false;
+	}
+
+	public int getType() {
+		return Token.INVALID_TOKEN_TYPE;
+	}
+
+	public String getText() {
+		String badText = null;
+		if ( start instanceof Token ) {
+			int i = ((Token)start).getTokenIndex();
+			int j = ((Token)stop).getTokenIndex();
+			if ( ((Token)stop).getType() == Token.EOF ) {
+				j = ((TokenStream)input).size();
+			}
+			badText = ((TokenStream)input).toString(i, j);
+		}
+		else if ( start instanceof Tree ) {
+			badText = ((TreeNodeStream)input).toString(start, stop);
+		}
+		else {
+			// people should subclass if they alter the tree type so this
+			// next one is for sure correct.
+			badText = "<unknown>";
+		}
+		return badText;
+	}
+
+	public String toString() {
+		if ( trappedException instanceof MissingTokenException ) {
+			return "<missing type: "+
+				   ((MissingTokenException)trappedException).getMissingType()+
+				   ">";
+		}
+		else if ( trappedException instanceof UnwantedTokenException ) {
+			return "<extraneous: "+
+				   ((UnwantedTokenException)trappedException).getUnexpectedToken()+
+				   ", resync="+getText()+">";
+		}
+		else if ( trappedException instanceof MismatchedTokenException ) {
+			return "<mismatched token: "+trappedException.token+", resync="+getText()+">";
+		}
+		else if ( trappedException instanceof NoViableAltException ) {
+			return "<unexpected: "+trappedException.token+
+				   ", resync="+getText()+">";
+		}
+		return "<error: "+getText()+">";
+	}
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/CommonTree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
similarity index 60%
rename from runtime/Java/src/org/antlr/runtime/tree/CommonTree.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
index 1998d6e..b86246f 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/CommonTree.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,21 +29,34 @@ package org.antlr.runtime.tree;
 
 import org.antlr.runtime.Token;
 
-/** A tree node that is wrapper for a Token object. */
+/** A tree node that is wrapper for a Token object.  After 3.0 release
+ *  while building tree rewrite stuff, it became clear that computing
+ *  parent and child index is very difficult and cumbersome.  Better to
+ *  spend the space in every tree node.  If you don't want these extra
+ *  fields, it's easy to cut them out in your own BaseTree subclass.
+ */
 public class CommonTree extends BaseTree {
+	/** A single token is the payload */
+	public Token token;
+
 	/** What token indexes bracket all tokens associated with this node
 	 *  and below?
 	 */
-	public int startIndex=-1, stopIndex=-1;
+	protected int startIndex=-1, stopIndex=-1;
 
-	/** A single token is the payload */
-	public Token token;
+	/** Who is the parent node of this node; if null, implies node is root */
+	public CommonTree parent;
+
+	/** What index is this node in the child list? Range: 0..n-1 */
+	public int childIndex = -1;
 
 	public CommonTree() { }
 	
 	public CommonTree(CommonTree node) {
 		super(node);
 		this.token = node.token;
+		this.startIndex = node.startIndex;
+		this.stopIndex = node.stopIndex;
 	}
 
 	public CommonTree(Token t) {
@@ -64,7 +77,7 @@ public class CommonTree extends BaseTree {
 
 	public int getType() {
 		if ( token==null ) {
-			return 0;
+			return Token.INVALID_TOKEN_TYPE;
 		}
 		return token.getType();
 	}
@@ -118,10 +131,55 @@ public class CommonTree extends BaseTree {
 		stopIndex = index;
 	}
 
+    /** For every node in this subtree, make sure it's start/stop token's
+     *  are set.  Walk depth first, visit bottom up.  Only updates nodes
+     *  with at least one token index < 0.
+     */
+    public void setUnknownTokenBoundaries() {
+        if ( children==null ) {
+            if ( startIndex<0 || stopIndex<0 ) {
+                startIndex = stopIndex = token.getTokenIndex();
+            }
+            return;
+        }
+        for (int i=0; i<children.size(); i++) {
+            ((CommonTree)children.get(i)).setUnknownTokenBoundaries();
+        }
+        if ( startIndex>=0 && stopIndex>=0 ) return; // already set
+        if ( children.size() > 0 ) {
+            CommonTree firstChild = (CommonTree)children.get(0);
+            CommonTree lastChild = (CommonTree)children.get(children.size()-1);
+            startIndex = firstChild.getTokenStartIndex();
+            stopIndex = lastChild.getTokenStopIndex();
+        }
+    }
+
+	public int getChildIndex() {
+		return childIndex;
+	}
+
+	public Tree getParent() {
+		return parent;
+	}
+
+	public void setParent(Tree t) {
+		this.parent = (CommonTree)t;
+	}
+
+	public void setChildIndex(int index) {
+		this.childIndex = index;
+	}
+
 	public String toString() {
 		if ( isNil() ) {
 			return "nil";
 		}
+		if ( getType()==Token.INVALID_TOKEN_TYPE ) {
+			return "<errornode>";
+		}
+		if ( token==null ) {
+			return null;
+		}
 		return token.getText();
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/CommonTreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
similarity index 56%
rename from runtime/Java/src/org/antlr/runtime/tree/CommonTreeAdaptor.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
index 547d005..1377ee8 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/CommonTreeAdaptor.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.runtime.tree;
 
 import org.antlr.runtime.CommonToken;
@@ -10,7 +37,8 @@ import org.antlr.runtime.Token;
  *  use your subclass.
  *
  *  To get your parser to build nodes of a different type, override
- *  create(Token).
+ *  create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
+ *  dupNode is called to duplicate nodes during rewrite operations.
  */
 public class CommonTreeAdaptor extends BaseTreeAdaptor {
 	/** Duplicate a node.  This is part of the factory;
@@ -20,9 +48,7 @@ public class CommonTreeAdaptor extends BaseTreeAdaptor {
 	 *  but reflection is slow.
 	 */
 	public Object dupNode(Object t) {
-		if ( t==null ) {
-			return null;
-		}
+		if ( t==null ) return null;
 		return ((Tree)t).dupNode();
 	}
 
@@ -66,46 +92,32 @@ public class CommonTreeAdaptor extends BaseTreeAdaptor {
 	 *  Might be useful info so I'll not force to be i..i.
 	 */
 	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
-		if ( t==null ) {
-			return;
-		}
+		if ( t==null ) return;
 		int start = 0;
 		int stop = 0;
-		if ( startToken!=null ) {
-			start = startToken.getTokenIndex();
-		}
-		if ( stopToken!=null ) {
-			stop = stopToken.getTokenIndex();
-		}
+		if ( startToken!=null ) start = startToken.getTokenIndex();
+		if ( stopToken!=null ) stop = stopToken.getTokenIndex();
 		((Tree)t).setTokenStartIndex(start);
 		((Tree)t).setTokenStopIndex(stop);
 	}
 
 	public int getTokenStartIndex(Object t) {
-		if ( t==null ) {
-			return -1;
-		}
+		if ( t==null ) return -1;
 		return ((Tree)t).getTokenStartIndex();
 	}
 
 	public int getTokenStopIndex(Object t) {
-		if ( t==null ) {
-			return -1;
-		}
+		if ( t==null ) return -1;
 		return ((Tree)t).getTokenStopIndex();
 	}
 
 	public String getText(Object t) {
-		if ( t==null ) {
-			return null;
-		}
+		if ( t==null ) return null;
 		return ((Tree)t).getText();
 	}
 
     public int getType(Object t) {
-		if ( t==null ) {
-			return Token.INVALID_TOKEN_TYPE;
-		}
+		if ( t==null ) return Token.INVALID_TOKEN_TYPE;
 		return ((Tree)t).getType();
 	}
 
@@ -121,17 +133,36 @@ public class CommonTreeAdaptor extends BaseTreeAdaptor {
 	}
 
 	public Object getChild(Object t, int i) {
-		if ( t==null ) {
-			return null;
-		}
+		if ( t==null ) return null;
         return ((Tree)t).getChild(i);
     }
 
     public int getChildCount(Object t) {
-		if ( t==null ) {
-			return 0;
-		}
+		if ( t==null ) return 0;
         return ((Tree)t).getChildCount();
     }
 
+	public Object getParent(Object t) {
+		if ( t==null ) return null;
+        return ((Tree)t).getParent();
+	}
+
+	public void setParent(Object t, Object parent) {
+        if ( t!=null ) ((Tree)t).setParent((Tree)parent);
+	}
+
+	public int getChildIndex(Object t) {
+        if ( t==null ) return 0;
+		return ((Tree)t).getChildIndex();
+	}
+
+	public void setChildIndex(Object t, int index) {
+        if ( t!=null ) ((Tree)t).setChildIndex(index);
+	}
+
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		if ( parent!=null ) {
+			((Tree)parent).replaceChildren(startChildIndex, stopChildIndex, t);
+		}
+	}
 }
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java
new file mode 100644
index 0000000..d1e2594
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java
@@ -0,0 +1,167 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.misc.LookaheadStream;
+import org.antlr.runtime.misc.IntArray;
+
+import java.util.*;
+
+public class CommonTreeNodeStream extends LookaheadStream<Object> implements TreeNodeStream {
+	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
+	public static final int INITIAL_CALL_STACK_SIZE = 10;
+
+	/** Pull nodes from which tree? */
+	protected Object root;
+
+	/** If this tree (root) was created from a token stream, track it. */
+	protected TokenStream tokens;
+
+	/** What tree adaptor was used to build these trees */
+	TreeAdaptor adaptor;
+
+    /** The tree iterator we are using */
+    protected TreeIterator it;
+
+    /** Stack of indexes used for push/pop calls */
+    protected IntArray calls;    
+
+    /** Treat (nil A B C) trees like flat A B C streams */
+    protected boolean hasNilRoot = false;
+
+    /** Tracks tree depth.  Level=0 means we're at root node level. */
+    protected int level = 0;
+
+	public CommonTreeNodeStream(Object tree) {
+		this(new CommonTreeAdaptor(), tree);
+	}
+
+	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree) {
+        super(adaptor.create(Token.EOF, "EOF")); // set EOF
+		this.root = tree;
+		this.adaptor = adaptor;
+        it = new TreeIterator(root);
+        it.eof = this.eof; // make sure tree iterator returns the EOF we want
+	}
+
+    public void reset() {
+        super.reset();
+        it.reset();
+        hasNilRoot = false;
+        level = 0;
+        if ( calls != null ) calls.clear();
+    }
+    
+    /** Pull elements from tree iterator.  Track tree level 0..max_level.
+     *  If nil rooted tree, don't give initial nil and DOWN nor final UP.
+     */
+    public Object nextElement() {
+        Object t = it.next();
+        //System.out.println("pulled "+adaptor.getType(t));
+        if ( t == it.up ) {
+            level--;
+            if ( level==0 && hasNilRoot ) return it.next(); // don't give last UP; get EOF
+        }
+        else if ( t == it.down ) level++;
+        if ( level==0 && adaptor.isNil(t) ) { // if nil root, scarf nil, DOWN
+            hasNilRoot = true;
+            t = it.next(); // t is now DOWN, so get first real node next
+            level++;
+            t = it.next();
+        }
+        return t;
+    }
+
+    public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) { }
+
+	public Object getTreeSource() {	return root; }
+
+	public String getSourceName() { return getTokenStream().getSourceName(); }
+
+	public TokenStream getTokenStream() { return tokens; }
+
+	public void setTokenStream(TokenStream tokens) { this.tokens = tokens; }
+
+	public TreeAdaptor getTreeAdaptor() { return adaptor; }
+
+	public void setTreeAdaptor(TreeAdaptor adaptor) { this.adaptor = adaptor; }
+
+	public int LA(int i) { return adaptor.getType(LT(i)); }
+
+    /** Make stream jump to a new location, saving old location.
+     *  Switch back with pop().
+     */
+    public void push(int index) {
+        if ( calls==null ) {
+            calls = new IntArray();
+        }
+        calls.push(p); // save current index
+        seek(index);
+    }
+
+    /** Seek back to previous index saved during last push() call.
+     *  Return top of stack (return index).
+     */
+    public int pop() {
+        int ret = calls.pop();
+        seek(ret);
+        return ret;
+    }    
+
+	// TREE REWRITE INTERFACE
+
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		if ( parent!=null ) {
+			adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+		}
+	}
+
+	public String toString(Object start, Object stop) {
+        // we'll have to walk from start to stop in tree; we're not keeping
+        // a complete node stream buffer
+        return "n/a";
+	}
+
+    /** For debugging; destructive: moves tree iterator to end. */
+    public String toTokenTypeString() {
+        reset();
+		StringBuffer buf = new StringBuffer();
+        Object o = LT(1);
+        int type = adaptor.getType(o);
+        while ( type!=Token.EOF ) {
+            buf.append(" ");
+            buf.append(type);
+            consume();
+            o = LT(1);
+            type = adaptor.getType(o);
+		}
+		return buf.toString();
+    }
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/DOTTreeGenerator.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
similarity index 85%
rename from runtime/Java/src/org/antlr/runtime/tree/DOTTreeGenerator.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
index f6f18d1..78ff661 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/DOTTreeGenerator.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -54,12 +54,12 @@ public class DOTTreeGenerator {
 
 	public static StringTemplate _treeST =
 		new StringTemplate(
-			"digraph {\n" +
-			"  ordering=out;\n" +
-			"  ranksep=.4;\n" +
-			"  node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
-			"        width=.25, height=.25];\n" +
-			"  edge [arrowsize=.5]\n" +
+			"digraph {\n\n" +
+			"\tordering=out;\n" +
+			"\tranksep=.4;\n" +
+			"\tbgcolor=\"lightgrey\"; node [shape=box, fixedsize=false, fontsize=12, fontname=\"Helvetica-bold\", fontcolor=\"blue\"\n" +
+			"\t\twidth=.25, height=.25, color=\"black\", fillcolor=\"white\", style=\"filled, solid, bold\"];\n" +
+			"\tedge [arrowsize=.5, color=\"black\", style=\"bold\"]\n\n" +
 			"  $nodes$\n" +
 			"  $edges$\n" +
 			"}\n");
@@ -174,8 +174,8 @@ public class DOTTreeGenerator {
 			StringTemplate edgeST = _edgeST.getInstanceOf();
 			edgeST.setAttribute("parent", parentName);
 			edgeST.setAttribute("child", childName);
-			edgeST.setAttribute("parentText", parentText);
-			edgeST.setAttribute("childText", childText);
+			edgeST.setAttribute("parentText", fixString(parentText));
+			edgeST.setAttribute("childText", fixString(childText));
 			treeST.setAttribute("edges", edgeST);
 			toDOTDefineEdges(child, adaptor, treeST);
 		}
@@ -186,8 +186,8 @@ public class DOTTreeGenerator {
 		StringTemplate nodeST = _nodeST.getInstanceOf();
 		String uniqueName = "n"+getNodeNumber(t);
 		nodeST.setAttribute("name", uniqueName);
-		if (text!=null) text = text.replaceAll("\"", "\\\\\"");
-		nodeST.setAttribute("text", text);
+
+		nodeST.setAttribute("text", fixString(text));
 		return nodeST;
 	}
 
@@ -202,4 +202,23 @@ public class DOTTreeGenerator {
 			return nodeNumber-1;
 		}
 	}
+
+    protected String fixString(String in)
+    {
+        String text = in;
+
+        if (text!=null) {
+
+            text = text.replaceAll("\"", "\\\\\"");
+            text = text.replaceAll("\\t", "    ");
+            text = text.replaceAll("\\n", "\\\\n");
+            text = text.replaceAll("\\r", "\\\\r");
+            if  (text.length() > 20)    {
+                text = text.substring(0, 8) + "..." + text.substring(text.length()-8);
+            }
+
+        }
+
+        return text;
+    }
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/ParseTree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
similarity index 68%
rename from runtime/Java/src/org/antlr/runtime/tree/ParseTree.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
index e5baedb..6fb2b7f 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/ParseTree.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,8 @@ package org.antlr.runtime.tree;
 
 import org.antlr.runtime.Token;
 
+import java.util.List;
+
 /** A record of the rules used to match a token sequence.  The tokens
  *  end up as the leaves of this tree and rule nodes are the interior nodes.
  *  This really adds no functionality, it is just an alias for CommonTree
@@ -36,6 +38,8 @@ import org.antlr.runtime.Token;
  */
 public class ParseTree extends BaseTree {
 	public Object payload;
+	public List hiddenTokens;
+
 	public ParseTree(Object label) {
 		this.payload = label;
 	}
@@ -76,4 +80,40 @@ public class ParseTree extends BaseTree {
 		}
 		return payload.toString();
 	}
+
+	/** Emit a token and all hidden nodes before.  EOF node holds all
+	 *  hidden tokens after last real token.
+	 */
+	public String toStringWithHiddenTokens() {
+		StringBuffer buf = new StringBuffer();
+		if ( hiddenTokens!=null ) {
+			for (int i = 0; i < hiddenTokens.size(); i++) {
+				Token hidden = (Token) hiddenTokens.get(i);
+				buf.append(hidden.getText());
+			}
+		}
+		String nodeText = this.toString();
+		if ( !nodeText.equals("<EOF>") ) buf.append(nodeText);
+		return buf.toString();
+	}
+
+	/** Print out the leaves of this tree, which means printing original
+	 *  input back out.
+	 */
+	public String toInputString() {
+		StringBuffer buf = new StringBuffer();
+		_toStringLeaves(buf);
+		return buf.toString();
+	}
+
+	public void _toStringLeaves(StringBuffer buf) {
+		if ( payload instanceof Token ) { // leaf node token?
+			buf.append(this.toStringWithHiddenTokens());
+			return;
+		}
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			ParseTree t = (ParseTree)children.get(i);
+			t._toStringLeaves(buf);
+		}
+	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteCardinalityException.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/tree/RewriteCardinalityException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
index 2abe04f..c46c26c 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteCardinalityException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEarlyExitException.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java
similarity index 97%
rename from runtime/Java/src/org/antlr/runtime/tree/RewriteEarlyExitException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java
index c2bc29b..07b268e 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEarlyExitException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java
similarity index 97%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java
index 815b4e6..865288c 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleElementStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
similarity index 99%
rename from runtime/Java/src/org/antlr/runtime/tree/RewriteRuleElementStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
index b8799c7..23ed373 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleElementStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -134,7 +134,7 @@ public abstract class RewriteRuleElementStream {
 	 *  Return a duplicate node/subtree if stream is out of elements and
 	 *  size==1.  If we've already used the element, dup (dirty bit set).
 	 */
-	public Object next() {
+	public Object nextTree() {
 		int n = size();
 		if ( dirty || (cursor>=n && n==1) ) {
 			// if out of elements and size is 1, dup
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
similarity index 71%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
copy to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
index 4e6e843..7900b09 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,41 +27,44 @@
 */
 package org.antlr.runtime.tree;
 
-import org.antlr.runtime.Token;
-
 import java.util.List;
 
-public class RewriteRuleTokenStream extends RewriteRuleElementStream {
+/** Queues up nodes matched on left side of -> in a tree parser. This is
+ *  the analog of RewriteRuleTokenStream for normal parsers. 
+ */
+public class RewriteRuleNodeStream extends RewriteRuleElementStream {
 
-	public RewriteRuleTokenStream(TreeAdaptor adaptor, String elementDescription) {
+	public RewriteRuleNodeStream(TreeAdaptor adaptor, String elementDescription) {
 		super(adaptor, elementDescription);
 	}
 
 	/** Create a stream with one element */
-	public RewriteRuleTokenStream(TreeAdaptor adaptor,
-								  String elementDescription,
-								  Object oneElement)
+	public RewriteRuleNodeStream(TreeAdaptor adaptor,
+								 String elementDescription,
+								 Object oneElement)
 	{
 		super(adaptor, elementDescription, oneElement);
 	}
 
 	/** Create a stream, but feed off an existing list */
-	public RewriteRuleTokenStream(TreeAdaptor adaptor,
-								  String elementDescription,
-								  List elements)
+	public RewriteRuleNodeStream(TreeAdaptor adaptor,
+								 String elementDescription,
+								 List elements)
 	{
 		super(adaptor, elementDescription, elements);
 	}
 
-	public Object next() {
+	public Object nextNode() {
 		return _next();
 	}
 
 	protected Object toTree(Object el) {
-		return adaptor.create((Token)el);
+		return adaptor.dupNode(el);
 	}
 
 	protected Object dup(Object el) {
-		throw new UnsupportedOperationException("dup can't be called for a token stream.");
+		// we dup every node, so don't have to worry about calling dup; short-
+		// circuited next() so it doesn't call.
+		throw new UnsupportedOperationException("dup can't be called for a node stream.");
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
index 793f925..08aa72f 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
similarity index 85%
rename from runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
index 4e6e843..f25bf31 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteRuleTokenStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -53,12 +53,21 @@ public class RewriteRuleTokenStream extends RewriteRuleElementStream {
 		super(adaptor, elementDescription, elements);
 	}
 
-	public Object next() {
-		return _next();
+	/** Get next token from stream and make a node for it */
+	public Object nextNode() {
+		Token t = (Token)_next();
+		return adaptor.create(t);
 	}
 
+	public Token nextToken() {
+		return (Token)_next();
+	}
+
+	/** Don't convert to a tree unless they explicitly call nextTree.
+	 *  This way we can do hetero tree nodes in rewrite.
+	 */
 	protected Object toTree(Object el) {
-		return adaptor.create((Token)el);
+		return el;
 	}
 
 	protected Object dup(Object el) {
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java
new file mode 100644
index 0000000..79f5523
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java
@@ -0,0 +1,127 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.List;
+
+/** What does a tree look like?  ANTLR has a number of support classes
+ *  such as CommonTreeNodeStream that work on these kinds of trees.  You
+ *  don't have to make your trees implement this interface, but if you do,
+ *  you'll be able to use more support code.
+ *
+ *  NOTE: When constructing trees, ANTLR can build any kind of tree; it can
+ *  even use Token objects as trees if you add a child list to your tokens.
+ *
+ *  This is a tree node without any payload; just navigation and factory stuff.
+ */
+public interface Tree {
+	public static final Tree INVALID_NODE = new CommonTree(Token.INVALID_TOKEN);
+
+	Tree getChild(int i);
+
+	int getChildCount();
+
+	// Tree tracks parent and child index now > 3.0
+
+	public Tree getParent();
+
+	public void setParent(Tree t);
+
+    /** Is there a node above with token type ttype? */
+    public boolean hasAncestor(int ttype);
+
+    /** Walk upwards and get first ancestor with this token type. */
+    public Tree getAncestor(int ttype);
+
+    /** Return a list of all ancestors of this node.  The first node of
+     *  list is the root and the last is the parent of this node.
+     */
+    public List getAncestors();
+
+    /** This node is what child index? 0..n-1 */
+	public int getChildIndex();
+
+	public void setChildIndex(int index);
+
+	/** Set the parent and child index values for all children */
+	public void freshenParentAndChildIndexes();
+
+	/** Add t as a child to this node.  If t is null, do nothing.  If t
+	 *  is nil, add all children of t to this' children.
+	 */
+	void addChild(Tree t);
+
+	/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
+	public void setChild(int i, Tree t);
+
+	public Object deleteChild(int i);
+
+	/** Delete children from start to stop and replace with t even if t is
+	 *  a list (nil-root tree).  num of children can increase or decrease.
+	 *  For huge child lists, inserting children can force walking rest of
+	 *  children to set their childindex; could be slow.
+	 */
+	public void replaceChildren(int startChildIndex, int stopChildIndex, Object t);	
+
+	/** Indicates the node is a nil node but may still have children, meaning
+	 *  the tree is a flat list.
+	 */
+	boolean isNil();
+
+	/**  What is the smallest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStartIndex();
+
+	void setTokenStartIndex(int index);
+
+	/**  What is the largest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStopIndex();
+
+	void setTokenStopIndex(int index);
+
+	Tree dupNode();
+
+	/** Return a token type; needed for tree parsing */
+	int getType();
+
+	String getText();
+
+	/** In case we don't have a token payload, what is the line for errors? */
+	int getLine();
+
+	int getCharPositionInLine();
+
+	String toStringTree();
+
+	String toString();
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java
similarity index 78%
rename from runtime/Java/src/org/antlr/runtime/tree/TreeAdaptor.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java
index ff847f0..d34d654 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreeAdaptor.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2007 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -28,6 +28,8 @@
 package org.antlr.runtime.tree;
 
 import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.RecognitionException;
 
 /** How to create and navigate trees.  Rather than have a separate factory
  *  and adaptor, I've merged them.  Makes sense to encapsulate.
@@ -44,22 +46,42 @@ public interface TreeAdaptor {
 	/** Create a tree node from Token object; for CommonTree type trees,
 	 *  then the token just becomes the payload.  This is the most
 	 *  common create call.
-     */
+	 *
+	 *  Override if you want another kind of node to be built.
+	 */
 	public Object create(Token payload);
 
+	/** Duplicate a single tree node.
+	 *  Override if you want another kind of node to be built.
+	 */
+	public Object dupNode(Object treeNode);
+
 	/** Duplicate tree recursively, using dupNode() for each node */
 	public Object dupTree(Object tree);
 
-	/** Duplicate a single tree node */
-	public Object dupNode(Object treeNode);
-
 	/** Return a nil node (an empty but non-null node) that can hold
 	 *  a list of element as the children.  If you want a flat tree (a list)
 	 *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
 	 */
 	public Object nil();
 
-	/** Is tree considered a nil node used to make lists of child nodes? */ 
+	/** Return a tree node representing an error.  This node records the
+	 *  tokens consumed during error recovery.  The start token indicates the
+	 *  input symbol at which the error was detected.  The stop token indicates
+	 *  the last symbol consumed during recovery.
+	 *
+	 *  You must specify the input stream so that the erroneous text can
+	 *  be packaged up in the error node.  The exception could be useful
+	 *  to some applications; default implementation stores ptr to it in
+	 *  the CommonErrorNode.
+	 *
+	 *  This only makes sense during token parsing, not tree parsing.
+	 *  Tree parsing should happen only when parsing and tree construction
+	 *  succeed.
+	 */
+	public Object errorNode(TokenStream input, Token start, Token stop, RecognitionException e);
+
+	/** Is tree considered a nil node used to make lists of child nodes? */
 	public boolean isNil(Object tree);
 
 	/** Add a child to the tree t.  If child is a flat tree (a list), make all
@@ -207,6 +229,35 @@ public interface TreeAdaptor {
 	/** Get a child 0..n-1 node */
 	public Object getChild(Object t, int i);
 
+	/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
+	public void setChild(Object t, int i, Object child);
+
+	/** Remove ith child and shift children down from right. */
+	public Object deleteChild(Object t, int i);
+
 	/** How many children?  If 0, then this is a leaf node */
 	public int getChildCount(Object t);
+
+	/** Who is the parent node of this node; if null, implies node is root.
+	 *  If your node type doesn't handle this, it's ok but the tree rewrites
+	 *  in tree parsers need this functionality.
+	 */
+	public Object getParent(Object t);
+	public void setParent(Object t, Object parent);
+
+	/** What index is this node in the child list? Range: 0..n-1
+	 *  If your node type doesn't handle this, it's ok but the tree rewrites
+	 *  in tree parsers need this functionality.
+	 */
+	public int getChildIndex(Object t);
+	public void setChildIndex(Object t, int index);
+
+	/** Replace from start to stop child index of parent with t, which might
+	 *  be a list.  Number of children may be different
+	 *  after this call.
+	 *
+	 *  If parent is null, don't do anything; must be at root of overall tree.
+	 *  Can't replace whatever points to the parent externally.  Do nothing.
+	 */
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t);
 }
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java
new file mode 100644
index 0000000..92a14bc
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java
@@ -0,0 +1,135 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognizerSharedState;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.TokenStream;
+
+/**
+ Cut-n-paste from material I'm not using in the book anymore (edit later
+ to make sense):
+
+ Now, how are we going to test these tree patterns against every
+subtree in our original tree?  In what order should we visit nodes?
+For this application, it turns out we need a simple ``apply once''
+rule application strategy and a ``down then up'' tree traversal
+strategy.  Let's look at rule application first.
+
+As we visit each node, we need to see if any of our patterns match. If
+a pattern matches, we execute the associated tree rewrite and move on
+to the next node. In other words, we only look for a single rule
+application opportunity (we'll see below that we sometimes need to
+repeatedly apply rules). The following method applies a rule in a @cl
+TreeParser (derived from a tree grammar) to a tree:
+
+here is where weReferenced code/walking/patterns/TreePatternMatcher.java
+
+It uses reflection to lookup the appropriate rule within the generated
+tree parser class (@cl Simplify in this case). Most of the time, the
+rule will not match the tree.  To avoid issuing syntax errors and
+attempting error recovery, it bumps up the backtracking level.  Upon
+failure, the invoked rule immediately returns. If you don't plan on
+using this technique in your own ANTLR-based application, don't sweat
+the details. This method boils down to ``call a rule to match a tree,
+executing any embedded actions and rewrite rules.''
+
+At this point, we know how to define tree grammar rules and how to
+apply them to a particular subtree. The final piece of the tree
+pattern matcher is the actual tree traversal. We have to get the
+correct node visitation order.  In particular, we need to perform the
+scalar-vector multiply transformation on the way down (preorder) and
+we need to reduce multiply-by-zero subtrees on the way up (postorder).
+
+To implement a top-down visitor, we do a depth first walk of the tree,
+executing an action in the preorder position. To get a bottom-up
+visitor, we execute an action in the postorder position.  ANTLR
+provides a standard @cl TreeVisitor class with a depth first search @v
+visit method. That method executes either a @m pre or @m post method
+or both. In our case, we need to call @m applyOnce in both. On the way
+down, we'll look for @r vmult patterns. On the way up,
+we'll look for @r mult0 patterns.
+ */
+public class TreeFilter extends TreeParser {
+    public interface fptr {
+        public void rule() throws RecognitionException;
+    }
+
+    protected TokenStream originalTokenStream;
+    protected TreeAdaptor originalAdaptor;
+
+    public TreeFilter(TreeNodeStream input) {
+        this(input, new RecognizerSharedState());
+    }
+    public TreeFilter(TreeNodeStream input, RecognizerSharedState state) {
+        super(input, state);
+        originalAdaptor = input.getTreeAdaptor();
+        originalTokenStream = input.getTokenStream();
+    }
+
+    public void applyOnce(Object t, fptr whichRule) {
+        if ( t==null ) return;
+        try {
+            // share TreeParser object but not parsing-related state
+            state = new RecognizerSharedState();
+            input = new CommonTreeNodeStream(originalAdaptor, t);
+            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
+            setBacktrackingLevel(1);
+            whichRule.rule();
+            setBacktrackingLevel(0);
+        }
+        catch (RecognitionException e) { ; }
+    }
+
+    public void downup(Object t) {
+        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+        TreeVisitorAction actions = new TreeVisitorAction() {
+            public Object pre(Object t)  { applyOnce(t, topdown_fptr); return t; }
+            public Object post(Object t) { applyOnce(t, bottomup_fptr); return t; }
+        };
+        v.visit(t, actions);
+    }
+        
+    fptr topdown_fptr = new fptr() {
+        public void rule() throws RecognitionException {
+            topdown();
+        }
+    };
+
+    fptr bottomup_fptr = new fptr() {
+        public void rule() throws RecognitionException {
+            bottomup();
+        }
+    };
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+    public void topdown() throws RecognitionException {;}
+    public void bottomup() throws RecognitionException {;}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java
new file mode 100644
index 0000000..b852ade
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java
@@ -0,0 +1,131 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.misc.FastQueue;
+
+import java.util.Iterator;
+
+/** Return a node stream from a doubly-linked tree whose nodes
+ *  know what child index they are.  No remove() is supported.
+ *
+ *  Emit navigation nodes (DOWN, UP, and EOF) to let show tree structure.
+ */
+public class TreeIterator implements Iterator {
+    protected TreeAdaptor adaptor;
+    protected Object root;
+    protected Object tree;
+    protected boolean firstTime = true;
+
+    // navigation nodes to return during walk and at end
+    public Object up;
+    public Object down;
+    public Object eof;
+
+    /** If we emit UP/DOWN nodes, we need to spit out multiple nodes per
+     *  next() call.
+     */
+    protected FastQueue nodes;
+
+    public TreeIterator(Object tree) {
+        this(new CommonTreeAdaptor(),tree);
+    }
+
+    public TreeIterator(TreeAdaptor adaptor, Object tree) {
+        this.adaptor = adaptor;
+        this.tree = tree;
+        this.root = tree;
+        nodes = new FastQueue();
+        down = adaptor.create(Token.DOWN, "DOWN");
+        up = adaptor.create(Token.UP, "UP");
+        eof = adaptor.create(Token.EOF, "EOF");
+    }
+
+    public void reset() {
+        firstTime = true;
+        tree = root;
+        nodes.clear();
+    }
+
+    public boolean hasNext() {
+        if ( firstTime ) return root!=null;
+        if ( nodes!=null && nodes.size()>0 ) return true;
+        if ( tree==null ) return false;
+        if ( adaptor.getChildCount(tree)>0 ) return true;
+        return adaptor.getParent(tree)!=null; // back at root?
+    }
+
+    public Object next() {
+        if ( firstTime ) { // initial condition
+            firstTime = false;
+            if ( adaptor.getChildCount(tree)==0 ) { // single node tree (special)
+                nodes.add(eof);
+                return tree;
+            }
+            return tree;
+        }
+        // if any queued up, use those first
+        if ( nodes!=null && nodes.size()>0 ) return nodes.remove();
+
+        // no nodes left?
+        if ( tree==null ) return eof;
+
+        // next node will be child 0 if any children
+        if ( adaptor.getChildCount(tree)>0 ) {
+            tree = adaptor.getChild(tree, 0);
+            nodes.add(tree); // real node is next after DOWN
+            return down;
+        }
+        // if no children, look for next sibling of tree or ancestor
+        Object parent = adaptor.getParent(tree);
+        // while we're out of siblings, keep popping back up towards root
+        while ( parent!=null &&
+                adaptor.getChildIndex(tree)+1 >= adaptor.getChildCount(parent) )
+        {
+            nodes.add(up); // we're moving back up
+            tree = parent;
+            parent = adaptor.getParent(tree);
+        }
+        // no nodes left?
+        if ( parent==null ) {
+            tree = null; // back at root? nothing left then
+            nodes.add(eof); // add to queue, might have UP nodes in there
+            return nodes.remove();
+        }
+
+        // must have found a node with an unvisited sibling
+        // move to it and return it
+        int nextSiblingIndex = adaptor.getChildIndex(tree) + 1;
+        tree = adaptor.getChild(parent, nextSiblingIndex);
+        nodes.add(tree); // add to queue, might have UP nodes in there
+        return nodes.remove();
+    }
+
+    public void remove() { throw new UnsupportedOperationException(); }
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
similarity index 81%
rename from runtime/Java/src/org/antlr/runtime/tree/TreeNodeStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
index d945682..8b5eb0e 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreeNodeStream.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
@@ -1,6 +1,6 @@
 /*
 [The "BSD licence"]
-Copyright (c) 2005-2006 Terence Parr
+Copyright (c) 2005-2008 Terence Parr
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,11 @@ public interface TreeNodeStream extends IntStream {
 	 */
 	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes);
 
+    /** Reset the tree node stream in such a way that it acts like
+     *  a freshly constructed stream.
+     */
+    public void reset();
+
 	/** Return the text of all nodes from start to stop, inclusive.
 	 *  If the stream does not buffer all the nodes then it can still
 	 *  walk recursively from start until stop.  You can always return
@@ -83,5 +88,19 @@ public interface TreeNodeStream extends IntStream {
 	 *  an action of course in that case.
 	 */
 	public String toString(Object start, Object stop);
-}
 
+
+	// REWRITING TREES (used by tree parser)
+
+	/** Replace from start to stop child index of parent with t, which might
+	 *  be a list.  Number of children may be different
+	 *  after this call.  The stream is notified because it is walking the
+	 *  tree and might need to know you are monkeying with the underlying
+	 *  tree.  Also, it might be able to modify the node stream to avoid
+	 *  restreaming for future phases.
+	 *
+	 *  If parent is null, don't do anything; must be at root of overall tree.
+	 *  Can't replace whatever points to the parent externally.  Do nothing.
+	 */
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t);
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeParser.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
similarity index 69%
rename from runtime/Java/src/org/antlr/runtime/tree/TreeParser.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
index 0fad3c3..c02fd98 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreeParser.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2007 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,9 @@ package org.antlr.runtime.tree;
 
 import org.antlr.runtime.*;
 
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+
 /** A parser for a stream of tree nodes.  "tree grammars" result in a subclass
  *  of this.  All the error reporting and recovery is shared with Parser via
  *  the BaseRecognizer superclass.
@@ -37,12 +40,24 @@ public class TreeParser extends BaseRecognizer {
 	public static final int DOWN = Token.DOWN;
 	public static final int UP = Token.UP;
 
+    // precompiled regex used by inContext
+    static String dotdot = ".*[^.]\\.\\.[^.].*";
+    static String doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*";
+    static Pattern dotdotPattern = Pattern.compile(dotdot);
+    static Pattern doubleEtcPattern = Pattern.compile(doubleEtc);
+
 	protected TreeNodeStream input;
 
 	public TreeParser(TreeNodeStream input) {
+		super(); // highlight that we go to super to set state object
 		setTreeNodeStream(input);
 	}
 
+	public TreeParser(TreeNodeStream input, RecognizerSharedState state) {
+		super(state); // share the state object with another parser
+		setTreeNodeStream(input);
+    }
+
 	public void reset() {
 		super.reset(); // reset all recognizer state variables
 		if ( input!=null ) {
@@ -59,13 +74,31 @@ public class TreeParser extends BaseRecognizer {
 		return input;
 	}
 
-	/** Match '.' in tree parser has special meaning.  Skip node or
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
+	protected Object getCurrentInputSymbol(IntStream input) {
+		return ((TreeNodeStream)input).LT(1);
+	}
+
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		String tokenText =
+			"<missing "+getTokenNames()[expectedTokenType]+">";
+		return new CommonTree(new CommonToken(expectedTokenType, tokenText));
+	}
+
+    /** Match '.' in tree parser has special meaning.  Skip node or
 	 *  entire tree if node has children.  If children, scan until
 	 *  corresponding UP node.
 	 */
-	public void matchAny(IntStream ignore) { // ignore stream, copy of this.input
-		errorRecovery = false;
-		failed = false;
+	public void matchAny(IntStream ignore) { // ignore stream, copy of input
+		state.errorRecovery = false;
+		state.failed = false;
 		Object look = input.LT(1);
 		if ( input.getTreeAdaptor().getChildCount(look)==0 ) {
 			input.consume(); // not subtree, consume 1 node and return
@@ -89,18 +122,19 @@ public class TreeParser extends BaseRecognizer {
 		input.consume(); // consume UP
 	}
 
-	/** We have DOWN/UP nodes in the stream that have no line info; override.
-	 *  plus we want to alter the exception type.
-	 */
-	protected void mismatch(IntStream input, int ttype, BitSet follow)
-		throws RecognitionException
-	{
-		MismatchedTreeNodeException mte =
-			new MismatchedTreeNodeException(ttype, (TreeNodeStream)input);
-		recoverFromMismatchedToken(input, mte, ttype, follow);
-	}
-
-	/** Prefix error message with the grammar name because message is
+    /** We have DOWN/UP nodes in the stream that have no line info; override.
+	 *  plus we want to alter the exception type.  Don't try to recover
+	 *  from tree parser errors inline...
+     */
+    protected Object recoverFromMismatchedToken(IntStream input,
+                                                int ttype,
+                                                BitSet follow)
+        throws RecognitionException
+    {
+        throw new MismatchedTreeNodeException(ttype, (TreeNodeStream)input);
+    }
+
+    /** Prefix error message with the grammar name because message is
 	 *  always intended for the programmer because the parser built
 	 *  the input tree not the user.
 	 */
@@ -131,5 +165,4 @@ public class TreeParser extends BaseRecognizer {
 	public void traceOut(String ruleName, int ruleIndex)  {
 		super.traceOut(ruleName, ruleIndex, input.LT(1));
 	}
-
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreePatternLexer.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java
similarity index 98%
rename from runtime/Java/src/org/antlr/runtime/tree/TreePatternLexer.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java
index a23149c..6f9cabf 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreePatternLexer.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2007 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreePatternParser.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java
similarity index 97%
rename from runtime/Java/src/org/antlr/runtime/tree/TreePatternParser.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java
index 9298f89..a0e8984 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreePatternParser.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2007 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -139,7 +139,7 @@ public class TreePatternParser {
 		}
 		
 		// create node
-		int treeNodeType = (Integer)wizard.getTokenType(tokenName);
+		int treeNodeType = wizard.getTokenType(tokenName);
 		if ( treeNodeType==Token.INVALID_TOKEN_TYPE ) {
 			return null;
 		}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java
new file mode 100644
index 0000000..6b6404d
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java
@@ -0,0 +1,120 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognizerSharedState;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.TokenStream;
+
+public class TreeRewriter extends TreeParser {
+    public interface fptr {
+        public Object rule() throws RecognitionException;
+    }
+
+    protected boolean showTransformations = false;
+
+    protected TokenStream originalTokenStream;
+    protected TreeAdaptor originalAdaptor;
+    
+    public TreeRewriter(TreeNodeStream input) {
+        this(input, new RecognizerSharedState());
+    }
+    public TreeRewriter(TreeNodeStream input, RecognizerSharedState state) {
+        super(input, state);
+        originalAdaptor = input.getTreeAdaptor();
+        originalTokenStream = input.getTokenStream();        
+    }
+
+    public Object applyOnce(Object t, fptr whichRule) {
+        if ( t==null ) return null;
+        try {
+            // share TreeParser object but not parsing-related state
+            state = new RecognizerSharedState();
+            input = new CommonTreeNodeStream(originalAdaptor, t);
+            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
+            setBacktrackingLevel(1);
+            TreeRuleReturnScope r = (TreeRuleReturnScope)whichRule.rule();
+            setBacktrackingLevel(0);
+            if ( failed() ) return t;
+            if ( showTransformations &&
+                 r!=null && !t.equals(r.getTree()) && r.getTree()!=null )
+            {
+                reportTransformation(t, r.getTree());
+            }
+            if ( r!=null && r.getTree()!=null ) return r.getTree();
+            else return t;
+        }
+        catch (RecognitionException e) { ; }
+        return t;
+    }
+
+    public Object applyRepeatedly(Object t, fptr whichRule) {
+        boolean treeChanged = true;
+        while ( treeChanged ) {
+            Object u = applyOnce(t, whichRule);
+            treeChanged = !t.equals(u);
+            t = u;
+        }
+        return t;
+    }
+
+    public Object downup(Object t) { return downup(t, false); }
+
+    public Object downup(Object t, boolean showTransformations) {
+        this.showTransformations = showTransformations;
+        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+        TreeVisitorAction actions = new TreeVisitorAction() {
+            public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
+            public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
+        };
+        t = v.visit(t, actions);
+        return t;
+    }
+
+    /** Override this if you need transformation tracing to go somewhere
+     *  other than stdout or if you're not using Tree-derived trees.
+     */
+    public void reportTransformation(Object oldTree, Object newTree) {
+        System.out.println(((Tree)oldTree).toStringTree()+" -> "+
+                           ((Tree)newTree).toStringTree());
+    }
+
+    fptr topdown_fptr = new fptr() {
+        public Object rule() throws RecognitionException { return topdown(); }
+    };
+    
+    fptr bottomup_ftpr = new fptr() {
+        public Object rule() throws RecognitionException { return bottomup(); }
+    };
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+    public Object topdown() throws RecognitionException { return null; }
+    public Object bottomup() throws RecognitionException { return null; }
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
similarity index 95%
rename from runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
index eca2c59..ffe0d93 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreeRuleReturnScope.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -37,4 +37,5 @@ import org.antlr.runtime.RuleReturnScope;
 public class TreeRuleReturnScope extends RuleReturnScope {
 	/** First node or root node of tree matched for this rule. */
 	public Object start;
+	public Object getStart() { return start; }	
 }
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java
new file mode 100644
index 0000000..04a8b48
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java
@@ -0,0 +1,42 @@
+package org.antlr.runtime.tree;
+
+/** Do a depth first walk of a tree, applying pre() and post() actions
+ *  as we discover and finish nodes.
+ */
+public class TreeVisitor {
+    protected TreeAdaptor adaptor;
+    
+    public TreeVisitor(TreeAdaptor adaptor) {
+        this.adaptor = adaptor;
+    }
+    public TreeVisitor() { this(new CommonTreeAdaptor()); }
+    
+    /** Visit every node in tree t and trigger an action for each node
+     *  before/after having visited all of its children.
+     *  Execute both actions even if t has no children.
+     *  If a child visit yields a new child, it can update its
+     *  parent's child list or just return the new child.  The
+     *  child update code works even if the child visit alters its parent
+     *  and returns the new tree.
+     *
+     *  Return result of applying post action to this node.
+     */
+    public Object visit(Object t, TreeVisitorAction action) {
+        // System.out.println("visit "+((Tree)t).toStringTree());
+        boolean isNil = adaptor.isNil(t);
+        if ( action!=null && !isNil ) {
+            t = action.pre(t); // if rewritten, walk children of new t
+        }
+        int n = adaptor.getChildCount(t);
+        for (int i=0; i<n; i++) {
+            Object child = adaptor.getChild(t, i);
+            Object visitResult = visit(child, action);
+            Object childAfterVisit = adaptor.getChild(t, i);
+            if ( visitResult !=  childAfterVisit ) { // result & child differ?
+                adaptor.setChild(t, i, visitResult);
+            }
+        }
+        if ( action!=null && !isNil ) t = action.post(t);
+        return t;
+    }
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java
new file mode 100644
index 0000000..5d02d42
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java
@@ -0,0 +1,19 @@
+package org.antlr.runtime.tree;
+
+/** How to execute code for node t when a visitor visits node t.  Execute
+ *  pre() before visiting children and execute post() after visiting children.
+ */
+public interface TreeVisitorAction {
+    /** Execute an action before visiting children of t.  Return t or
+     *  a rewritten t.  It is up to the visitor to decide what to do
+     *  with the return value.  Children of returned value will be
+     *  visited if using TreeVisitor.visit().
+     */
+    public Object pre(Object t);
+
+    /** Execute an action after visiting children of t.  Return t or
+     *  a rewritten t.  It is up to the visitor to decide what to do
+     *  with the return value.
+     */
+    public Object post(Object t);
+}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/TreeWizard.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
similarity index 71%
rename from runtime/Java/src/org/antlr/runtime/tree/TreeWizard.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
index 162beb6..5172ede 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/TreeWizard.java
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2007 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,10 +29,7 @@ package org.antlr.runtime.tree;
 
 import org.antlr.runtime.Token;
 
-import java.util.Map;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
+import java.util.*;
 
 /** Build and navigate trees with this object.  Must know about the names
  *  of tokens so you have to pass in a map or array of token names (from which
@@ -98,6 +95,27 @@ public class TreeWizard {
 		}
 	}
 
+	// TODO: build indexes for the wizard
+
+	/** During fillBuffer(), we can make a reverse index from a set
+	 *  of token types of interest to the list of indexes into the
+	 *  node stream.  This lets us convert a node pointer to a
+	 *  stream index semi-efficiently for a list of interesting
+	 *  nodes such as function definition nodes (you'll want to seek
+	 *  to their bodies for an interpreter).  Also useful for doing
+	 *  dynamic searches; i.e., go find me all PLUS nodes.
+	protected Map tokenTypeToStreamIndexesMap;
+
+	/** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
+	 *  occurs for all token types.
+	public static final Set INDEX_ALL = new HashSet();
+
+	/** A set of token types user would like to index for faster lookup.
+	 *  If this is INDEX_ALL, then all token types are tracked.  If null,
+	 *  then none are indexed.
+	protected Set tokenTypesToReverseIndex = null;
+	*/
+
 	public TreeWizard(TreeAdaptor adaptor) {
 		this.adaptor = adaptor;
 	}
@@ -121,6 +139,9 @@ public class TreeWizard {
 	 */
 	public Map computeTokenTypes(String[] tokenNames) {
 		Map m = new HashMap();
+		if ( tokenNames==null ) {
+			return m;
+		}
 		for (int ttype = Token.MIN_TOKEN_TYPE; ttype < tokenNames.length; ttype++) {
 			String name = tokenNames[ttype];
 			m.put(name, new Integer(ttype));
@@ -159,7 +180,7 @@ public class TreeWizard {
 			return;
 		}
 		int ttype = adaptor.getType(t);
-		List elements = (List)m.get(ttype);
+		List elements = (List)m.get(new Integer(ttype));
 		if ( elements==null ) {
 			elements = new ArrayList();
 			m.put(new Integer(ttype), elements);
@@ -305,33 +326,32 @@ public class TreeWizard {
 	 *  text arguments on nodes.  Fill labels map with pointers to nodes
 	 *  in tree matched against nodes in pattern with labels.
 	 */
-	protected boolean _parse(Object t1, TreePattern t2, Map labels) {
+	protected boolean _parse(Object t1, TreePattern tpattern, Map labels) {
 		// make sure both are non-null
-		if ( t1==null || t2==null ) {
+		if ( t1==null || tpattern==null ) {
 			return false;
 		}
 		// check roots (wildcard matches anything)
-		if ( t2.getClass() != WildcardTreePattern.class ) {
-			if ( adaptor.getType(t1) != t2.getType() ) {
-				return false;
-			}
-			if ( t2.hasTextArg && !adaptor.getText(t1).equals(t2.getText()) ) {
+		if ( tpattern.getClass() != WildcardTreePattern.class ) {
+			if ( adaptor.getType(t1) != tpattern.getType() ) return false;
+            // if pattern has text, check node text
+			if ( tpattern.hasTextArg && !adaptor.getText(t1).equals(tpattern.getText()) ) {
 				return false;
 			}
 		}
-		if ( t2.label!=null && labels!=null ) {
+		if ( tpattern.label!=null && labels!=null ) {
 			// map label in pattern to node in t1
-			labels.put(t2.label, t1);
+			labels.put(tpattern.label, t1);
 		}
 		// check children
 		int n1 = adaptor.getChildCount(t1);
-		int n2 = t2.getChildCount();
+		int n2 = tpattern.getChildCount();
 		if ( n1 != n2 ) {
 			return false;
 		}
 		for (int i=0; i<n1; i++) {
 			Object child1 = adaptor.getChild(t1, i);
-			TreePattern child2 = (TreePattern)t2.getChild(i);
+			TreePattern child2 = (TreePattern)tpattern.getChild(i);
 			if ( !_parse(child1, child2, labels) ) {
 				return false;
 			}
@@ -406,4 +426,103 @@ public class TreeWizard {
 		}
 		return true;
 	}
+
+	// TODO: next stuff taken from CommonTreeNodeStream
+	
+		/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
+	 *  You can override this method to alter how indexing occurs.  The
+	 *  default is to create a
+	 *
+	 *    Map<Integer token type,ArrayList<Integer stream index>>
+	 *
+	 *  This data structure allows you to find all nodes with type INT in order.
+	 *
+	 *  If you really need to find a node of type, say, FUNC quickly then perhaps
+	 *
+	 *    Map<Integertoken type,Map<Object tree node,Integer stream index>>
+	 *
+	 *  would be better for you.  The interior maps map a tree node to
+	 *  the index so you don't have to search linearly for a specific node.
+	 *
+	 *  If you change this method, you will likely need to change
+	 *  getNodeIndex(), which extracts information.
+	protected void fillReverseIndex(Object node, int streamIndex) {
+		//System.out.println("revIndex "+node+"@"+streamIndex);
+		if ( tokenTypesToReverseIndex==null ) {
+			return; // no indexing if this is empty (nothing of interest)
+		}
+		if ( tokenTypeToStreamIndexesMap==null ) {
+			tokenTypeToStreamIndexesMap = new HashMap(); // first indexing op
+		}
+		int tokenType = adaptor.getType(node);
+		Integer tokenTypeI = new Integer(tokenType);
+		if ( !(tokenTypesToReverseIndex==INDEX_ALL ||
+			   tokenTypesToReverseIndex.contains(tokenTypeI)) )
+		{
+			return; // tokenType not of interest
+		}
+		Integer streamIndexI = new Integer(streamIndex);
+		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+		if ( indexes==null ) {
+			indexes = new ArrayList(); // no list yet for this token type
+			indexes.add(streamIndexI); // not there yet, add
+			tokenTypeToStreamIndexesMap.put(tokenTypeI, indexes);
+		}
+		else {
+			if ( !indexes.contains(streamIndexI) ) {
+				indexes.add(streamIndexI); // not there yet, add
+			}
+		}
+	}
+
+	/** Track the indicated token type in the reverse index.  Call this
+	 *  repeatedly for each type or use variant with Set argument to
+	 *  set all at once.
+	 * @param tokenType
+	public void reverseIndex(int tokenType) {
+		if ( tokenTypesToReverseIndex==null ) {
+			tokenTypesToReverseIndex = new HashSet();
+		}
+		else if ( tokenTypesToReverseIndex==INDEX_ALL ) {
+			return;
+		}
+		tokenTypesToReverseIndex.add(new Integer(tokenType));
+	}
+
+	/** Track the indicated token types in the reverse index. Set
+	 *  to INDEX_ALL to track all token types.
+	public void reverseIndex(Set tokenTypes) {
+		tokenTypesToReverseIndex = tokenTypes;
+	}
+
+	/** Given a node pointer, return its index into the node stream.
+	 *  This is not its Token stream index.  If there is no reverse map
+	 *  from node to stream index or the map does not contain entries
+	 *  for node's token type, a linear search of entire stream is used.
+	 *
+	 *  Return -1 if exact node pointer not in stream.
+	public int getNodeIndex(Object node) {
+		//System.out.println("get "+node);
+		if ( tokenTypeToStreamIndexesMap==null ) {
+			return getNodeIndexLinearly(node);
+		}
+		int tokenType = adaptor.getType(node);
+		Integer tokenTypeI = new Integer(tokenType);
+		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+		if ( indexes==null ) {
+			//System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
+			return getNodeIndexLinearly(node);
+		}
+		for (int i = 0; i < indexes.size(); i++) {
+			Integer streamIndexI = (Integer)indexes.get(i);
+			Object n = get(streamIndexI.intValue());
+			if ( n==node ) {
+				//System.out.println("found in index; stream index = "+streamIndexI);
+				return streamIndexI.intValue(); // found it!
+			}
+		}
+		return -1;
+	}
+
+	*/
 }
diff --git a/runtime/Java/src/org/antlr/runtime/ANTLRInputStream.java b/runtime/Java/src/org/antlr/runtime/ANTLRInputStream.java
deleted file mode 100644
index 957f6a4..0000000
--- a/runtime/Java/src/org/antlr/runtime/ANTLRInputStream.java
+++ /dev/null
@@ -1,43 +0,0 @@
-package org.antlr.runtime;
-
-import java.io.*;
-
-/** A kind of ReaderStream that pulls from an InputStream.
- *  Useful for reading from stdin and specifying file encodings etc...
-  */
-public class ANTLRInputStream extends ANTLRReaderStream {
-	public ANTLRInputStream() {
-	}
-
-	public ANTLRInputStream(InputStream input) throws IOException {
-		this(input, null);
-	}
-
-	public ANTLRInputStream(InputStream input, int size) throws IOException {
-		this(input, size, null);
-	}
-
-	public ANTLRInputStream(InputStream input, String encoding) throws IOException {
-		this(input, INITIAL_BUFFER_SIZE, encoding);
-	}
-
-	public ANTLRInputStream(InputStream input, int size, String encoding) throws IOException {
-		this(input, size, READ_BUFFER_SIZE, encoding);
-	}
-
-	public ANTLRInputStream(InputStream input,
-							int size,
-							int readBufferSize,
-							String encoding)
-		throws IOException
-	{
-		InputStreamReader isr;
-		if ( encoding!=null ) {
-			isr = new InputStreamReader(input, encoding);
-		}
-		else {
-			isr = new InputStreamReader(input);
-		}
-		load(isr, size, readBufferSize);
-	}
-}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTreeNodeException.java b/runtime/Java/src/org/antlr/runtime/MismatchedTreeNodeException.java
deleted file mode 100644
index e3d223a..0000000
--- a/runtime/Java/src/org/antlr/runtime/MismatchedTreeNodeException.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package org.antlr.runtime;
-
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.runtime.tree.Tree;
-
-/**
- */
-public class MismatchedTreeNodeException extends RecognitionException {
-	public int expecting;
-
-	public MismatchedTreeNodeException() {
-	}
-
-	public MismatchedTreeNodeException(int expecting, TreeNodeStream input) {
-		super(input);
-		this.expecting = expecting;
-	}
-
-	public String toString() {
-		return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")";
-	}
-}
diff --git a/runtime/Java/src/org/antlr/runtime/RuleReturnScope.java b/runtime/Java/src/org/antlr/runtime/RuleReturnScope.java
deleted file mode 100644
index cb997c0..0000000
--- a/runtime/Java/src/org/antlr/runtime/RuleReturnScope.java
+++ /dev/null
@@ -1,15 +0,0 @@
-package org.antlr.runtime;
-
-/** Rules can return start/stop info as well as possible trees and templates */
-public class RuleReturnScope {
-	/** Return the start token or tree */
-	public Object getStart() { return null; }
-	/** Return the stop token or tree */
-	public Object getStop() { return null; }
-	/** Has a value potentially if output=AST; */
-	public Object getTree() { return null; }
-	/** Has a value potentially if output=template; Don't use StringTemplate
-	 *  type as it then causes a dependency with ST lib.
-	 */
-	public Object getTemplate() { return null; }
-}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/BaseTree.java b/runtime/Java/src/org/antlr/runtime/tree/BaseTree.java
deleted file mode 100644
index 2819f72..0000000
--- a/runtime/Java/src/org/antlr/runtime/tree/BaseTree.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.tree;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/** A generic tree implementation with no payload.  You must subclass to
- *  actually have any user data.  ANTLR v3 uses a list of children approach
- *  instead of the child-sibling approach in v2.  A flat tree (a list) is
- *  an empty node whose children represent the list.  An empty, but
- *  non-null node is called "nil".
- */
-public abstract class BaseTree implements Tree {
-	protected List children;
-
-	public BaseTree() {
-	}
-
-	/** Create a new node from an existing node does nothing for BaseTree
-	 *  as there are no fields other than the children list, which cannot
-	 *  be copied as the children are not considered part of this node. 
-	 */
-	public BaseTree(Tree node) {
-	}
-
-	public Tree getChild(int i) {
-		if ( children==null || i>=children.size() ) {
-			return null;
-		}
-		return (BaseTree)children.get(i);
-	}
-
-	public Tree getFirstChildWithType(int type) {
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			Tree t = (Tree) children.get(i);
-			if ( t.getType()==type ) {
-				return t;
-			}
-		}	
-		return null;
-	}
-
-	public int getChildCount() {
-		if ( children==null ) {
-			return 0;
-		}
-		return children.size();
-	}
-
-	/** Add t as child of this node.
-	 *
-	 *  Warning: if t has no children, but child does
-	 *  and child isNil then this routine moves children to t via
-	 *  t.children = child.children; i.e., without copying the array.
-	 */
-	public void addChild(Tree t) {
-		//System.out.println("add "+t.toStringTree()+" as child to "+this.toStringTree());
-		if ( t==null ) {
-			return; // do nothing upon addChild(null)
-		}
-		BaseTree childTree = (BaseTree)t;
-		if ( childTree.isNil() ) { // t is an empty node possibly with children
-			if ( this.children!=null && this.children == childTree.children ) {
-				throw new RuntimeException("attempt to add child list to itself");
-			}
-			// just add all of childTree's children to this
-			if ( childTree.children!=null ) {
-				if ( this.children!=null ) { // must copy, this has children already
-					int n = childTree.children.size();
-					for (int i = 0; i < n; i++) {
-						this.children.add(childTree.children.get(i));
-					}
-				}
-				else {
-					// no children for this but t has children; just set pointer
-					this.children = childTree.children;
-				}
-			}
-		}
-		else { // t is not empty and might have children
-			if ( children==null ) {
-				children = createChildrenList(); // create children list on demand
-			}
-			children.add(t);
-		}
-	}
-
-	/** Add all elements of kids list as children of this node */
-	public void addChildren(List kids) {
-		for (int i = 0; i < kids.size(); i++) {
-			Tree t = (Tree) kids.get(i);
-			addChild(t);
-		}
-	}
-
-	public void setChild(int i, BaseTree t) {
-		if ( children==null ) {
-			children = createChildrenList();
-		}
-		children.set(i, t);
-	}
-
-	public BaseTree deleteChild(int i) {
-		if ( children==null ) {
-			return null;
-		}
-		return (BaseTree)children.remove(i);
-	}
-
-	/** Override in a subclass to change the impl of children list */
-	protected List createChildrenList() {
-		return new ArrayList();
-	}
-
-	public boolean isNil() {
-		return false;
-	}
-
-	/** Recursively walk this tree, dup'ing nodes until you have copy of
-	 *  this tree.  This method should work for all subclasses as long
-	 *  as they override dupNode().
-	 */
-	public Tree dupTree() {
-		Tree newTree = this.dupNode();
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			Tree t = (Tree) children.get(i);
-			Tree newSubTree = t.dupTree();
-			newTree.addChild(newSubTree);
-		}
-		return newTree;
-	}
-
-	/** Print out a whole tree not just a node */
-    public String toStringTree() {
-		if ( children==null || children.size()==0 ) {
-			return this.toString();
-		}
-		StringBuffer buf = new StringBuffer();
-		if ( !isNil() ) {
-			buf.append("(");
-			buf.append(this.toString());
-			buf.append(' ');
-		}
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			BaseTree t = (BaseTree) children.get(i);
-			if ( i>0 ) {
-				buf.append(' ');
-			}
-			buf.append(t.toStringTree());
-		}
-		if ( !isNil() ) {
-			buf.append(")");
-		}
-		return buf.toString();
-	}
-
-    public int getLine() {
-		return 0;
-	}
-
-	public int getCharPositionInLine() {
-		return 0;
-	}
-
-	/** Override to say how a node (not a tree) should look as text */
-	public abstract String toString();
-}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/Tree.java b/runtime/Java/src/org/antlr/runtime/tree/Tree.java
deleted file mode 100644
index 2794ea6..0000000
--- a/runtime/Java/src/org/antlr/runtime/tree/Tree.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-
-/** What does a tree look like?  ANTLR has a number of support classes
- *  such as CommonTreeNodeStream that work on these kinds of trees.  You
- *  don't have to make your trees implement this interface, but if you do,
- *  you'll be able to use more support code.
- *
- *  NOTE: When constructing trees, ANTLR can build any kind of tree; it can
- *  even use Token objects as trees if you add a child list to your tokens.
- *
- *  This is a tree node without any payload; just navigation and factory stuff.
- */
-public interface Tree {
-	public static final Tree INVALID_NODE = new CommonTree(Token.INVALID_TOKEN);
-
-	Tree getChild(int i);
-
-	int getChildCount();
-
-	/** Add t as a child to this node.  If t is null, do nothing.  If t
-	 *  is nil, add all children of t to this' children.
-	 * @param t
-	 */
-	void addChild(Tree t);
-
-	/** Indicates the node is a nil node but may still have children, meaning
-	 *  the tree is a flat list.
-	 */
-	boolean isNil();
-
-	/**  What is the smallest token index (indexing from 0) for this node
-	 *   and its children?
-	 */
-	int getTokenStartIndex();
-
-	void setTokenStartIndex(int index);
-
-	/**  What is the largest token index (indexing from 0) for this node
-	 *   and its children?
-	 */
-	int getTokenStopIndex();
-
-	void setTokenStopIndex(int index);
-
-	Tree dupTree();
-
-	Tree dupNode();
-
-	/** Return a token type; needed for tree parsing */
-	int getType();
-
-	String getText();
-
-	/** In case we don't have a token payload, what is the line for errors? */
-	int getLine();
-
-	int getCharPositionInLine();
-
-	String toStringTree();
-
-	String toString();
-}
diff --git a/runtime/Java/src/org/antlr/runtime/tree/UnBufferedTreeNodeStream.java b/runtime/Java/src/org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
deleted file mode 100644
index a4aff58..0000000
--- a/runtime/Java/src/org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
-[The "BSD licence"]
-Copyright (c) 2005-2006 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenStream;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Stack;
-
-/** A stream of tree nodes, accessing nodes from a tree of ANY kind.
- *  No new nodes should be created in tree during the walk.  A small buffer
- *  of tokens is kept to efficiently and easily handle LT(i) calls, though
- *  the lookahead mechanism is fairly complicated.
- *
- *  For tree rewriting during tree parsing, this must also be able
- *  to replace a set of children without "losing its place".
- *  That part is not yet implemented.  Will permit a rule to return
- *  a different tree and have it stitched into the output tree probably.
- *
- *  @see CommonTreeNodeStream
- */
-public class UnBufferedTreeNodeStream implements TreeNodeStream {
-	public static final int INITIAL_LOOKAHEAD_BUFFER_SIZE = 5;
-
-	/** Reuse same DOWN, UP navigation nodes unless this is true */
-	protected boolean uniqueNavigationNodes = false;
-
-	/** Pull nodes from which tree? */
-	protected Object root;
-
-	/** IF this tree (root) was created from a token stream, track it. */
-	protected TokenStream tokens;
-
-	/** What tree adaptor was used to build these trees */
-	TreeAdaptor adaptor;
-
-	/** As we walk down the nodes, we must track parent nodes so we know
-	 *  where to go after walking the last child of a node.  When visiting
-	 *  a child, push current node and current index.
-	 */
-	protected Stack nodeStack = new Stack();
-
-	/** Track which child index you are visiting for each node we push.
-	 *  TODO: pretty inefficient...use int[] when you have time
-	 */
-	protected Stack indexStack = new Stack();
-
-	/** Which node are we currently visiting? */
-	protected Object currentNode;
-
-	/** Which node did we visit last?  Used for LT(-1) calls. */
-	protected Object previousNode;
-
-	/** Which child are we currently visiting?  If -1 we have not visited
-	 *  this node yet; next consume() request will set currentIndex to 0.
-	 */
-	protected int currentChildIndex;
-
-	/** What node index did we just consume?  i=0..n-1 for n node trees.
-	 *  IntStream.next is hence 1 + this value.  Size will be same.
-	 */
-	protected int absoluteNodeIndex;
-
-	/** Buffer tree node stream for use with LT(i).  This list grows
-	 *  to fit new lookahead depths, but consume() wraps like a circular
-	 *  buffer.
-	 */
-	protected Object[] lookahead = new Object[INITIAL_LOOKAHEAD_BUFFER_SIZE];
-
-	/** lookahead[head] is the first symbol of lookahead, LT(1). */
-	protected int head;
-
-	/** Add new lookahead at lookahead[tail].  tail wraps around at the
-	 *  end of the lookahead buffer so tail could be less than head.
-	  */
-	protected int tail;
-
-	/** When walking ahead with cyclic DFA or for syntactic predicates,
-	  *  we need to record the state of the tree node stream.  This
-	 *  class wraps up the current state of the UnBufferedTreeNodeStream.
-	 *  Calling mark() will push another of these on the markers stack.
-	 */
-	protected class TreeWalkState {
-		int currentChildIndex;
-		int absoluteNodeIndex;
-		Object currentNode;
-		Object previousNode;
-		/** Record state of the nodeStack */
-		int nodeStackSize;
-		/** Record state of the indexStack */
-		int indexStackSize;
-		Object[] lookahead;
-	}
-
-	/** Calls to mark() may be nested so we have to track a stack of
-	 *  them.  The marker is an index into this stack.
-	 *  This is a List<TreeWalkState>.  Indexed from 1..markDepth.
-	 *  A null is kept @ index 0.  Create upon first call to mark().
-	 */
-	protected List markers;
-
-	/** tracks how deep mark() calls are nested */
-	protected int markDepth = 0;
-
-	/** Track the last mark() call result value for use in rewind(). */
-	protected int lastMarker;
-
-	// navigation nodes
-
-	protected Object down;
-	protected Object up;
-	protected Object eof;
-
-	public UnBufferedTreeNodeStream(Object tree) {
-		this(new CommonTreeAdaptor(), tree);
-	}
-
-	public UnBufferedTreeNodeStream(TreeAdaptor adaptor, Object tree) {
-		this.root = tree;
-		this.adaptor = adaptor;
-		reset();
-		down = adaptor.create(Token.DOWN, "DOWN");
-		up = adaptor.create(Token.UP, "UP");
-		eof = adaptor.create(Token.EOF, "EOF");
-	}
-
-	public void reset() {
-		currentNode = root;
-		previousNode = null;
-		currentChildIndex = -1;
-		absoluteNodeIndex = -1;
-		head = tail = 0;
-	}
-
-	// Satisfy TreeNodeStream
-
-	public Object get(int i) {
-		throw new UnsupportedOperationException("stream is unbuffered");
-	}
-
-	/** Get tree node at current input pointer + i ahead where i=1 is next node.
-	 *  i<0 indicates nodes in the past.  So -1 is previous node and -2 is
-	 *  two nodes ago. LT(0) is undefined.  For i>=n, return null.
-	 *  Return null for LT(0) and any index that results in an absolute address
-	 *  that is negative.
-	 *
-	 *  This is analogus to the LT() method of the TokenStream, but this
-	 *  returns a tree node instead of a token.  Makes code gen identical
-	 *  for both parser and tree grammars. :)
-	 */
-	public Object LT(int k) {
-		//System.out.println("LT("+k+"); head="+head+", tail="+tail);
-		if ( k==-1 ) {
-			return previousNode;
-		}
-		if ( k<0 ) {
-			throw new IllegalArgumentException("tree node streams cannot look backwards more than 1 node");
-		}
-		if ( k==0 ) {
-			return Tree.INVALID_NODE;
-		}
-		fill(k);
-		return lookahead[(head+k-1)%lookahead.length];
-	}
-
-	/** Where is this stream pulling nodes from?  This is not the name, but
-	 *  the object that provides node objects.
-	 */
-	public Object getTreeSource() {
-		return root;
-	}
-
-	public TokenStream getTokenStream() {
-		return tokens;
-	}
-
-	public void setTokenStream(TokenStream tokens) {
-		this.tokens = tokens;
-	}
-
-	/** Make sure we have at least k symbols in lookahead buffer */
-	protected void fill(int k) {
-		int n = getLookaheadSize();
-		//System.out.println("we have "+n+" nodes; need "+(k-n));
-		for (int i=1; i<=k-n; i++) {
-			next(); // get at least k-depth lookahead nodes
-		}
-	}
-
-	/** Add a node to the lookahead buffer.  Add at lookahead[tail].
-	 *  If you tail+1 == head, then we must create a bigger buffer
-	 *  and copy all the nodes over plus reset head, tail.  After
-	 *  this method, LT(1) will be lookahead[0].
-	 */
-	protected void addLookahead(Object node) {
-		//System.out.println("addLookahead head="+head+", tail="+tail);
-		lookahead[tail] = node;
-		tail = (tail+1)%lookahead.length;
-		if ( tail==head ) {
-			// buffer overflow: tail caught up with head
-			// allocate a buffer 2x as big
-			Object[] bigger = new Object[2*lookahead.length];
-			// copy head to end of buffer to beginning of bigger buffer
-			int remainderHeadToEnd = lookahead.length-head;
-			System.arraycopy(lookahead, head, bigger, 0, remainderHeadToEnd);
-			// copy 0..tail to after that
-			System.arraycopy(lookahead, 0, bigger, remainderHeadToEnd, tail);
-			lookahead = bigger; // reset to bigger buffer
-			head = 0;
-			tail += remainderHeadToEnd;
-		}
-	}
-
-	// Satisfy IntStream interface
-
-	public void consume() {
-		/*
-		System.out.println("consume: currentNode="+currentNode.getType()+
-						   " childIndex="+currentChildIndex+
-						   " nodeIndex="+absoluteNodeIndex);
-						   */
-		// make sure there is something in lookahead buf, which might call next()
-		fill(1);
-		absoluteNodeIndex++;
-		previousNode = lookahead[head]; // track previous node before moving on
-		head = (head+1) % lookahead.length;
-	}
-
-	public int LA(int i) {
-		Object t = LT(i);
-		if ( t==null ) {
-			return Token.INVALID_TOKEN_TYPE;
-		}
-		return adaptor.getType(t);
-	}
-
-	/** Record the current state of the tree walk which includes
-	 *  the current node and stack state as well as the lookahead
-	 *  buffer.
-	 */
-	public int mark() {
-		if ( markers==null ) {
-			markers = new ArrayList();
-			markers.add(null); // depth 0 means no backtracking, leave blank
-		}
-		markDepth++;
-		TreeWalkState state = null;
-		if ( markDepth>=markers.size() ) {
-			state = new TreeWalkState();
-			markers.add(state);
-		}
-		else {
-			state = (TreeWalkState)markers.get(markDepth);
-		}
-		state.absoluteNodeIndex = absoluteNodeIndex;
-		state.currentChildIndex = currentChildIndex;
-		state.currentNode = currentNode;
-		state.previousNode = previousNode;
-		state.nodeStackSize = nodeStack.size();
-		state.indexStackSize = indexStack.size();
-		// take snapshot of lookahead buffer
-		int n = getLookaheadSize();
-		int i=0;
-		state.lookahead = new Object[n];
-		for (int k=1; k<=n; k++,i++) {
-			state.lookahead[i] = LT(k);
-		}
-		lastMarker = markDepth;
-		return markDepth;
-	}
-
-	public void release(int marker) {
-		// unwind any other markers made after marker and release marker
-		markDepth = marker;
-		// release this marker
-		markDepth--;
-	}
-
-	/** Rewind the current state of the tree walk to the state it
-	 *  was in when mark() was called and it returned marker.  Also,
-	 *  wipe out the lookahead which will force reloading a few nodes
-	 *  but it is better than making a copy of the lookahead buffer
-	 *  upon mark().
-	 */
-	public void rewind(int marker) {
-		if ( markers==null ) {
-			return;
-		}
-		TreeWalkState state = (TreeWalkState)markers.get(marker);
-		absoluteNodeIndex = state.absoluteNodeIndex;
-		currentChildIndex = state.currentChildIndex;
-		currentNode = state.currentNode;
-		previousNode = state.previousNode;
-		// drop node and index stacks back to old size
-		nodeStack.setSize(state.nodeStackSize);
-		indexStack.setSize(state.indexStackSize);
-		head = tail = 0; // wack lookahead buffer and then refill
-		for (; tail<state.lookahead.length; tail++) {
-			lookahead[tail] = state.lookahead[tail];
-		}
-		release(marker);
-	}
-
-	public void rewind() {
-		rewind(lastMarker);
-	}
-
-	/** consume() ahead until we hit index.  Can't just jump ahead--must
-	 *  spit out the navigation nodes.
-	 */
-	public void seek(int index) {
-		if ( index<this.index() ) {
-			throw new IllegalArgumentException("can't seek backwards in node stream");
-		}
-		// seek forward, consume until we hit index
-		while ( this.index()<index ) {
-			consume();
-		}
-	}
-
-	public int index() {
-		return absoluteNodeIndex+1;
-	}
-
-	/** Expensive to compute; recursively walk tree to find size;
-	 *  include navigation nodes and EOF.  Reuse functionality
-	 *  in CommonTreeNodeStream as we only really use this
-	 *  for testing.
-	 */
-	public int size() {
-		CommonTreeNodeStream s = new CommonTreeNodeStream(root);
-		return s.size();
-	}
-
-	/** Return the next node found during a depth-first walk of root.
-	 *  Also, add these nodes and DOWN/UP imaginary nodes into the lokoahead
-	 *  buffer as a side-effect.  Normally side-effects are bad, but because
-	 *  we can emit many tokens for every next() call, it's pretty hard to
-	 *  use a single return value for that.  We must add these tokens to
-	 *  the lookahead buffer.
-	 *
-	 *  This does *not* return the DOWN/UP nodes; those are only returned
-	 *  by the LT() method.
-	 *
-	 *  Ugh.  This mechanism is much more complicated than a recursive
-	 *  solution, but it's the only way to provide nodes on-demand instead
-	 *  of walking once completely through and buffering up the nodes. :(
-	 */
-	public Object next() {
-		// already walked entire tree; nothing to return
-		if ( currentNode==null ) {
-			addLookahead(eof);
-			// this is infinite stream returning EOF at end forever
-			// so don't throw NoSuchElementException
-			return null;
-		}
-
-		// initial condition (first time method is called)
-		if ( currentChildIndex==-1 ) {
-			return handleRootNode();
-		}
-
-		// index is in the child list?
-		if ( currentChildIndex<adaptor.getChildCount(currentNode) ) {
-			return visitChild(currentChildIndex);
-		}
-
-		// hit end of child list, return to parent node or its parent ...
-		walkBackToMostRecentNodeWithUnvisitedChildren();
-		if ( currentNode!=null ) {
-			return visitChild(currentChildIndex);
-		}
-
-		return null;
-	}
-
-	protected Object handleRootNode() {
-		Object node;
-		node = currentNode;
-		// point to first child in prep for subsequent next()
-		currentChildIndex = 0;
-		if ( adaptor.isNil(node) ) {
-			// don't count this root nil node
-			node = visitChild(currentChildIndex);
-		}
-		else {
-			addLookahead(node);
-			if ( adaptor.getChildCount(currentNode)==0 ) {
-				// single node case
-				currentNode = null; // say we're done
-			}
-		}
-		return node;
-	}
-
-	protected Object visitChild(int child) {
-		Object node = null;
-		// save state
-		nodeStack.push(currentNode);
-		indexStack.push(new Integer(child));
-		if ( child==0 && !adaptor.isNil(currentNode) ) {
-			addNavigationNode(Token.DOWN);
-		}
-		// visit child
-		currentNode = adaptor.getChild(currentNode,child);
-		currentChildIndex = 0;
-		node = currentNode;  // record node to return
-		addLookahead(node);
-		walkBackToMostRecentNodeWithUnvisitedChildren();
-		return node;
-	}
-
-	/** As we flatten the tree, we use UP, DOWN nodes to represent
-	 *  the tree structure.  When debugging we need unique nodes
-	 *  so instantiate new ones when uniqueNavigationNodes is true.
-	 */
-	protected void addNavigationNode(final int ttype) {
-		Object navNode = null;
-		if ( ttype==Token.DOWN ) {
-			if ( hasUniqueNavigationNodes() ) {
-				navNode = adaptor.create(Token.DOWN, "DOWN");
-			}
-			else {
-				navNode = down;
-			}
-		}
-		else {
-			if ( hasUniqueNavigationNodes() ) {
-				navNode = adaptor.create(Token.UP, "UP");
-			}
-			else {
-				navNode = up;
-			}
-		}
-		addLookahead(navNode);
-	}
-
-	/** Walk upwards looking for a node with more children to walk. */
-	protected void walkBackToMostRecentNodeWithUnvisitedChildren() {
-		while ( currentNode!=null &&
-				currentChildIndex>=adaptor.getChildCount(currentNode) )
-		{
-			currentNode = nodeStack.pop();
-			if ( currentNode==null ) { // hit the root?
-				return;
-			}
-			currentChildIndex = ((Integer)indexStack.pop()).intValue();
-			currentChildIndex++; // move to next child
-			if ( currentChildIndex>=adaptor.getChildCount(currentNode) ) {
-				if ( !adaptor.isNil(currentNode) ) {
-					addNavigationNode(Token.UP);
-				}
-				if ( currentNode==root ) { // we done yet?
-					currentNode = null;
-				}
-			}
-		}
-	}
-
-	public TreeAdaptor getTreeAdaptor() {
-		return adaptor;
-	}
-
-	public boolean hasUniqueNavigationNodes() {
-		return uniqueNavigationNodes;
-	}
-
-	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
-		this.uniqueNavigationNodes = uniqueNavigationNodes;
-	}
-
-	/** Print out the entire tree including DOWN/UP nodes.  Uses
-	 *  a recursive walk.  Mostly useful for testing as it yields
-	 *  the token types not text.
-	 */
-	public String toString() {
-		return toString(root, null);
-	}
-
-	protected int getLookaheadSize() {
-		return tail<head?(lookahead.length-head+tail):(tail-head);
-	}
-
-	public String toString(Object start, Object stop) {
-		if ( start==null ) {
-			return null;
-		}
-		// if we have the token stream, use that to dump text in order
-		if ( tokens!=null ) {
-			// don't trust stop node as it's often an UP node etc...
-			// walk backwards until you find a non-UP, non-DOWN node
-			// and ask for it's token index.
-			int beginTokenIndex = adaptor.getTokenStartIndex(start);
-			int endTokenIndex = adaptor.getTokenStopIndex(stop);
-			if ( stop!=null && adaptor.getType(stop)==Token.UP ) {
-				endTokenIndex = adaptor.getTokenStopIndex(start);
-			}
-			else {
-				endTokenIndex = size()-1;
-			}
-			return tokens.toString(beginTokenIndex, endTokenIndex);
-		}
-		StringBuffer buf = new StringBuffer();
-		toStringWork(start, stop, buf);
-		return buf.toString();
-	}
-
-	protected void toStringWork(Object p, Object stop, StringBuffer buf) {
-		if ( !adaptor.isNil(p) ) {
-			String text = adaptor.getText(p);
-			if ( text==null ) {
-				text = " "+String.valueOf(adaptor.getType(p));
-			}
-			buf.append(text); // ask the node to go to string
-		}
-		if ( p==stop ) {
-			return;
-		}
-		int n = adaptor.getChildCount(p);
-		if ( n>0 && !adaptor.isNil(p) ) {
-			buf.append(" ");
-			buf.append(Token.DOWN);
-		}
-		for (int c=0; c<n; c++) {
-			Object child = adaptor.getChild(p,c);
-			toStringWork(child, stop, buf);
-		}
-		if ( n>0 && !adaptor.isNil(p) ) {
-			buf.append(" ");
-			buf.append(Token.UP);
-		}
-	}
-}
-
diff --git a/src/org/antlr/Tool.java b/src/org/antlr/Tool.java
deleted file mode 100644
index 399c86d..0000000
--- a/src/org/antlr/Tool.java
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr;
-
-import org.antlr.tool.*;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.analysis.*;
-import org.antlr.runtime.misc.Stats;
-
-import java.io.*;
-import java.util.*;
-
-/** The main ANTLR entry point.  Read a grammar and generate a parser. */
-public class Tool {
-	public static final String VERSION = "3.0.1";
-
-	public static final String UNINITIALIZED_DIR = "<unset-dir>";
-
-    // Input parameters / option
-
-    protected List grammarFileNames = new ArrayList();
-	protected boolean generate_NFA_dot = false;
-	protected boolean generate_DFA_dot = false;
-	protected String outputDirectory = UNINITIALIZED_DIR;
-	protected String libDirectory = ".";
-	protected boolean debug = false;
-	protected boolean trace = false;
-	protected boolean profile = false;
-	protected boolean report = false;
-	protected boolean printGrammar = false;
-	protected boolean depend = false;
-	protected boolean forceAllFilesToOutputDir = false;
-
-	// the internal options are for my use on the command line during dev
-
-	public static boolean internalOption_PrintGrammarTree = false;
-	public static boolean internalOption_PrintDFA = false;
-	public static boolean internalOption_ShowNFConfigsInDFA = false;
-	public static boolean internalOption_watchNFAConversion = false;
-
-    public static void main(String[] args) {
-		ErrorManager.info("ANTLR Parser Generator  Version " +
-						  VERSION + " (August 13, 2007)  1989-2007");
-		Tool antlr = new Tool(args);
-		antlr.process();
-		System.exit(0);
-	}
-
-	public Tool() {
-	}
-
-	public Tool(String[] args) {
-		processArgs(args);
-	}
-
-	public void processArgs(String[] args) {
-		if ( args==null || args.length==0 ) {
-			help();
-			return;
-		}
-		for (int i = 0; i < args.length; i++) {
-			if (args[i].equals("-o") || args[i].equals("-fo")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing output directory with -fo/-o option; ignoring");
-				}
-				else {
-					if ( args[i].equals("-fo") ) { // force output into dir
-						forceAllFilesToOutputDir = true;
-					}
-					i++;
-					outputDirectory = args[i];
-					if ( outputDirectory.endsWith("/") ||
-						 outputDirectory.endsWith("\\") )
-					{
-						outputDirectory =
-							outputDirectory.substring(0,outputDirectory.length()-1);
-					}
-					File outDir = new File(outputDirectory);
-					if( outDir.exists() && !outDir.isDirectory() ) {
-						ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE,outputDirectory);
-						libDirectory = ".";
-					}
-				}
-			}
-			else if (args[i].equals("-lib")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing library directory with -lib option; ignoring");
-				}
-				else {
-					i++;
-					libDirectory = args[i];
-					if ( libDirectory.endsWith("/") ||
-						 libDirectory.endsWith("\\") )
-					{
-						libDirectory =
-							libDirectory.substring(0,libDirectory.length()-1);
-					}
-					File outDir = new File(libDirectory);
-					if( !outDir.exists() ) {
-						ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND,libDirectory);
-						libDirectory = ".";
-					}
-				}
-			}
-			else if (args[i].equals("-nfa")) {
-				generate_NFA_dot=true;
-			}
-			else if (args[i].equals("-dfa")) {
-				generate_DFA_dot=true;
-			}
-			else if (args[i].equals("-debug")) {
-				debug=true;
-			}
-			else if (args[i].equals("-trace")) {
-				trace=true;
-			}
-			else if (args[i].equals("-report")) {
-				report=true;
-			}
-			else if (args[i].equals("-profile")) {
-				profile=true;
-			}
-			else if (args[i].equals("-print")) {
-				printGrammar = true;
-			}
-			else if (args[i].equals("-depend")) {
-				depend=true;
-			}
-			else if (args[i].equals("-message-format")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing output format with -message-format option; using default");
-				}
-				else {
-					i++;
-					ErrorManager.setFormat(args[i]);
-				}
-			}
-			else if (args[i].equals("-Xgrtree")) {
-				internalOption_PrintGrammarTree=true; // print grammar tree
-			}
-			else if (args[i].equals("-Xdfa")) {
-				internalOption_PrintDFA=true;
-			}
-			else if (args[i].equals("-Xnoprune")) {
-				DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES=false;
-			}
-			else if (args[i].equals("-Xnocollapse")) {
-				DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES=false;
-			}
-			else if (args[i].equals("-Xdbgconversion")) {
-				NFAToDFAConverter.debug = true;
-			}
-			else if (args[i].equals("-Xmultithreaded")) {
-				NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
-			}
-			else if (args[i].equals("-Xnomergestopstates")) {
-				DFAOptimizer.MERGE_STOP_STATES = false;
-			}
-			else if (args[i].equals("-Xdfaverbose")) {
-				internalOption_ShowNFConfigsInDFA = true;
-			}
-			else if (args[i].equals("-Xwatchconversion")) {
-				internalOption_watchNFAConversion = true;
-			}
-			else if (args[i].equals("-XdbgST")) {
-				CodeGenerator.EMIT_TEMPLATE_DELIMITERS = true;
-			}
-			else if (args[i].equals("-Xnoinlinedfa")) {
-				CodeGenerator.GEN_ACYCLIC_DFA_INLINE = false;
-			}
-			else if (args[i].equals("-Xm")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing max recursion with -Xm option; ignoring");
-				}
-				else {
-					i++;
-					NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
-				}
-			}
-			else if (args[i].equals("-Xmaxdfaedges")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
-				}
-				else {
-					i++;
-					DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
-				}
-			}
-			else if (args[i].equals("-Xconversiontimeout")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
-				}
-				else {
-					i++;
-					DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
-				}
-			}
-			else if (args[i].equals("-Xnfastates")) {
-				DecisionProbe.verbose=true;
-			}			
-			else if (args[i].equals("-X")) {
-				Xhelp();
-			}
-            else {
-                if (args[i].charAt(0) != '-') {
-                    // Must be the grammar file
-                    grammarFileNames.add(args[i]);
-                }
-            }
-        }
-    }
-
-    /*
-    protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
-        // check for invalid command line args
-        for (int a = 0; a < args.length; a++) {
-            if (!cmdLineArgValid.member(a)) {
-                System.err.println("invalid command-line argument: " + args[a] + "; ignored");
-            }
-        }
-    }
-    */
-
-    public void process()  {
-		int numFiles = grammarFileNames.size();
-		for (int i = 0; i < numFiles; i++) {
-			String grammarFileName = (String) grammarFileNames.get(i);
-			if ( numFiles > 1 && !depend ) {
-			    System.out.println(grammarFileName);
-			}
-			try {
-				if ( depend ) {
-					BuildDependencyGenerator dep =
-						new BuildDependencyGenerator(this, grammarFileName);
-					List outputFiles = dep.getGeneratedFileList();
-					List dependents = dep.getDependenciesFileList();
-					//System.out.println("output: "+outputFiles);
-					//System.out.println("dependents: "+dependents);
-					System.out.println(dep.getDependencies());
-					continue;
-				}
-				Grammar grammar = getGrammar(grammarFileName);
-				processGrammar(grammar);
-
-				if ( printGrammar ) {
-					grammar.printGrammar(System.out);
-				}
-
-				if ( generate_NFA_dot ) {
-					generateNFAs(grammar);
-				}
-				if ( generate_DFA_dot ) {
-					generateDFAs(grammar);
-				}
-				if ( report ) {
-					GrammarReport report = new GrammarReport(grammar);
-					System.out.println(report.toString());
-					// print out a backtracking report too (that is not encoded into log)
-					System.out.println(report.getBacktrackingReport());
-					// same for aborted NFA->DFA conversions
-					System.out.println(report.getEarlyTerminationReport());
-				}
-				if ( profile ) {
-					GrammarReport report = new GrammarReport(grammar);
-					Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
-											  report.toNotifyString());
-				}
-
-				// now handle the lexer if one was created for a merged spec
-				String lexerGrammarStr = grammar.getLexerGrammar();
-				if ( grammar.type==Grammar.COMBINED && lexerGrammarStr!=null ) {
-					String lexerGrammarFileName =
-						grammar.getImplicitlyGeneratedLexerFileName();
-					Writer w = getOutputFile(grammar,lexerGrammarFileName);
-					w.write(lexerGrammarStr);
-					w.close();
-					StringReader sr = new StringReader(lexerGrammarStr);
-					Grammar lexerGrammar = new Grammar();
-					lexerGrammar.setTool(this);
-					File lexerGrammarFullFile =
-						new File(getFileDirectory(lexerGrammarFileName),lexerGrammarFileName);
-					lexerGrammar.setFileName(lexerGrammarFullFile.toString());
-					lexerGrammar.importTokenVocabulary(grammar);
-					lexerGrammar.setGrammarContent(sr);
-					sr.close();
-					processGrammar(lexerGrammar);
-				}
-			}
-			catch (IOException e) {
-				ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
-								   grammarFileName);
-			}
-			catch (Exception e) {
-				ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
-			}
-		}
-    }
-
-	public Grammar getGrammar(String grammarFileName)
-		throws IOException, antlr.TokenStreamException, antlr.RecognitionException
-	{
-		//StringTemplate.setLintMode(true);
-		FileReader fr = null;
-		fr = new FileReader(grammarFileName);
-		BufferedReader br = new BufferedReader(fr);
-		Grammar grammar = new Grammar(this,grammarFileName,br);
-		grammar.setWatchNFAConversion(internalOption_watchNFAConversion);
-		br.close();
-		fr.close();
-		return grammar;
-	}
-
-	protected void processGrammar(Grammar grammar)
-	{
-		String language = (String)grammar.getOption("language");
-		if ( language!=null ) {
-			CodeGenerator generator = new CodeGenerator(this, grammar, language);
-			grammar.setCodeGenerator(generator);
-			generator.setDebug(debug);
-			generator.setProfile(profile);
-			generator.setTrace(trace);
-			generator.genRecognizer();
-		}
-	}
-
-	protected void generateDFAs(Grammar g) {
-		for (int d=1; d<=g.getNumberOfDecisions(); d++) {
-			DFA dfa = g.getLookaheadDFA(d);
-			if ( dfa==null ) {
-				continue; // not there for some reason, ignore
-			}
-			DOTGenerator dotGenerator = new DOTGenerator(g);
-			String dot = dotGenerator.getDOT( dfa.startState );
-			String dotFileName = g.name+"_dec-"+d;
-			try {
-				writeDOTFile(g, dotFileName, dot);
-			}
-			catch(IOException ioe) {
-				ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
-								   dotFileName,
-								   ioe);
-			}
-		}
-	}
-
-	protected void generateNFAs(Grammar g) {
-		DOTGenerator dotGenerator = new DOTGenerator(g);
-		Collection rules = g.getRules();
-		for (Iterator itr = rules.iterator(); itr.hasNext();) {
-			Rule r = (Rule) itr.next();
-			String ruleName = r.name;
-			try {
-				writeDOTFile(
-					g,
-					ruleName,
-					dotGenerator.getDOT(g.getRuleStartState(ruleName)));
-			}
-			catch (IOException ioe) {
-				ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
-			}
-		}
-	}
-
-	protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
-		Writer fw = getOutputFile(g, name+".dot");
-		fw.write(dot);
-		fw.close();
-	}
-
-	private static void help() {
-        System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
-		System.err.println("  -o outputDir          specify output directory where all output is generated");
-		System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
-		System.err.println("  -lib dir              specify location of token files");
-		System.err.println("  -depend               generate file dependencies");
-		System.err.println("  -report               print out a report about the grammar(s) processed");
-		System.err.println("  -print                print out the grammar without actions");
-		System.err.println("  -debug                generate a parser that emits debugging events");
-		System.err.println("  -profile              generate a parser that computes profiling information");
-		System.err.println("  -nfa                  generate an NFA for each rule");
-		System.err.println("  -dfa                  generate a DFA for each decision point");
-		System.err.println("  -message-format name  specify output style for messages");
-		System.err.println("  -X                    display extended argument list");
-    }
-
-	private static void Xhelp() {
-		System.err.println("  -Xgrtree               print the grammar AST");
-		System.err.println("  -Xdfa                  print DFA as text ");
-		System.err.println("  -Xnoprune              test lookahead against EBNF block exit branches");
-		System.err.println("  -Xnocollapse           collapse incident edges into DFA states");
-		System.err.println("  -Xdbgconversion        dump lots of info during NFA conversion");
-		System.err.println("  -Xmultithreaded        run the analysis in 2 threads");
-		System.err.println("  -Xnomergestopstates    do not merge stop states");
-		System.err.println("  -Xdfaverbose           generate DFA states in DOT with NFA configs");
-		System.err.println("  -Xwatchconversion      print a message for each NFA before converting");
-		System.err.println("  -XdbgST                put tags at start/stop of all templates in output");
-		System.err.println("  -Xm m                  max number of rule invocations during conversion");
-		System.err.println("  -Xmaxdfaedges m        max \"comfortable\" number of edges for single DFA state");
-		System.err.println("  -Xconversiontimeout t  set NFA conversion timeout for each decision");
-		System.err.println("  -Xnoinlinedfa          make all DFA with tables; no inline prediction with IFs");
-		System.err.println("  -Xnfastates            for nondeterminisms, list NFA states for each path");
-    }
-
-	public void setOutputDirectory(String outputDirectory) {
-		this.outputDirectory = outputDirectory;
-	}
-
-    /** This method is used by all code generators to create new output
-     *  files. If the outputDir set by -o is not present it will be created.
-	 *  The final filename is sensitive to the output directory and
-	 *  the directory where the grammar file was found.  If -o is /tmp
-	 *  and the original grammar file was foo/t.g then output files
-	 *  go in /tmp/foo.
-	 *
-	 *  The output dir -o spec takes precedence if it's absolute.
-	 *  E.g., if the grammar file dir is absolute the output dir is given
-	 *  precendence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
-	 *  output (assuming t.g holds T.java).
-	 *
-	 *  If no -o is specified, then just write to the directory where the
-	 *  grammar file was found.
-	 *
-	 *  If outputDirectory==null then write a String.
-     */
-    public Writer getOutputFile(Grammar g, String fileName) throws IOException {
-		if ( outputDirectory==null ) {
-			return new StringWriter();
-		}
-		// output directory is a function of where the grammar file lives
-		// for subdir/T.g, you get subdir here.  Well, depends on -o etc...
-		File outputDir = getOutputDirectory(g.getFileName());
-		File outputFile = new File(outputDir, fileName);
-
-		if( !outputDir.exists() ) {
-			outputDir.mkdirs();
-		}
-        FileWriter fw = new FileWriter(outputFile);
-		return new BufferedWriter(fw);
-    }
-
-	public File getOutputDirectory(String fileNameWithPath) {
-		File outputDir = new File(outputDirectory);
-		String fileDirectory = getFileDirectory(fileNameWithPath);
-		if ( outputDirectory!=UNINITIALIZED_DIR ) {
-			// -o /tmp /var/lib/t.g => /tmp/T.java
-			// -o subdir/output /usr/lib/t.g => subdir/output/T.java
-			// -o . /usr/lib/t.g => ./T.java
-			if ( fileDirectory!=null &&
-				 (new File(fileDirectory).isAbsolute() ||
-				  fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
-				  forceAllFilesToOutputDir
-				)
-			{
-				// somebody set the dir, it takes precendence; write new file there
-				outputDir = new File(outputDirectory);
-			}
-			else {
-				// -o /tmp subdir/t.g => /tmp/subdir/t.g
-				if ( fileDirectory!=null ) {
-					outputDir = new File(outputDirectory, fileDirectory);
-				}
-				else {
-					outputDir = new File(outputDirectory);
-				}
-			}
-		}
-		else {
-			// they didn't specify a -o dir so just write to location
-			// where grammar is, absolute or relative
-			String dir = ".";
-			if ( fileDirectory!=null ) {
-				dir = fileDirectory;
-			}
-			outputDir = new File(dir);
-		}
-		return outputDir;
-	}
-
-	/** Open a file in the -lib dir.  For now, it's just .tokens files */
-	public BufferedReader getLibraryFile(String fileName) throws IOException {
-		String fullName = libDirectory+File.separator+fileName;
-		FileReader fr = new FileReader(fullName);
-		BufferedReader br = new BufferedReader(fr);
-		return br;
-	}
-
-	public String getLibraryDirectory() {
-		return libDirectory;
-	}
-
-	/** Return the directory containing the grammar file for this grammar.
-	 *  normally this is a relative path from current directory.  People will
-	 *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
-	 *  "grammars/foo.g3" etc...  This method returns "grammars".
-	 */
-	public String getFileDirectory(String fileName) {
-		File f = new File(fileName);
-		return f.getParent();
-	}
-
-	/** If the tool needs to panic/exit, how do we do that? */
-	public void panic() {
-		throw new Error("ANTLR panic");
-	}
-
-	/** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss */
-	public static String getCurrentTimeStamp() {
-		GregorianCalendar calendar = new java.util.GregorianCalendar();
-		int y = calendar.get(Calendar.YEAR);
-		int m = calendar.get(Calendar.MONTH)+1; // zero-based for months
-		int d = calendar.get(Calendar.DAY_OF_MONTH);
-		int h = calendar.get(Calendar.HOUR_OF_DAY);
-		int min = calendar.get(Calendar.MINUTE);
-		int sec = calendar.get(Calendar.SECOND);
-		String sy = String.valueOf(y);
-		String sm = m<10?"0"+m:String.valueOf(m);
-		String sd = d<10?"0"+d:String.valueOf(d);
-		String sh = h<10?"0"+h:String.valueOf(h);
-		String smin = min<10?"0"+min:String.valueOf(min);
-		String ssec = sec<10?"0"+sec:String.valueOf(sec);
-		return new StringBuffer().append(sy).append("-").append(sm).append("-")
-			.append(sd).append(" ").append(sh).append(":").append(smin)
-			.append(":").append(ssec).toString();
-	}
-
-}
diff --git a/src/org/antlr/analysis/NFAConversionThread.java b/src/org/antlr/analysis/NFAConversionThread.java
deleted file mode 100644
index 29d2bee..0000000
--- a/src/org/antlr/analysis/NFAConversionThread.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package org.antlr.analysis;
-
-import org.antlr.misc.Barrier;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.ErrorManager;
-
-/** Convert all decisions i..j inclusive in a thread */
-public class NFAConversionThread implements Runnable {
-	Grammar grammar;
-	int i, j;
-	Barrier barrier;
-	public NFAConversionThread(Grammar grammar,
-							   Barrier barrier,
-							   int i,
-							   int j)
-	{
-		this.grammar = grammar;
-		this.barrier = barrier;
-		this.i = i;
-		this.j = j;
-	}
-	public void run() {
-		for (int decision=i; decision<=j; decision++) {
-			NFAState decisionStartState = grammar.getDecisionNFAStartState(decision);
-			if ( decisionStartState.getNumberOfTransitions()>1 ) {
-				grammar.createLookaheadDFA(decision);
-			}
-		}
-		// now wait for others to finish
-		try {
-			barrier.waitForRelease();
-		}
-		catch(InterruptedException e) {
-			ErrorManager.internalError("what the hell? DFA interruptus", e);
-		}
-	}
-}
-
diff --git a/src/org/antlr/codegen/ANTLRTokenTypes.txt b/src/org/antlr/codegen/ANTLRTokenTypes.txt
deleted file mode 100644
index 27eaa78..0000000
--- a/src/org/antlr/codegen/ANTLRTokenTypes.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): antlr.g -> ANTLRTokenTypes.txt$
-ANTLR    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
diff --git a/src/org/antlr/codegen/ActionTranslator.tokens b/src/org/antlr/codegen/ActionTranslator.tokens
deleted file mode 100644
index 8923b6a..0000000
--- a/src/org/antlr/codegen/ActionTranslator.tokens
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_ATTR=17
-SET_DYNAMIC_SCOPE_ATTR=18
-ISOLATED_DYNAMIC_SCOPE=24
-WS=5
-UNKNOWN_SYNTAX=35
-DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR=23
-SCOPE_INDEX_EXPR=21
-DYNAMIC_SCOPE_ATTR=19
-ISOLATED_TOKEN_REF=14
-SET_ATTRIBUTE=30
-SET_EXPR_ATTRIBUTE=29
-ACTION=27
-ERROR_X=34
-TEMPLATE_INSTANCE=26
-TOKEN_SCOPE_ATTR=10
-ISOLATED_LEXER_RULE_REF=15
-ESC=32
-SET_ENCLOSING_RULE_SCOPE_ATTR=7
-ATTR_VALUE_EXPR=6
-RULE_SCOPE_ATTR=12
-LABEL_REF=13
-INT=37
-ARG=25
-SET_LOCAL_ATTR=16
-TEXT=36
-Tokens=38
-DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR=22
-SET_TOKEN_SCOPE_ATTR=9
-ERROR_SCOPED_XY=20
-SET_RULE_SCOPE_ATTR=11
-ENCLOSING_RULE_SCOPE_ATTR=8
-ERROR_XY=33
-TEMPLATE_EXPR=31
-INDIRECT_TEMPLATE_INSTANCE=28
-ID=4
diff --git a/src/org/antlr/codegen/ActionTranslatorLexer.java b/src/org/antlr/codegen/ActionTranslatorLexer.java
deleted file mode 100644
index 04bd530..0000000
--- a/src/org/antlr/codegen/ActionTranslatorLexer.java
+++ /dev/null
@@ -1,3640 +0,0 @@
-// $ANTLR 3.0b5 ActionTranslator.g 2006-11-23 01:51:22
-
-package org.antlr.codegen;
-import org.antlr.runtime.*;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.tool.*;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-public class ActionTranslatorLexer extends Lexer {
-    public static final int LOCAL_ATTR=17;
-    public static final int SET_DYNAMIC_SCOPE_ATTR=18;
-    public static final int ISOLATED_DYNAMIC_SCOPE=24;
-    public static final int WS=5;
-    public static final int UNKNOWN_SYNTAX=35;
-    public static final int DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR=23;
-    public static final int SCOPE_INDEX_EXPR=21;
-    public static final int DYNAMIC_SCOPE_ATTR=19;
-    public static final int ISOLATED_TOKEN_REF=14;
-    public static final int SET_ATTRIBUTE=30;
-    public static final int SET_EXPR_ATTRIBUTE=29;
-    public static final int ACTION=27;
-    public static final int ERROR_X=34;
-    public static final int TEMPLATE_INSTANCE=26;
-    public static final int TOKEN_SCOPE_ATTR=10;
-    public static final int ISOLATED_LEXER_RULE_REF=15;
-    public static final int ESC=32;
-    public static final int SET_ENCLOSING_RULE_SCOPE_ATTR=7;
-    public static final int ATTR_VALUE_EXPR=6;
-    public static final int RULE_SCOPE_ATTR=12;
-    public static final int LABEL_REF=13;
-    public static final int INT=37;
-    public static final int ARG=25;
-    public static final int EOF=-1;
-    public static final int SET_LOCAL_ATTR=16;
-    public static final int TEXT=36;
-    public static final int Tokens=38;
-    public static final int DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR=22;
-    public static final int SET_TOKEN_SCOPE_ATTR=9;
-    public static final int ERROR_SCOPED_XY=20;
-    public static final int SET_RULE_SCOPE_ATTR=11;
-    public static final int ENCLOSING_RULE_SCOPE_ATTR=8;
-    public static final int ERROR_XY=33;
-    public static final int TEMPLATE_EXPR=31;
-    public static final int INDIRECT_TEMPLATE_INSTANCE=28;
-    public static final int ID=4;
-
-    public List chunks = new ArrayList();
-    Rule enclosingRule;
-    int outerAltNum;
-    Grammar grammar;
-    CodeGenerator generator;
-    antlr.Token actionToken;
-
-	int ruleNestingLevel = 0; 
-
-	public Token emit(int tokenType,
-					  int line, int charPosition,
-					  int channel,
-					  int start, int stop)
-	{
-		Token t = new CommonToken(input, tokenType, channel, start, stop);
-		t.setLine(line);
-		t.setText(text);
-		t.setCharPositionInLine(charPosition);
-		emit(t);
-		return t;
-	}
-
-		public ActionTranslatorLexer(CodeGenerator generator,
-    								 String ruleName,
-    								 GrammarAST actionAST)
-    	{
-    		this(new ANTLRStringStream(actionAST.token.getText()));
-    		this.generator = generator;
-    		this.grammar = generator.grammar;
-    	    this.enclosingRule = grammar.getRule(ruleName);
-    	    this.actionToken = actionAST.token;
-    	    this.outerAltNum = actionAST.outerAltNum;
-    	}
-
-    	public ActionTranslatorLexer(CodeGenerator generator,
-    								 String ruleName,
-    								 antlr.Token actionToken,
-    								 int outerAltNum)
-    	{
-    		this(new ANTLRStringStream(actionToken.getText()));
-    		this.generator = generator;
-    		grammar = generator.grammar;
-    	    this.enclosingRule = grammar.getRule(ruleName);
-    	    this.actionToken = actionToken;
-    		this.outerAltNum = outerAltNum;
-    	}
-
-    /*
-    public ActionTranslatorLexer(CharStream input, CodeGenerator generator,
-                                 Grammar grammar, Rule enclosingRule,
-                                 antlr.Token actionToken, int outerAltNum)
-    {
-        this(input);
-        this.grammar = grammar;
-        this.generator = generator;
-        this.enclosingRule = enclosingRule;
-        this.actionToken = actionToken;
-        this.outerAltNum = outerAltNum;
-    }
-    */
-
-    /** Return a list of strings and StringTemplate objects that
-     *  represent the translated action.
-     */
-    public List translateToChunks() {
-    	// System.out.println("###\naction="+action);
-    	Token t;
-    	do {
-    		t = nextToken();
-    	} while ( t.getType()!= Token.EOF );
-    	return chunks;
-    }
-
-    public String translate() {
-    	List theChunks = translateToChunks();
-    	//System.out.println("chunks="+a.chunks);
-    	StringBuffer buf = new StringBuffer();
-    	for (int i = 0; i < theChunks.size(); i++) {
-    		Object o = (Object) theChunks.get(i);
-    		buf.append(o);
-    	}
-    	//System.out.println("translated: "+buf.toString());
-    	return buf.toString();
-    }
-
-    public List translateAction(String action) {
-        ActionTranslatorLexer translator =
-            new ActionTranslatorLexer(generator,
-                                      enclosingRule.name,
-                                      new antlr.CommonToken(ANTLRParser.ACTION,action),outerAltNum);
-        return translator.translateToChunks();
-    }
-
-    public boolean isTokenRefInAlt(String id) {
-        return enclosingRule.getTokenRefsInAlt(id, outerAltNum)!=null;
-    }
-    public boolean isRuleRefInAlt(String id) {
-        return enclosingRule.getRuleRefsInAlt(id, outerAltNum)!=null;
-    }
-    public Grammar.LabelElementPair getElementLabel(String id) {
-        return enclosingRule.getLabel(id);
-    }
-
-    public void checkElementRefUniqueness(String ref, boolean isToken) {
-    		List refs = null;
-    		if ( isToken ) {
-    		    refs = enclosingRule.getTokenRefsInAlt(ref, outerAltNum);
-    		}
-    		else {
-    		    refs = enclosingRule.getRuleRefsInAlt(ref, outerAltNum);
-    		}
-    		if ( refs!=null && refs.size()>1 ) {
-    			ErrorManager.grammarError(ErrorManager.MSG_NONUNIQUE_REF,
-    									  grammar,
-    									  actionToken,
-    									  ref);
-    		}
-    }
-
-    /** For $rulelabel.name, return the Attribute found for name.  It
-     *  will be a predefined property or a return value.
-     */
-    public Attribute getRuleLabelAttribute(String ruleName, String attrName) {
-    	Rule r = grammar.getRule(ruleName);
-    	AttributeScope scope = r.getLocalAttributeScope(attrName);
-    	if ( scope!=null && !scope.isParameterScope ) {
-    		return scope.getAttribute(attrName);
-    	}
-    	return null;
-    }
-
-    AttributeScope resolveDynamicScope(String scopeName) {
-    	if ( grammar.getGlobalScope(scopeName)!=null ) {
-    		return grammar.getGlobalScope(scopeName);
-    	}
-    	Rule scopeRule = grammar.getRule(scopeName);
-    	if ( scopeRule!=null ) {
-    		return scopeRule.ruleScope;
-    	}
-    	return null; // not a valid dynamic scope
-    }
-
-    protected StringTemplate template(String name) {
-    	StringTemplate st = generator.getTemplates().getInstanceOf(name);
-    	chunks.add(st);
-    	return st;
-    }
-
-
-
-    public ActionTranslatorLexer() {;} 
-    public ActionTranslatorLexer(CharStream input) {
-        super(input);
-        ruleMemo = new HashMap[62+1];
-     }
-    public String getGrammarFileName() { return "ActionTranslator.g"; }
-
-    public Token nextToken() {
-        while (true) {
-            if ( input.LA(1)==CharStream.EOF ) {
-                return Token.EOF_TOKEN;
-            }
-            token = null;
-            tokenStartCharIndex = getCharIndex();
-    	text = null;
-            try {
-                int m = input.mark();
-                backtracking=1; 
-                failed=false;
-                mTokens();
-                backtracking=0;
-
-                if ( failed ) {
-                    input.rewind(m);
-                    input.consume(); 
-                }
-                else {
-                    return token;
-                }
-            }
-            catch (RecognitionException re) {
-                // shouldn't happen in backtracking mode, but...
-                reportError(re);
-                recover(re);
-            }
-        }
-    }
-
-    public void memoize(IntStream input,
-    		int ruleIndex,
-    		int ruleStartIndex)
-    {
-    if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
-    }
-
-    public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-    if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
-    return false;
-    }// $ANTLR start SET_ENCLOSING_RULE_SCOPE_ATTR
-    public void mSET_ENCLOSING_RULE_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_ENCLOSING_RULE_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:201:4: ( '$' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?)
-            // ActionTranslator.g:201:4: '$' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            // ActionTranslator.g:201:22: ( WS )?
-            int alt1=2;
-            int LA1_0 = input.LA(1);
-            if ( ((LA1_0>='\t' && LA1_0<='\n')||LA1_0==' ') ) {
-                alt1=1;
-            }
-            switch (alt1) {
-                case 1 :
-                    // ActionTranslator.g:201:22: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            int exprStart = getCharIndex();
-            mATTR_VALUE_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(';'); if (failed) return ;
-            if ( !(enclosingRule!=null &&
-            	                         x.getText().equals(enclosingRule.name) &&
-            	                         enclosingRule.getLocalAttributeScope(y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "SET_ENCLOSING_RULE_SCOPE_ATTR", "enclosingRule!=null &&\n\t                         $x.text.equals(enclosingRule.name) &&\n\t                         enclosingRule.getLocalAttributeScope($y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = null;
-              		AttributeScope scope = enclosingRule.getLocalAttributeScope(y.getText());
-              		if ( scope.isPredefinedRuleScope ) {
-              			if ( y.getText().equals("st") || y.getText().equals("tree") ) {
-              				st = template("ruleSetPropertyRef_"+y.getText());
-              				grammar.referenceRuleLabelPredefinedAttribute(x.getText());
-              				st.setAttribute("scope", x.getText());
-              				st.setAttribute("attr", y.getText());
-              				st.setAttribute("expr", translateAction(expr.getText()));
-              			} else {
-              				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-              										  grammar,
-              										  actionToken,
-              										  x.getText(),
-              										  y.getText());
-              			}
-              		}
-              	    else if ( scope.isPredefinedLexerRuleScope ) {
-              	    	// this is a better message to emit than the previous one...
-              			ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-              									  grammar,
-              									  actionToken,
-              									  x.getText(),
-              									  y.getText());
-              	    }
-              		else if ( scope.isParameterScope ) {
-              			st = template("parameterSetAttributeRef");
-              			st.setAttribute("attr", scope.getAttribute(y.getText()));
-              			st.setAttribute("expr", translateAction(expr.getText()));
-              		}
-              		else { // must be return value
-              			st = template("returnSetAttributeRef");
-              			st.setAttribute("ruleDescriptor", enclosingRule);
-              			st.setAttribute("attr", scope.getAttribute(y.getText()));
-              			st.setAttribute("expr", translateAction(expr.getText()));
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_ENCLOSING_RULE_SCOPE_ATTR
-
-    // $ANTLR start ENCLOSING_RULE_SCOPE_ATTR
-    public void mENCLOSING_RULE_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ENCLOSING_RULE_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:246:4: ( '$' x= ID '.' y= ID {...}?)
-            // ActionTranslator.g:246:4: '$' x= ID '.' y= ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( !(enclosingRule!=null &&
-            	                         x.getText().equals(enclosingRule.name) &&
-            	                         enclosingRule.getLocalAttributeScope(y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "ENCLOSING_RULE_SCOPE_ATTR", "enclosingRule!=null &&\n\t                         $x.text.equals(enclosingRule.name) &&\n\t                         enclosingRule.getLocalAttributeScope($y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = null;
-              		AttributeScope scope = enclosingRule.getLocalAttributeScope(y.getText());
-              		if ( scope.isPredefinedRuleScope ) {
-              			st = template("rulePropertyRef_"+y.getText());
-              			grammar.referenceRuleLabelPredefinedAttribute(x.getText());
-              			st.setAttribute("scope", x.getText());
-              			st.setAttribute("attr", y.getText());
-              		}
-              	    else if ( scope.isPredefinedLexerRuleScope ) {
-              	    	// perhaps not the most precise error message to use, but...
-              			ErrorManager.grammarError(ErrorManager.MSG_RULE_HAS_NO_ARGS,
-              									  grammar,
-              									  actionToken,
-              									  x.getText());
-              	    }
-              		else if ( scope.isParameterScope ) {
-              			st = template("parameterAttributeRef");
-              			st.setAttribute("attr", scope.getAttribute(y.getText()));
-              		}
-              		else { // must be return value
-              			st = template("returnAttributeRef");
-              			st.setAttribute("ruleDescriptor", enclosingRule);
-              			st.setAttribute("attr", scope.getAttribute(y.getText()));
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ENCLOSING_RULE_SCOPE_ATTR
-
-    // $ANTLR start SET_TOKEN_SCOPE_ATTR
-    public void mSET_TOKEN_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_TOKEN_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:280:4: ( '$' x= ID '.' y= ID ( WS )? '=' {...}?)
-            // ActionTranslator.g:280:4: '$' x= ID '.' y= ID ( WS )? '=' {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            // ActionTranslator.g:280:22: ( WS )?
-            int alt2=2;
-            int LA2_0 = input.LA(1);
-            if ( ((LA2_0>='\t' && LA2_0<='\n')||LA2_0==' ') ) {
-                alt2=1;
-            }
-            switch (alt2) {
-                case 1 :
-                    // ActionTranslator.g:280:22: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            if ( !(enclosingRule!=null && input.LA(1)!='=' &&
-            	                         (enclosingRule.getTokenLabel(x.getText())!=null||
-            	                          isTokenRefInAlt(x.getText())) &&
-            	                         AttributeScope.tokenScope.getAttribute(y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "SET_TOKEN_SCOPE_ATTR", "enclosingRule!=null && input.LA(1)!='=' &&\n\t                         (enclosingRule.getTokenLabel($x.text)!=null||\n\t                          isTokenRefInAlt($x.text)) &&\n\t                         AttributeScope.tokenScope.getAttribute($y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-              								  grammar,
-              								  actionToken,
-              								  x.getText(),
-              								  y.getText());
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_TOKEN_SCOPE_ATTR
-
-    // $ANTLR start TOKEN_SCOPE_ATTR
-    public void mTOKEN_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = TOKEN_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:299:4: ( '$' x= ID '.' y= ID {...}?)
-            // ActionTranslator.g:299:4: '$' x= ID '.' y= ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( !(enclosingRule!=null &&
-            	                         (enclosingRule.getTokenLabel(x.getText())!=null||
-            	                          isTokenRefInAlt(x.getText())) &&
-            	                         AttributeScope.tokenScope.getAttribute(y.getText())!=null &&
-            	                         (grammar.type!=Grammar.LEXER ||
-            	                         getElementLabel(x.getText()).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||
-            	                         getElementLabel(x.getText()).elementRef.token.getType()==ANTLRParser.STRING_LITERAL)) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "TOKEN_SCOPE_ATTR", "enclosingRule!=null &&\n\t                         (enclosingRule.getTokenLabel($x.text)!=null||\n\t                          isTokenRefInAlt($x.text)) &&\n\t                         AttributeScope.tokenScope.getAttribute($y.text)!=null &&\n\t                         (grammar.type!=Grammar.LEXER ||\n\t                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||\n\t     [...]
-            }
-            if ( backtracking==1 ) {
-
-              		String label = x.getText();
-              		if ( enclosingRule.getTokenLabel(x.getText())==null ) {
-              			// $tokenref.attr  gotta get old label or compute new one
-              			checkElementRefUniqueness(x.getText(), true);
-              			label = enclosingRule.getElementLabel(x.getText(), outerAltNum, generator);
-              			if ( label==null ) {
-              				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-              										  grammar,
-              										  actionToken,
-              										  "$"+x.getText()+"."+y.getText());
-              				label = x.getText();
-              			}
-              		}
-              		StringTemplate st = template("tokenLabelPropertyRef_"+y.getText());
-              		st.setAttribute("scope", label);
-              		st.setAttribute("attr", AttributeScope.tokenScope.getAttribute(y.getText()));
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end TOKEN_SCOPE_ATTR
-
-    // $ANTLR start SET_RULE_SCOPE_ATTR
-    public void mSET_RULE_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_RULE_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-
-            Grammar.LabelElementPair pair=null;
-            String refdRuleName=null;
-
-            // ActionTranslator.g:337:4: ( '$' x= ID '.' y= ID ( WS )? '=' {...}?{...}?)
-            // ActionTranslator.g:337:4: '$' x= ID '.' y= ID ( WS )? '=' {...}?{...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            // ActionTranslator.g:337:22: ( WS )?
-            int alt3=2;
-            int LA3_0 = input.LA(1);
-            if ( ((LA3_0>='\t' && LA3_0<='\n')||LA3_0==' ') ) {
-                alt3=1;
-            }
-            switch (alt3) {
-                case 1 :
-                    // ActionTranslator.g:337:22: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            if ( !(enclosingRule!=null && input.LA(1)!='=') ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "SET_RULE_SCOPE_ATTR", "enclosingRule!=null && input.LA(1)!='='");
-            }
-            if ( backtracking==1 ) {
-
-              		pair = enclosingRule.getRuleLabel(x.getText());
-              		refdRuleName = x.getText();
-              		if ( pair!=null ) {
-              			refdRuleName = pair.referencedRuleName;
-              		}
-              		
-            }
-            if ( !((enclosingRule.getRuleLabel(x.getText())!=null || isRuleRefInAlt(x.getText())) &&
-            	      getRuleLabelAttribute(enclosingRule.getRuleLabel(x.getText())!=null?enclosingRule.getRuleLabel(x.getText()).referencedRuleName:x.getText(),y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "SET_RULE_SCOPE_ATTR", "(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&\n\t      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-              								  grammar,
-              								  actionToken,
-              								  x.getText(),
-              								  y.getText());
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_RULE_SCOPE_ATTR
-
-    // $ANTLR start RULE_SCOPE_ATTR
-    public void mRULE_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = RULE_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-
-            Grammar.LabelElementPair pair=null;
-            String refdRuleName=null;
-
-            // ActionTranslator.g:366:4: ( '$' x= ID '.' y= ID {...}?{...}?)
-            // ActionTranslator.g:366:4: '$' x= ID '.' y= ID {...}?{...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( !(enclosingRule!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "RULE_SCOPE_ATTR", "enclosingRule!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		pair = enclosingRule.getRuleLabel(x.getText());
-              		refdRuleName = x.getText();
-              		if ( pair!=null ) {
-              			refdRuleName = pair.referencedRuleName;
-              		}
-              		
-            }
-            if ( !((enclosingRule.getRuleLabel(x.getText())!=null || isRuleRefInAlt(x.getText())) &&
-            	      getRuleLabelAttribute(enclosingRule.getRuleLabel(x.getText())!=null?enclosingRule.getRuleLabel(x.getText()).referencedRuleName:x.getText(),y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "RULE_SCOPE_ATTR", "(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&\n\t      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		String label = x.getText();
-              		if ( pair==null ) {
-              			// $ruleref.attr  gotta get old label or compute new one
-              			checkElementRefUniqueness(x.getText(), false);
-              			label = enclosingRule.getElementLabel(x.getText(), outerAltNum, generator);
-              			if ( label==null ) {
-              				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-              										  grammar,
-              										  actionToken,
-              										  "$"+x.getText()+"."+y.getText());
-              				label = x.getText();
-              			}
-              		}
-              		StringTemplate st;
-              		Rule refdRule = grammar.getRule(refdRuleName);
-              		AttributeScope scope = refdRule.getLocalAttributeScope(y.getText());
-              		if ( scope.isPredefinedRuleScope ) {
-              			st = template("ruleLabelPropertyRef_"+y.getText());
-              			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
-              			st.setAttribute("scope", label);
-              			st.setAttribute("attr", y.getText());
-              		}
-              		else if ( scope.isPredefinedLexerRuleScope ) {
-              			st = template("lexerRuleLabelPropertyRef_"+y.getText());
-              			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
-              			st.setAttribute("scope", label);
-              			st.setAttribute("attr", y.getText());
-              		}
-              		else if ( scope.isParameterScope ) {
-              			// TODO: error!
-					  }
-              		else {
-              			st = template("ruleLabelRef");
-              			st.setAttribute("referencedRule", refdRule);
-              			st.setAttribute("scope", label);
-              			st.setAttribute("attr", scope.getAttribute(y.getText()));
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end RULE_SCOPE_ATTR
-
-    // $ANTLR start LABEL_REF
-    public void mLABEL_REF() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = LABEL_REF;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:424:4: ( '$' ID {...}?)
-            // ActionTranslator.g:424:4: '$' ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID1Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID1 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID1Start, getCharIndex()-1);
-            if ( !(enclosingRule!=null &&
-            	            getElementLabel(ID1.getText())!=null &&
-            		        enclosingRule.getRuleLabel(ID1.getText())==null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "LABEL_REF", "enclosingRule!=null &&\n\t            getElementLabel($ID.text)!=null &&\n\t\t        enclosingRule.getRuleLabel($ID.text)==null");
-            }
-            if ( backtracking==1 ) {
-
-              		StringTemplate st;
-              		Grammar.LabelElementPair pair = getElementLabel(ID1.getText());
-              		if ( pair.type==Grammar.TOKEN_LABEL ||
-             			 pair.type==Grammar.CHAR_LABEL )
-					{
-              			st = template("tokenLabelRef");
-              		}
-              		else {
-              			st = template("listLabelRef");
-              		}
-              		st.setAttribute("label", ID1.getText());
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end LABEL_REF
-
-    // $ANTLR start ISOLATED_TOKEN_REF
-    public void mISOLATED_TOKEN_REF() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ISOLATED_TOKEN_REF;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:443:4: ( '$' ID {...}?)
-            // ActionTranslator.g:443:4: '$' ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID2Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID2 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID2Start, getCharIndex()-1);
-            if ( !(grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt(ID2.getText())) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "ISOLATED_TOKEN_REF", "grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt($ID.text)");
-            }
-            if ( backtracking==1 ) {
-
-              		String label = enclosingRule.getElementLabel(ID2.getText(), outerAltNum, generator);
-              		checkElementRefUniqueness(ID2.getText(), true);
-              		if ( label==null ) {
-              			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-              									  grammar,
-              									  actionToken,
-              									  ID2.getText());
-              		}
-              		else {
-              			StringTemplate st = template("tokenLabelRef");
-              			st.setAttribute("label", label);
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ISOLATED_TOKEN_REF
-
-    // $ANTLR start ISOLATED_LEXER_RULE_REF
-    public void mISOLATED_LEXER_RULE_REF() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ISOLATED_LEXER_RULE_REF;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:463:4: ( '$' ID {...}?)
-            // ActionTranslator.g:463:4: '$' ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID3Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID3 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID3Start, getCharIndex()-1);
-            if ( !(grammar.type==Grammar.LEXER &&
-            	             enclosingRule!=null &&
-            	             isRuleRefInAlt(ID3.getText())) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "ISOLATED_LEXER_RULE_REF", "grammar.type==Grammar.LEXER &&\n\t             enclosingRule!=null &&\n\t             isRuleRefInAlt($ID.text)");
-            }
-            if ( backtracking==1 ) {
-
-              		String label = enclosingRule.getElementLabel(ID3.getText(), outerAltNum, generator);
-              		checkElementRefUniqueness(ID3.getText(), false);
-              		if ( label==null ) {
-              			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-              									  grammar,
-              									  actionToken,
-              									  ID3.getText());
-              		}
-              		else {
-              			StringTemplate st = template("lexerRuleLabel");
-              			st.setAttribute("label", label);
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ISOLATED_LEXER_RULE_REF
-
-    // $ANTLR start SET_LOCAL_ATTR
-    public void mSET_LOCAL_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_LOCAL_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:495:4: ( '$' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?)
-            // ActionTranslator.g:495:4: '$' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID4Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID4 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID4Start, getCharIndex()-1);
-            // ActionTranslator.g:495:11: ( WS )?
-            int alt4=2;
-            int LA4_0 = input.LA(1);
-            if ( ((LA4_0>='\t' && LA4_0<='\n')||LA4_0==' ') ) {
-                alt4=1;
-            }
-            switch (alt4) {
-                case 1 :
-                    // ActionTranslator.g:495:11: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            int exprStart = getCharIndex();
-            mATTR_VALUE_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(';'); if (failed) return ;
-            if ( !(enclosingRule!=null
-            													&& enclosingRule.getLocalAttributeScope(ID4.getText())!=null
-            													&& !enclosingRule.getLocalAttributeScope(ID4.getText()).isPredefinedLexerRuleScope) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "SET_LOCAL_ATTR", "enclosingRule!=null\n\t\t\t\t\t\t\t\t\t\t\t\t\t&& enclosingRule.getLocalAttributeScope($ID.text)!=null\n\t\t\t\t\t\t\t\t\t\t\t\t\t&& !enclosingRule.getLocalAttributeScope($ID.text).isPredefinedLexerRuleScope");
-            }
-            if ( backtracking==1 ) {
-
-              		StringTemplate st;
-              		AttributeScope scope = enclosingRule.getLocalAttributeScope(ID4.getText());
-              		if ( scope.isPredefinedRuleScope ) {
-              			if (ID4.getText().equals("tree") || ID4.getText().equals("st")) {
-              				st = template("ruleSetPropertyRef_"+ID4.getText());
-              				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
-              				st.setAttribute("scope", enclosingRule.name);
-              				st.setAttribute("attr", ID4.getText());
-              				st.setAttribute("expr", translateAction(expr.getText()));
-              			} else {
-              				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-              										 grammar,
-              										 actionToken,
-              										 ID4.getText(),
-              										 "");
-              			}
-              		}
-              		else if ( scope.isParameterScope ) {
-              			st = template("parameterSetAttributeRef");
-              			st.setAttribute("attr", scope.getAttribute(ID4.getText()));
-              			st.setAttribute("expr", translateAction(expr.getText()));
-              		}
-              		else {
-              			st = template("returnSetAttributeRef");
-              			st.setAttribute("ruleDescriptor", enclosingRule);
-              			st.setAttribute("attr", scope.getAttribute(ID4.getText()));
-              			st.setAttribute("expr", translateAction(expr.getText()));
-              			}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_LOCAL_ATTR
-
-    // $ANTLR start LOCAL_ATTR
-    public void mLOCAL_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = LOCAL_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:531:4: ( '$' ID {...}?)
-            // ActionTranslator.g:531:4: '$' ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID5Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID5 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID5Start, getCharIndex()-1);
-            if ( !(enclosingRule!=null && enclosingRule.getLocalAttributeScope(ID5.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "LOCAL_ATTR", "enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		StringTemplate st;
-              		AttributeScope scope = enclosingRule.getLocalAttributeScope(ID5.getText());
-              		if ( scope.isPredefinedRuleScope ) {
-              			st = template("rulePropertyRef_"+ID5.getText());
-              			grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
-              			st.setAttribute("scope", enclosingRule.name);
-              			st.setAttribute("attr", ID5.getText());
-              		}
-              		else if ( scope.isPredefinedLexerRuleScope ) {
-              			st = template("lexerRulePropertyRef_"+ID5.getText());
-              			st.setAttribute("scope", enclosingRule.name);
-              			st.setAttribute("attr", ID5.getText());
-              		}
-              		else if ( scope.isParameterScope ) {
-              			st = template("parameterAttributeRef");
-              			st.setAttribute("attr", scope.getAttribute(ID5.getText()));
-              		}
-              		else {
-              			st = template("returnAttributeRef");
-              			st.setAttribute("ruleDescriptor", enclosingRule);
-              			st.setAttribute("attr", scope.getAttribute(ID5.getText()));
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end LOCAL_ATTR
-
-    // $ANTLR start SET_DYNAMIC_SCOPE_ATTR
-    public void mSET_DYNAMIC_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_DYNAMIC_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:572:4: ( '$' x= ID '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?)
-            // ActionTranslator.g:572:4: '$' x= ID '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match("::"); if (failed) return ;
-
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            // ActionTranslator.g:572:23: ( WS )?
-            int alt5=2;
-            int LA5_0 = input.LA(1);
-            if ( ((LA5_0>='\t' && LA5_0<='\n')||LA5_0==' ') ) {
-                alt5=1;
-            }
-            switch (alt5) {
-                case 1 :
-                    // ActionTranslator.g:572:23: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            int exprStart = getCharIndex();
-            mATTR_VALUE_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(';'); if (failed) return ;
-            if ( !(resolveDynamicScope(x.getText())!=null &&
-            						     resolveDynamicScope(x.getText()).getAttribute(y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "SET_DYNAMIC_SCOPE_ATTR", "resolveDynamicScope($x.text)!=null &&\n\t\t\t\t\t\t     resolveDynamicScope($x.text).getAttribute($y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		AttributeScope scope = resolveDynamicScope(x.getText());
-              		if ( scope!=null ) {
-              			StringTemplate st = template("scopeSetAttributeRef");
-              			st.setAttribute("scope", x.getText());
-              			st.setAttribute("attr",  scope.getAttribute(y.getText()));
-              			st.setAttribute("expr",  translateAction(expr.getText()));
-              		}
-              		else {
-              			// error: invalid dynamic attribute
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_DYNAMIC_SCOPE_ATTR
-
-    // $ANTLR start DYNAMIC_SCOPE_ATTR
-    public void mDYNAMIC_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = DYNAMIC_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:591:4: ( '$' x= ID '::' y= ID {...}?)
-            // ActionTranslator.g:591:4: '$' x= ID '::' y= ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match("::"); if (failed) return ;
-
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( !(resolveDynamicScope(x.getText())!=null &&
-            						     resolveDynamicScope(x.getText()).getAttribute(y.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "DYNAMIC_SCOPE_ATTR", "resolveDynamicScope($x.text)!=null &&\n\t\t\t\t\t\t     resolveDynamicScope($x.text).getAttribute($y.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		AttributeScope scope = resolveDynamicScope(x.getText());
-              		if ( scope!=null ) {
-              			StringTemplate st = template("scopeAttributeRef");
-              			st.setAttribute("scope", x.getText());
-              			st.setAttribute("attr",  scope.getAttribute(y.getText()));
-              		}
-              		else {
-              			// error: invalid dynamic attribute
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end DYNAMIC_SCOPE_ATTR
-
-    // $ANTLR start ERROR_SCOPED_XY
-    public void mERROR_SCOPED_XY() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ERROR_SCOPED_XY;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:610:4: ( '$' x= ID '::' y= ID )
-            // ActionTranslator.g:610:4: '$' x= ID '::' y= ID
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match("::"); if (failed) return ;
-
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( backtracking==1 ) {
-
-              		chunks.add(getText());
-              		generator.issueInvalidScopeError(x.getText(),y.getText(),
-              		                                 enclosingRule,actionToken,
-              		                                 outerAltNum);		
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ERROR_SCOPED_XY
-
-    // $ANTLR start DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
-    public void mDYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:628:4: ( '$' x= ID '[' '-' expr= SCOPE_INDEX_EXPR ']' '::' y= ID )
-            // ActionTranslator.g:628:4: '$' x= ID '[' '-' expr= SCOPE_INDEX_EXPR ']' '::' y= ID
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('['); if (failed) return ;
-            match('-'); if (failed) return ;
-            int exprStart = getCharIndex();
-            mSCOPE_INDEX_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(']'); if (failed) return ;
-            match("::"); if (failed) return ;
-
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = template("scopeAttributeRef");
-              		st.setAttribute("scope",    x.getText());
-              		st.setAttribute("attr",     resolveDynamicScope(x.getText()).getAttribute(y.getText()));
-              		st.setAttribute("negIndex", expr.getText());
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
-
-    // $ANTLR start DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
-    public void mDYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:639:4: ( '$' x= ID '[' expr= SCOPE_INDEX_EXPR ']' '::' y= ID )
-            // ActionTranslator.g:639:4: '$' x= ID '[' expr= SCOPE_INDEX_EXPR ']' '::' y= ID
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('['); if (failed) return ;
-            int exprStart = getCharIndex();
-            mSCOPE_INDEX_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(']'); if (failed) return ;
-            match("::"); if (failed) return ;
-
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = template("scopeAttributeRef");
-              		st.setAttribute("scope", x.getText());
-              		st.setAttribute("attr",  resolveDynamicScope(x.getText()).getAttribute(y.getText()));
-              		st.setAttribute("index", expr.getText());
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
-
-    // $ANTLR start SCOPE_INDEX_EXPR
-    public void mSCOPE_INDEX_EXPR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:651:4: ( (~ ']' )+ )
-            // ActionTranslator.g:651:4: (~ ']' )+
-            {
-            // ActionTranslator.g:651:4: (~ ']' )+
-            int cnt6=0;
-            loop6:
-            do {
-                int alt6=2;
-                int LA6_0 = input.LA(1);
-                if ( ((LA6_0>='\u0000' && LA6_0<='\\')||(LA6_0>='^' && LA6_0<='\uFFFE')) ) {
-                    alt6=1;
-                }
-
-
-                switch (alt6) {
-            	case 1 :
-            	    // ActionTranslator.g:651:5: ~ ']'
-            	    {
-            	    if ( (input.LA(1)>='\u0000' && input.LA(1)<='\\')||(input.LA(1)>='^' && input.LA(1)<='\uFFFE') ) {
-            	        input.consume();
-            	    failed=false;
-            	    }
-            	    else {
-            	        if (backtracking>0) {failed=true; return ;}
-            	        MismatchedSetException mse =
-            	            new MismatchedSetException(null,input);
-            	        recover(mse);    throw mse;
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt6 >= 1 ) break loop6;
-            	    if (backtracking>0) {failed=true; return ;}
-                        EarlyExitException eee =
-                            new EarlyExitException(6, input);
-                        throw eee;
-                }
-                cnt6++;
-            } while (true);
-
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SCOPE_INDEX_EXPR
-
-    // $ANTLR start ISOLATED_DYNAMIC_SCOPE
-    public void mISOLATED_DYNAMIC_SCOPE() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ISOLATED_DYNAMIC_SCOPE;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:660:4: ( '$' ID {...}?)
-            // ActionTranslator.g:660:4: '$' ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID6Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID6 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID6Start, getCharIndex()-1);
-            if ( !(resolveDynamicScope(ID6.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "ISOLATED_DYNAMIC_SCOPE", "resolveDynamicScope($ID.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = template("isolatedDynamicScopeRef");
-              		st.setAttribute("scope", ID6.getText());
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ISOLATED_DYNAMIC_SCOPE
-
-    // $ANTLR start TEMPLATE_INSTANCE
-    public void mTEMPLATE_INSTANCE() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = TEMPLATE_INSTANCE;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:673:4: ( '%' ID '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')' )
-            // ActionTranslator.g:673:4: '%' ID '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')'
-            {
-            match('%'); if (failed) return ;
-            mID(); if (failed) return ;
-            match('('); if (failed) return ;
-            // ActionTranslator.g:673:15: ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )?
-            int alt11=2;
-            int LA11_0 = input.LA(1);
-            if ( ((LA11_0>='\t' && LA11_0<='\n')||LA11_0==' '||(LA11_0>='A' && LA11_0<='Z')||LA11_0=='_'||(LA11_0>='a' && LA11_0<='z')) ) {
-                alt11=1;
-            }
-            switch (alt11) {
-                case 1 :
-                    // ActionTranslator.g:673:17: ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )?
-                    {
-                    // ActionTranslator.g:673:17: ( WS )?
-                    int alt7=2;
-                    int LA7_0 = input.LA(1);
-                    if ( ((LA7_0>='\t' && LA7_0<='\n')||LA7_0==' ') ) {
-                        alt7=1;
-                    }
-                    switch (alt7) {
-                        case 1 :
-                            // ActionTranslator.g:673:17: WS
-                            {
-                            mWS(); if (failed) return ;
-
-                            }
-                            break;
-
-                    }
-
-                    mARG(); if (failed) return ;
-                    // ActionTranslator.g:673:25: ( ',' ( WS )? ARG )*
-                    loop9:
-                    do {
-                        int alt9=2;
-                        int LA9_0 = input.LA(1);
-                        if ( (LA9_0==',') ) {
-                            alt9=1;
-                        }
-
-
-                        switch (alt9) {
-                    	case 1 :
-                    	    // ActionTranslator.g:673:26: ',' ( WS )? ARG
-                    	    {
-                    	    match(','); if (failed) return ;
-                    	    // ActionTranslator.g:673:30: ( WS )?
-                    	    int alt8=2;
-                    	    int LA8_0 = input.LA(1);
-                    	    if ( ((LA8_0>='\t' && LA8_0<='\n')||LA8_0==' ') ) {
-                    	        alt8=1;
-                    	    }
-                    	    switch (alt8) {
-                    	        case 1 :
-                    	            // ActionTranslator.g:673:30: WS
-                    	            {
-                    	            mWS(); if (failed) return ;
-
-                    	            }
-                    	            break;
-
-                    	    }
-
-                    	    mARG(); if (failed) return ;
-
-                    	    }
-                    	    break;
-
-                    	default :
-                    	    break loop9;
-                        }
-                    } while (true);
-
-                    // ActionTranslator.g:673:40: ( WS )?
-                    int alt10=2;
-                    int LA10_0 = input.LA(1);
-                    if ( ((LA10_0>='\t' && LA10_0<='\n')||LA10_0==' ') ) {
-                        alt10=1;
-                    }
-                    switch (alt10) {
-                        case 1 :
-                            // ActionTranslator.g:673:40: WS
-                            {
-                            mWS(); if (failed) return ;
-
-                            }
-                            break;
-
-                    }
-
-
-                    }
-                    break;
-
-            }
-
-            match(')'); if (failed) return ;
-            if ( backtracking==1 ) {
-
-              		String action = getText().substring(1,getText().length());
-              		String ruleName = "<outside-of-rule>";
-              		if ( enclosingRule!=null ) {
-              			ruleName = enclosingRule.name;
-              		}
-              		StringTemplate st =
-              			generator.translateTemplateConstructor(ruleName,
-              												   outerAltNum,
-              												   actionToken,
-              												   action);
-              		if ( st!=null ) {
-              			chunks.add(st);
-              		}
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end TEMPLATE_INSTANCE
-
-    // $ANTLR start INDIRECT_TEMPLATE_INSTANCE
-    public void mINDIRECT_TEMPLATE_INSTANCE() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = INDIRECT_TEMPLATE_INSTANCE;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:694:4: ( '%' '(' ACTION ')' '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')' )
-            // ActionTranslator.g:694:4: '%' '(' ACTION ')' '(' ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )? ')'
-            {
-            match('%'); if (failed) return ;
-            match('('); if (failed) return ;
-            mACTION(); if (failed) return ;
-            match(')'); if (failed) return ;
-            match('('); if (failed) return ;
-            // ActionTranslator.g:694:27: ( ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )? )?
-            int alt16=2;
-            int LA16_0 = input.LA(1);
-            if ( ((LA16_0>='\t' && LA16_0<='\n')||LA16_0==' '||(LA16_0>='A' && LA16_0<='Z')||LA16_0=='_'||(LA16_0>='a' && LA16_0<='z')) ) {
-                alt16=1;
-            }
-            switch (alt16) {
-                case 1 :
-                    // ActionTranslator.g:694:29: ( WS )? ARG ( ',' ( WS )? ARG )* ( WS )?
-                    {
-                    // ActionTranslator.g:694:29: ( WS )?
-                    int alt12=2;
-                    int LA12_0 = input.LA(1);
-                    if ( ((LA12_0>='\t' && LA12_0<='\n')||LA12_0==' ') ) {
-                        alt12=1;
-                    }
-                    switch (alt12) {
-                        case 1 :
-                            // ActionTranslator.g:694:29: WS
-                            {
-                            mWS(); if (failed) return ;
-
-                            }
-                            break;
-
-                    }
-
-                    mARG(); if (failed) return ;
-                    // ActionTranslator.g:694:37: ( ',' ( WS )? ARG )*
-                    loop14:
-                    do {
-                        int alt14=2;
-                        int LA14_0 = input.LA(1);
-                        if ( (LA14_0==',') ) {
-                            alt14=1;
-                        }
-
-
-                        switch (alt14) {
-                    	case 1 :
-                    	    // ActionTranslator.g:694:38: ',' ( WS )? ARG
-                    	    {
-                    	    match(','); if (failed) return ;
-                    	    // ActionTranslator.g:694:42: ( WS )?
-                    	    int alt13=2;
-                    	    int LA13_0 = input.LA(1);
-                    	    if ( ((LA13_0>='\t' && LA13_0<='\n')||LA13_0==' ') ) {
-                    	        alt13=1;
-                    	    }
-                    	    switch (alt13) {
-                    	        case 1 :
-                    	            // ActionTranslator.g:694:42: WS
-                    	            {
-                    	            mWS(); if (failed) return ;
-
-                    	            }
-                    	            break;
-
-                    	    }
-
-                    	    mARG(); if (failed) return ;
-
-                    	    }
-                    	    break;
-
-                    	default :
-                    	    break loop14;
-                        }
-                    } while (true);
-
-                    // ActionTranslator.g:694:52: ( WS )?
-                    int alt15=2;
-                    int LA15_0 = input.LA(1);
-                    if ( ((LA15_0>='\t' && LA15_0<='\n')||LA15_0==' ') ) {
-                        alt15=1;
-                    }
-                    switch (alt15) {
-                        case 1 :
-                            // ActionTranslator.g:694:52: WS
-                            {
-                            mWS(); if (failed) return ;
-
-                            }
-                            break;
-
-                    }
-
-
-                    }
-                    break;
-
-            }
-
-            match(')'); if (failed) return ;
-            if ( backtracking==1 ) {
-
-              		String action = getText().substring(1,getText().length());
-              		StringTemplate st =
-              			generator.translateTemplateConstructor(enclosingRule.name,
-              												   outerAltNum,
-              												   actionToken,
-              												   action);
-              		chunks.add(st);
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end INDIRECT_TEMPLATE_INSTANCE
-
-    // $ANTLR start ARG
-    public void mARG() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:708:7: ( ID '=' ACTION )
-            // ActionTranslator.g:708:7: ID '=' ACTION
-            {
-            mID(); if (failed) return ;
-            match('='); if (failed) return ;
-            mACTION(); if (failed) return ;
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ARG
-
-    // $ANTLR start SET_EXPR_ATTRIBUTE
-    public void mSET_EXPR_ATTRIBUTE() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_EXPR_ATTRIBUTE;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:713:4: ( '%' a= ACTION '.' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' )
-            // ActionTranslator.g:713:4: '%' a= ACTION '.' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
-            {
-            match('%'); if (failed) return ;
-            int aStart = getCharIndex();
-            mACTION(); if (failed) return ;
-            Token a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int ID7Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID7 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID7Start, getCharIndex()-1);
-            // ActionTranslator.g:713:24: ( WS )?
-            int alt17=2;
-            int LA17_0 = input.LA(1);
-            if ( ((LA17_0>='\t' && LA17_0<='\n')||LA17_0==' ') ) {
-                alt17=1;
-            }
-            switch (alt17) {
-                case 1 :
-                    // ActionTranslator.g:713:24: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            int exprStart = getCharIndex();
-            mATTR_VALUE_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(';'); if (failed) return ;
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = template("actionSetAttribute");
-              		String action = a.getText();
-              		action = action.substring(1,action.length()-1); // stuff inside {...}
-              		st.setAttribute("st", translateAction(action));
-              		st.setAttribute("attrName", ID7.getText());
-              		st.setAttribute("expr", translateAction(expr.getText()));
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_EXPR_ATTRIBUTE
-
-    // $ANTLR start SET_ATTRIBUTE
-    public void mSET_ATTRIBUTE() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = SET_ATTRIBUTE;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:730:4: ( '%' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';' )
-            // ActionTranslator.g:730:4: '%' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
-            {
-            match('%'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            // ActionTranslator.g:730:22: ( WS )?
-            int alt18=2;
-            int LA18_0 = input.LA(1);
-            if ( ((LA18_0>='\t' && LA18_0<='\n')||LA18_0==' ') ) {
-                alt18=1;
-            }
-            switch (alt18) {
-                case 1 :
-                    // ActionTranslator.g:730:22: WS
-                    {
-                    mWS(); if (failed) return ;
-
-                    }
-                    break;
-
-            }
-
-            match('='); if (failed) return ;
-            int exprStart = getCharIndex();
-            mATTR_VALUE_EXPR(); if (failed) return ;
-            Token expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart, getCharIndex()-1);
-            match(';'); if (failed) return ;
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = template("actionSetAttribute");
-              		st.setAttribute("st", x.getText());
-              		st.setAttribute("attrName", y.getText());
-              		st.setAttribute("expr", translateAction(expr.getText()));
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end SET_ATTRIBUTE
-
-    // $ANTLR start ATTR_VALUE_EXPR
-    public void mATTR_VALUE_EXPR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:743:4: (~ '=' (~ ';' )* )
-            // ActionTranslator.g:743:4: ~ '=' (~ ';' )*
-            {
-            if ( (input.LA(1)>='\u0000' && input.LA(1)<='<')||(input.LA(1)>='>' && input.LA(1)<='\uFFFE') ) {
-                input.consume();
-            failed=false;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                MismatchedSetException mse =
-                    new MismatchedSetException(null,input);
-                recover(mse);    throw mse;
-            }
-
-            // ActionTranslator.g:743:9: (~ ';' )*
-            loop19:
-            do {
-                int alt19=2;
-                int LA19_0 = input.LA(1);
-                if ( ((LA19_0>='\u0000' && LA19_0<=':')||(LA19_0>='<' && LA19_0<='\uFFFE')) ) {
-                    alt19=1;
-                }
-
-
-                switch (alt19) {
-            	case 1 :
-            	    // ActionTranslator.g:743:10: ~ ';'
-            	    {
-            	    if ( (input.LA(1)>='\u0000' && input.LA(1)<=':')||(input.LA(1)>='<' && input.LA(1)<='\uFFFE') ) {
-            	        input.consume();
-            	    failed=false;
-            	    }
-            	    else {
-            	        if (backtracking>0) {failed=true; return ;}
-            	        MismatchedSetException mse =
-            	            new MismatchedSetException(null,input);
-            	        recover(mse);    throw mse;
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop19;
-                }
-            } while (true);
-
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ATTR_VALUE_EXPR
-
-    // $ANTLR start TEMPLATE_EXPR
-    public void mTEMPLATE_EXPR() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = TEMPLATE_EXPR;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:748:4: ( '%' a= ACTION )
-            // ActionTranslator.g:748:4: '%' a= ACTION
-            {
-            match('%'); if (failed) return ;
-            int aStart = getCharIndex();
-            mACTION(); if (failed) return ;
-            Token a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart, getCharIndex()-1);
-            if ( backtracking==1 ) {
-
-              		StringTemplate st = template("actionStringConstructor");
-              		String action = a.getText();
-              		action = action.substring(1,action.length()-1); // stuff inside {...}
-              		st.setAttribute("stringExpr", translateAction(action));
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end TEMPLATE_EXPR
-
-    // $ANTLR start ACTION
-    public void mACTION() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:760:4: ( '{' ( options {greedy=false; } : . )* '}' )
-            // ActionTranslator.g:760:4: '{' ( options {greedy=false; } : . )* '}'
-            {
-            match('{'); if (failed) return ;
-            // ActionTranslator.g:760:8: ( options {greedy=false; } : . )*
-            loop20:
-            do {
-                int alt20=2;
-                int LA20_0 = input.LA(1);
-                if ( (LA20_0=='}') ) {
-                    alt20=2;
-                }
-                else if ( ((LA20_0>='\u0000' && LA20_0<='|')||(LA20_0>='~' && LA20_0<='\uFFFE')) ) {
-                    alt20=1;
-                }
-
-
-                switch (alt20) {
-            	case 1 :
-            	    // ActionTranslator.g:760:33: .
-            	    {
-            	    matchAny(); if (failed) return ;
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop20;
-                }
-            } while (true);
-
-            match('}'); if (failed) return ;
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ACTION
-
-    // $ANTLR start ESC
-    public void mESC() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ESC;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:763:9: ( '\\\\' '$' | '\\\\' '%' | '\\\\' ~ ('$'|'%'))
-            int alt21=3;
-            int LA21_0 = input.LA(1);
-            if ( (LA21_0=='\\') ) {
-                int LA21_1 = input.LA(2);
-                if ( (LA21_1=='%') ) {
-                    alt21=2;
-                }
-                else if ( (LA21_1=='$') ) {
-                    alt21=1;
-                }
-                else if ( ((LA21_1>='\u0000' && LA21_1<='#')||(LA21_1>='&' && LA21_1<='\uFFFE')) ) {
-                    alt21=3;
-                }
-                else {
-                    if (backtracking>0) {failed=true; return ;}
-                    NoViableAltException nvae =
-                        new NoViableAltException("763:1: ESC : ( '\\\\' '$' | '\\\\' '%' | '\\\\' ~ ('$'|'%'));", 21, 1, input);
-
-                    throw nvae;
-                }
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                NoViableAltException nvae =
-                    new NoViableAltException("763:1: ESC : ( '\\\\' '$' | '\\\\' '%' | '\\\\' ~ ('$'|'%'));", 21, 0, input);
-
-                throw nvae;
-            }
-            switch (alt21) {
-                case 1 :
-                    // ActionTranslator.g:763:9: '\\\\' '$'
-                    {
-                    match('\\'); if (failed) return ;
-                    match('$'); if (failed) return ;
-                    if ( backtracking==1 ) {
-                      chunks.add("$");
-                    }
-
-                    }
-                    break;
-                case 2 :
-                    // ActionTranslator.g:764:4: '\\\\' '%'
-                    {
-                    match('\\'); if (failed) return ;
-                    match('%'); if (failed) return ;
-                    if ( backtracking==1 ) {
-                      chunks.add("%");
-                    }
-
-                    }
-                    break;
-                case 3 :
-                    // ActionTranslator.g:765:4: '\\\\' ~ ('$'|'%')
-                    {
-                    match('\\'); if (failed) return ;
-                    if ( (input.LA(1)>='\u0000' && input.LA(1)<='#')||(input.LA(1)>='&' && input.LA(1)<='\uFFFE') ) {
-                        input.consume();
-                    failed=false;
-                    }
-                    else {
-                        if (backtracking>0) {failed=true; return ;}
-                        MismatchedSetException mse =
-                            new MismatchedSetException(null,input);
-                        recover(mse);    throw mse;
-                    }
-
-                    if ( backtracking==1 ) {
-                      chunks.add(getText());
-                    }
-
-                    }
-                    break;
-
-            }
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ESC
-
-    // $ANTLR start ERROR_XY
-    public void mERROR_XY() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ERROR_XY;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:769:4: ( '$' x= ID '.' y= ID )
-            // ActionTranslator.g:769:4: '$' x= ID '.' y= ID
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( backtracking==1 ) {
-
-              		chunks.add(getText());
-              		generator.issueInvalidAttributeError(x.getText(),y.getText(),
-              		                                     enclosingRule,actionToken,
-              		                                     outerAltNum);
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ERROR_XY
-
-    // $ANTLR start ERROR_X
-    public void mERROR_X() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = ERROR_X;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:779:4: ( '$' x= ID )
-            // ActionTranslator.g:779:4: '$' x= ID
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            if ( backtracking==1 ) {
-
-              		chunks.add(getText());
-              		generator.issueInvalidAttributeError(x.getText(),
-              		                                     enclosingRule,actionToken,
-              		                                     outerAltNum);
-              		
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ERROR_X
-
-    // $ANTLR start UNKNOWN_SYNTAX
-    public void mUNKNOWN_SYNTAX() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = UNKNOWN_SYNTAX;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:789:4: ( '$' | '%' ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )* )
-            int alt23=2;
-            int LA23_0 = input.LA(1);
-            if ( (LA23_0=='$') ) {
-                alt23=1;
-            }
-            else if ( (LA23_0=='%') ) {
-                alt23=2;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                NoViableAltException nvae =
-                    new NoViableAltException("788:1: UNKNOWN_SYNTAX : ( '$' | '%' ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )* );", 23, 0, input);
-
-                throw nvae;
-            }
-            switch (alt23) {
-                case 1 :
-                    // ActionTranslator.g:789:4: '$'
-                    {
-                    match('$'); if (failed) return ;
-                    if ( backtracking==1 ) {
-
-                      		chunks.add(getText());
-                      		// shouldn't need an error here.  Just accept $ if it doesn't look like anything
-                      		
-                    }
-
-                    }
-                    break;
-                case 2 :
-                    // ActionTranslator.g:794:4: '%' ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )*
-                    {
-                    match('%'); if (failed) return ;
-                    // ActionTranslator.g:794:8: ( ID | '.' | '(' | ')' | ',' | '{' | '}' | '\"' )*
-                    loop22:
-                    do {
-                        int alt22=9;
-                        switch ( input.LA(1) ) {
-                        case 'A':
-                        case 'B':
-                        case 'C':
-                        case 'D':
-                        case 'E':
-                        case 'F':
-                        case 'G':
-                        case 'H':
-                        case 'I':
-                        case 'J':
-                        case 'K':
-                        case 'L':
-                        case 'M':
-                        case 'N':
-                        case 'O':
-                        case 'P':
-                        case 'Q':
-                        case 'R':
-                        case 'S':
-                        case 'T':
-                        case 'U':
-                        case 'V':
-                        case 'W':
-                        case 'X':
-                        case 'Y':
-                        case 'Z':
-                        case '_':
-                        case 'a':
-                        case 'b':
-                        case 'c':
-                        case 'd':
-                        case 'e':
-                        case 'f':
-                        case 'g':
-                        case 'h':
-                        case 'i':
-                        case 'j':
-                        case 'k':
-                        case 'l':
-                        case 'm':
-                        case 'n':
-                        case 'o':
-                        case 'p':
-                        case 'q':
-                        case 'r':
-                        case 's':
-                        case 't':
-                        case 'u':
-                        case 'v':
-                        case 'w':
-                        case 'x':
-                        case 'y':
-                        case 'z':
-                            alt22=1;
-                            break;
-                        case '.':
-                            alt22=2;
-                            break;
-                        case '(':
-                            alt22=3;
-                            break;
-                        case ')':
-                            alt22=4;
-                            break;
-                        case ',':
-                            alt22=5;
-                            break;
-                        case '{':
-                            alt22=6;
-                            break;
-                        case '}':
-                            alt22=7;
-                            break;
-                        case '\"':
-                            alt22=8;
-                            break;
-
-                        }
-
-                        switch (alt22) {
-                    	case 1 :
-                    	    // ActionTranslator.g:794:9: ID
-                    	    {
-                    	    mID(); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 2 :
-                    	    // ActionTranslator.g:794:12: '.'
-                    	    {
-                    	    match('.'); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 3 :
-                    	    // ActionTranslator.g:794:16: '('
-                    	    {
-                    	    match('('); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 4 :
-                    	    // ActionTranslator.g:794:20: ')'
-                    	    {
-                    	    match(')'); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 5 :
-                    	    // ActionTranslator.g:794:24: ','
-                    	    {
-                    	    match(','); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 6 :
-                    	    // ActionTranslator.g:794:28: '{'
-                    	    {
-                    	    match('{'); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 7 :
-                    	    // ActionTranslator.g:794:32: '}'
-                    	    {
-                    	    match('}'); if (failed) return ;
-
-                    	    }
-                    	    break;
-                    	case 8 :
-                    	    // ActionTranslator.g:794:36: '\"'
-                    	    {
-                    	    match('\"'); if (failed) return ;
-
-                    	    }
-                    	    break;
-
-                    	default :
-                    	    break loop22;
-                        }
-                    } while (true);
-
-                    if ( backtracking==1 ) {
-
-                      		chunks.add(getText());
-                      		ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
-                      								  grammar,
-                      								  actionToken,
-                      								  getText());
-                      		
-                    }
-
-                    }
-                    break;
-
-            }
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end UNKNOWN_SYNTAX
-
-    // $ANTLR start TEXT
-    public void mTEXT() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            int _type = TEXT;
-            int _start = getCharIndex();
-            int _line = getLine();
-            int _charPosition = getCharPositionInLine();
-            int _channel = Token.DEFAULT_CHANNEL;
-            // ActionTranslator.g:804:7: ( (~ ('$'|'%'|'\\\\'))+ )
-            // ActionTranslator.g:804:7: (~ ('$'|'%'|'\\\\'))+
-            {
-            // ActionTranslator.g:804:7: (~ ('$'|'%'|'\\\\'))+
-            int cnt24=0;
-            loop24:
-            do {
-                int alt24=2;
-                int LA24_0 = input.LA(1);
-                if ( ((LA24_0>='\u0000' && LA24_0<='#')||(LA24_0>='&' && LA24_0<='[')||(LA24_0>=']' && LA24_0<='\uFFFE')) ) {
-                    alt24=1;
-                }
-
-
-                switch (alt24) {
-            	case 1 :
-            	    // ActionTranslator.g:804:7: ~ ('$'|'%'|'\\\\')
-            	    {
-            	    if ( (input.LA(1)>='\u0000' && input.LA(1)<='#')||(input.LA(1)>='&' && input.LA(1)<='[')||(input.LA(1)>=']' && input.LA(1)<='\uFFFE') ) {
-            	        input.consume();
-            	    failed=false;
-            	    }
-            	    else {
-            	        if (backtracking>0) {failed=true; return ;}
-            	        MismatchedSetException mse =
-            	            new MismatchedSetException(null,input);
-            	        recover(mse);    throw mse;
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt24 >= 1 ) break loop24;
-            	    if (backtracking>0) {failed=true; return ;}
-                        EarlyExitException eee =
-                            new EarlyExitException(24, input);
-                        throw eee;
-                }
-                cnt24++;
-            } while (true);
-
-            if ( backtracking==1 ) {
-              chunks.add(getText());
-            }
-
-            }
-
-
-            if ( backtracking==1 ) {
-
-                      if ( token==null && ruleNestingLevel==1 ) {
-                          emit(_type,_line,_charPosition,_channel,_start,getCharIndex()-1);
-                      }
-
-                      
-            }    }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end TEXT
-
-    // $ANTLR start ID
-    public void mID() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:808:9: ( ('a'..'z'|'A'..'Z'|'_') ( ('a'..'z'|'A'..'Z'|'_'|'0'..'9'))* )
-            // ActionTranslator.g:808:9: ('a'..'z'|'A'..'Z'|'_') ( ('a'..'z'|'A'..'Z'|'_'|'0'..'9'))*
-            {
-            if ( (input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
-                input.consume();
-            failed=false;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                MismatchedSetException mse =
-                    new MismatchedSetException(null,input);
-                recover(mse);    throw mse;
-            }
-
-            // ActionTranslator.g:808:33: ( ('a'..'z'|'A'..'Z'|'_'|'0'..'9'))*
-            loop25:
-            do {
-                int alt25=2;
-                int LA25_0 = input.LA(1);
-                if ( ((LA25_0>='0' && LA25_0<='9')||(LA25_0>='A' && LA25_0<='Z')||LA25_0=='_'||(LA25_0>='a' && LA25_0<='z')) ) {
-                    alt25=1;
-                }
-
-
-                switch (alt25) {
-            	case 1 :
-            	    // ActionTranslator.g:808:34: ('a'..'z'|'A'..'Z'|'_'|'0'..'9')
-            	    {
-            	    if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
-            	        input.consume();
-            	    failed=false;
-            	    }
-            	    else {
-            	        if (backtracking>0) {failed=true; return ;}
-            	        MismatchedSetException mse =
-            	            new MismatchedSetException(null,input);
-            	        recover(mse);    throw mse;
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop25;
-                }
-            } while (true);
-
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end ID
-
-    // $ANTLR start INT
-    public void mINT() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:812:7: ( ( '0' .. '9' )+ )
-            // ActionTranslator.g:812:7: ( '0' .. '9' )+
-            {
-            // ActionTranslator.g:812:7: ( '0' .. '9' )+
-            int cnt26=0;
-            loop26:
-            do {
-                int alt26=2;
-                int LA26_0 = input.LA(1);
-                if ( ((LA26_0>='0' && LA26_0<='9')) ) {
-                    alt26=1;
-                }
-
-
-                switch (alt26) {
-            	case 1 :
-            	    // ActionTranslator.g:812:7: '0' .. '9'
-            	    {
-            	    matchRange('0','9'); if (failed) return ;
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt26 >= 1 ) break loop26;
-            	    if (backtracking>0) {failed=true; return ;}
-                        EarlyExitException eee =
-                            new EarlyExitException(26, input);
-                        throw eee;
-                }
-                cnt26++;
-            } while (true);
-
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end INT
-
-    // $ANTLR start WS
-    public void mWS() throws RecognitionException {
-        try {
-            ruleNestingLevel++;
-            // ActionTranslator.g:816:6: ( ( (' '|'\\t'|'\\n'))+ )
-            // ActionTranslator.g:816:6: ( (' '|'\\t'|'\\n'))+
-            {
-            // ActionTranslator.g:816:6: ( (' '|'\\t'|'\\n'))+
-            int cnt27=0;
-            loop27:
-            do {
-                int alt27=2;
-                int LA27_0 = input.LA(1);
-                if ( ((LA27_0>='\t' && LA27_0<='\n')||LA27_0==' ') ) {
-                    alt27=1;
-                }
-
-
-                switch (alt27) {
-            	case 1 :
-            	    // ActionTranslator.g:816:7: (' '|'\\t'|'\\n')
-            	    {
-            	    if ( (input.LA(1)>='\t' && input.LA(1)<='\n')||input.LA(1)==' ' ) {
-            	        input.consume();
-            	    failed=false;
-            	    }
-            	    else {
-            	        if (backtracking>0) {failed=true; return ;}
-            	        MismatchedSetException mse =
-            	            new MismatchedSetException(null,input);
-            	        recover(mse);    throw mse;
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt27 >= 1 ) break loop27;
-            	    if (backtracking>0) {failed=true; return ;}
-                        EarlyExitException eee =
-                            new EarlyExitException(27, input);
-                        throw eee;
-                }
-                cnt27++;
-            } while (true);
-
-
-            }
-
-        }
-        finally {
-            ruleNestingLevel--;
-        }
-    }
-    // $ANTLR end WS
-
-    public void mTokens() throws RecognitionException {
-        // ActionTranslator.g:1:25: ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> ISOLATED_LEXER_RULE_REF | ( SET_LOCAL_ATTR [...]
-        int alt28=27;
-        int LA28_0 = input.LA(1);
-        if ( (LA28_0=='$') ) {
-            if ( (synpred1()) ) {
-                alt28=1;
-            }
-            else if ( (synpred2()) ) {
-                alt28=2;
-            }
-            else if ( (synpred3()) ) {
-                alt28=3;
-            }
-            else if ( (synpred4()) ) {
-                alt28=4;
-            }
-            else if ( (synpred5()) ) {
-                alt28=5;
-            }
-            else if ( (synpred6()) ) {
-                alt28=6;
-            }
-            else if ( (synpred7()) ) {
-                alt28=7;
-            }
-            else if ( (synpred8()) ) {
-                alt28=8;
-            }
-            else if ( (synpred9()) ) {
-                alt28=9;
-            }
-            else if ( (synpred10()) ) {
-                alt28=10;
-            }
-            else if ( (synpred11()) ) {
-                alt28=11;
-            }
-            else if ( (synpred12()) ) {
-                alt28=12;
-            }
-            else if ( (synpred13()) ) {
-                alt28=13;
-            }
-            else if ( (synpred14()) ) {
-                alt28=14;
-            }
-            else if ( (synpred15()) ) {
-                alt28=15;
-            }
-            else if ( (synpred16()) ) {
-                alt28=16;
-            }
-            else if ( (synpred17()) ) {
-                alt28=17;
-            }
-            else if ( (synpred24()) ) {
-                alt28=24;
-            }
-            else if ( (synpred25()) ) {
-                alt28=25;
-            }
-            else if ( (synpred26()) ) {
-                alt28=26;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                NoViableAltException nvae =
-                    new NoViableAltException("1:1: Tokens options {k=1; } : ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> IS [...]
-
-                throw nvae;
-            }
-        }
-        else if ( (LA28_0=='%') ) {
-            if ( (synpred18()) ) {
-                alt28=18;
-            }
-            else if ( (synpred19()) ) {
-                alt28=19;
-            }
-            else if ( (synpred20()) ) {
-                alt28=20;
-            }
-            else if ( (synpred21()) ) {
-                alt28=21;
-            }
-            else if ( (synpred22()) ) {
-                alt28=22;
-            }
-            else if ( (synpred26()) ) {
-                alt28=26;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                NoViableAltException nvae =
-                    new NoViableAltException("1:1: Tokens options {k=1; } : ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> IS [...]
-
-                throw nvae;
-            }
-        }
-        else if ( (LA28_0=='\\') ) {
-            alt28=23;
-        }
-        else if ( ((LA28_0>='\u0000' && LA28_0<='#')||(LA28_0>='&' && LA28_0<='[')||(LA28_0>=']' && LA28_0<='\uFFFE')) ) {
-            alt28=27;
-        }
-        else {
-            if (backtracking>0) {failed=true; return ;}
-            NoViableAltException nvae =
-                new NoViableAltException("1:1: Tokens options {k=1; } : ( ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR | ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR | ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR | ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR | ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR | ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR | ( LABEL_REF )=> LABEL_REF | ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF | ( ISOLATED_LEXER_RULE_REF )=> ISOLAT [...]
-
-            throw nvae;
-        }
-        switch (alt28) {
-            case 1 :
-                // ActionTranslator.g:1:25: ( SET_ENCLOSING_RULE_SCOPE_ATTR )=> SET_ENCLOSING_RULE_SCOPE_ATTR
-                {
-                mSET_ENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 2 :
-                // ActionTranslator.g:1:88: ( ENCLOSING_RULE_SCOPE_ATTR )=> ENCLOSING_RULE_SCOPE_ATTR
-                {
-                mENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 3 :
-                // ActionTranslator.g:1:143: ( SET_TOKEN_SCOPE_ATTR )=> SET_TOKEN_SCOPE_ATTR
-                {
-                mSET_TOKEN_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 4 :
-                // ActionTranslator.g:1:188: ( TOKEN_SCOPE_ATTR )=> TOKEN_SCOPE_ATTR
-                {
-                mTOKEN_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 5 :
-                // ActionTranslator.g:1:225: ( SET_RULE_SCOPE_ATTR )=> SET_RULE_SCOPE_ATTR
-                {
-                mSET_RULE_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 6 :
-                // ActionTranslator.g:1:268: ( RULE_SCOPE_ATTR )=> RULE_SCOPE_ATTR
-                {
-                mRULE_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 7 :
-                // ActionTranslator.g:1:303: ( LABEL_REF )=> LABEL_REF
-                {
-                mLABEL_REF(); if (failed) return ;
-
-                }
-                break;
-            case 8 :
-                // ActionTranslator.g:1:326: ( ISOLATED_TOKEN_REF )=> ISOLATED_TOKEN_REF
-                {
-                mISOLATED_TOKEN_REF(); if (failed) return ;
-
-                }
-                break;
-            case 9 :
-                // ActionTranslator.g:1:367: ( ISOLATED_LEXER_RULE_REF )=> ISOLATED_LEXER_RULE_REF
-                {
-                mISOLATED_LEXER_RULE_REF(); if (failed) return ;
-
-                }
-                break;
-            case 10 :
-                // ActionTranslator.g:1:418: ( SET_LOCAL_ATTR )=> SET_LOCAL_ATTR
-                {
-                mSET_LOCAL_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 11 :
-                // ActionTranslator.g:1:451: ( LOCAL_ATTR )=> LOCAL_ATTR
-                {
-                mLOCAL_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 12 :
-                // ActionTranslator.g:1:476: ( SET_DYNAMIC_SCOPE_ATTR )=> SET_DYNAMIC_SCOPE_ATTR
-                {
-                mSET_DYNAMIC_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 13 :
-                // ActionTranslator.g:1:525: ( DYNAMIC_SCOPE_ATTR )=> DYNAMIC_SCOPE_ATTR
-                {
-                mDYNAMIC_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 14 :
-                // ActionTranslator.g:1:566: ( ERROR_SCOPED_XY )=> ERROR_SCOPED_XY
-                {
-                mERROR_SCOPED_XY(); if (failed) return ;
-
-                }
-                break;
-            case 15 :
-                // ActionTranslator.g:1:601: ( DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR )=> DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
-                {
-                mDYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 16 :
-                // ActionTranslator.g:1:676: ( DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR )=> DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
-                {
-                mDYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR(); if (failed) return ;
-
-                }
-                break;
-            case 17 :
-                // ActionTranslator.g:1:751: ( ISOLATED_DYNAMIC_SCOPE )=> ISOLATED_DYNAMIC_SCOPE
-                {
-                mISOLATED_DYNAMIC_SCOPE(); if (failed) return ;
-
-                }
-                break;
-            case 18 :
-                // ActionTranslator.g:1:800: ( TEMPLATE_INSTANCE )=> TEMPLATE_INSTANCE
-                {
-                mTEMPLATE_INSTANCE(); if (failed) return ;
-
-                }
-                break;
-            case 19 :
-                // ActionTranslator.g:1:839: ( INDIRECT_TEMPLATE_INSTANCE )=> INDIRECT_TEMPLATE_INSTANCE
-                {
-                mINDIRECT_TEMPLATE_INSTANCE(); if (failed) return ;
-
-                }
-                break;
-            case 20 :
-                // ActionTranslator.g:1:896: ( SET_EXPR_ATTRIBUTE )=> SET_EXPR_ATTRIBUTE
-                {
-                mSET_EXPR_ATTRIBUTE(); if (failed) return ;
-
-                }
-                break;
-            case 21 :
-                // ActionTranslator.g:1:937: ( SET_ATTRIBUTE )=> SET_ATTRIBUTE
-                {
-                mSET_ATTRIBUTE(); if (failed) return ;
-
-                }
-                break;
-            case 22 :
-                // ActionTranslator.g:1:968: ( TEMPLATE_EXPR )=> TEMPLATE_EXPR
-                {
-                mTEMPLATE_EXPR(); if (failed) return ;
-
-                }
-                break;
-            case 23 :
-                // ActionTranslator.g:1:999: ( ESC )=> ESC
-                {
-                mESC(); if (failed) return ;
-
-                }
-                break;
-            case 24 :
-                // ActionTranslator.g:1:1010: ( ERROR_XY )=> ERROR_XY
-                {
-                mERROR_XY(); if (failed) return ;
-
-                }
-                break;
-            case 25 :
-                // ActionTranslator.g:1:1031: ( ERROR_X )=> ERROR_X
-                {
-                mERROR_X(); if (failed) return ;
-
-                }
-                break;
-            case 26 :
-                // ActionTranslator.g:1:1050: ( UNKNOWN_SYNTAX )=> UNKNOWN_SYNTAX
-                {
-                mUNKNOWN_SYNTAX(); if (failed) return ;
-
-                }
-                break;
-            case 27 :
-                // ActionTranslator.g:1:1083: ( TEXT )=> TEXT
-                {
-                mTEXT(); if (failed) return ;
-
-                }
-                break;
-
-        }
-
-    }
-
-    // $ANTLR start synpred1
-    public void synpred1_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:25: ( SET_ENCLOSING_RULE_SCOPE_ATTR )
-        // ActionTranslator.g:1:26: SET_ENCLOSING_RULE_SCOPE_ATTR
-        {
-        mSET_ENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred1
-
-    // $ANTLR start synpred2
-    public void synpred2_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:88: ( ENCLOSING_RULE_SCOPE_ATTR )
-        // ActionTranslator.g:1:89: ENCLOSING_RULE_SCOPE_ATTR
-        {
-        mENCLOSING_RULE_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred2
-
-    // $ANTLR start synpred3
-    public void synpred3_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:143: ( SET_TOKEN_SCOPE_ATTR )
-        // ActionTranslator.g:1:144: SET_TOKEN_SCOPE_ATTR
-        {
-        mSET_TOKEN_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred3
-
-    // $ANTLR start synpred4
-    public void synpred4_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:188: ( TOKEN_SCOPE_ATTR )
-        // ActionTranslator.g:1:189: TOKEN_SCOPE_ATTR
-        {
-        mTOKEN_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred4
-
-    // $ANTLR start synpred5
-    public void synpred5_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:225: ( SET_RULE_SCOPE_ATTR )
-        // ActionTranslator.g:1:226: SET_RULE_SCOPE_ATTR
-        {
-        mSET_RULE_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred5
-
-    // $ANTLR start synpred6
-    public void synpred6_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:268: ( RULE_SCOPE_ATTR )
-        // ActionTranslator.g:1:269: RULE_SCOPE_ATTR
-        {
-        mRULE_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred6
-
-    // $ANTLR start synpred7
-    public void synpred7_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:303: ( LABEL_REF )
-        // ActionTranslator.g:1:304: LABEL_REF
-        {
-        mLABEL_REF(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred7
-
-    // $ANTLR start synpred8
-    public void synpred8_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:326: ( ISOLATED_TOKEN_REF )
-        // ActionTranslator.g:1:327: ISOLATED_TOKEN_REF
-        {
-        mISOLATED_TOKEN_REF(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred8
-
-    // $ANTLR start synpred9
-    public void synpred9_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:367: ( ISOLATED_LEXER_RULE_REF )
-        // ActionTranslator.g:1:368: ISOLATED_LEXER_RULE_REF
-        {
-        mISOLATED_LEXER_RULE_REF(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred9
-
-    // $ANTLR start synpred10
-    public void synpred10_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:418: ( SET_LOCAL_ATTR )
-        // ActionTranslator.g:1:419: SET_LOCAL_ATTR
-        {
-        mSET_LOCAL_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred10
-
-    // $ANTLR start synpred11
-    public void synpred11_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:451: ( LOCAL_ATTR )
-        // ActionTranslator.g:1:452: LOCAL_ATTR
-        {
-        mLOCAL_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred11
-
-    // $ANTLR start synpred12
-    public void synpred12_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:476: ( SET_DYNAMIC_SCOPE_ATTR )
-        // ActionTranslator.g:1:477: SET_DYNAMIC_SCOPE_ATTR
-        {
-        mSET_DYNAMIC_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred12
-
-    // $ANTLR start synpred13
-    public void synpred13_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:525: ( DYNAMIC_SCOPE_ATTR )
-        // ActionTranslator.g:1:526: DYNAMIC_SCOPE_ATTR
-        {
-        mDYNAMIC_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred13
-
-    // $ANTLR start synpred14
-    public void synpred14_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:566: ( ERROR_SCOPED_XY )
-        // ActionTranslator.g:1:567: ERROR_SCOPED_XY
-        {
-        mERROR_SCOPED_XY(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred14
-
-    // $ANTLR start synpred15
-    public void synpred15_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:601: ( DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR )
-        // ActionTranslator.g:1:602: DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
-        {
-        mDYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred15
-
-    // $ANTLR start synpred16
-    public void synpred16_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:676: ( DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR )
-        // ActionTranslator.g:1:677: DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
-        {
-        mDYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred16
-
-    // $ANTLR start synpred17
-    public void synpred17_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:751: ( ISOLATED_DYNAMIC_SCOPE )
-        // ActionTranslator.g:1:752: ISOLATED_DYNAMIC_SCOPE
-        {
-        mISOLATED_DYNAMIC_SCOPE(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred17
-
-    // $ANTLR start synpred18
-    public void synpred18_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:800: ( TEMPLATE_INSTANCE )
-        // ActionTranslator.g:1:801: TEMPLATE_INSTANCE
-        {
-        mTEMPLATE_INSTANCE(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred18
-
-    // $ANTLR start synpred19
-    public void synpred19_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:839: ( INDIRECT_TEMPLATE_INSTANCE )
-        // ActionTranslator.g:1:840: INDIRECT_TEMPLATE_INSTANCE
-        {
-        mINDIRECT_TEMPLATE_INSTANCE(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred19
-
-    // $ANTLR start synpred20
-    public void synpred20_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:896: ( SET_EXPR_ATTRIBUTE )
-        // ActionTranslator.g:1:897: SET_EXPR_ATTRIBUTE
-        {
-        mSET_EXPR_ATTRIBUTE(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred20
-
-    // $ANTLR start synpred21
-    public void synpred21_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:937: ( SET_ATTRIBUTE )
-        // ActionTranslator.g:1:938: SET_ATTRIBUTE
-        {
-        mSET_ATTRIBUTE(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred21
-
-    // $ANTLR start synpred22
-    public void synpred22_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:968: ( TEMPLATE_EXPR )
-        // ActionTranslator.g:1:969: TEMPLATE_EXPR
-        {
-        mTEMPLATE_EXPR(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred22
-
-    // $ANTLR start synpred24
-    public void synpred24_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:1010: ( ERROR_XY )
-        // ActionTranslator.g:1:1011: ERROR_XY
-        {
-        mERROR_XY(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred24
-
-    // $ANTLR start synpred25
-    public void synpred25_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:1031: ( ERROR_X )
-        // ActionTranslator.g:1:1032: ERROR_X
-        {
-        mERROR_X(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred25
-
-    // $ANTLR start synpred26
-    public void synpred26_fragment() throws RecognitionException {   
-        // ActionTranslator.g:1:1050: ( UNKNOWN_SYNTAX )
-        // ActionTranslator.g:1:1051: UNKNOWN_SYNTAX
-        {
-        mUNKNOWN_SYNTAX(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred26
-
-    public boolean synpred25() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred25_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred7() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred7_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred14() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred14_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred15() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred15_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred22() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred22_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred12() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred12_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred4() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred4_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred9() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred9_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred1() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred1_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred20() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred20_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred17() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred17_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred2() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred2_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred18() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred18_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred3() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred3_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred11() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred11_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred26() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred26_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred8() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred8_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred10() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred10_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred21() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred21_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred16() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred16_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred19() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred19_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred24() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred24_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred5() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred5_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred6() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred6_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public boolean synpred13() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred13_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-
-
- 
-
-}
\ No newline at end of file
diff --git a/src/org/antlr/codegen/CTarget.java b/src/org/antlr/codegen/CTarget.java
deleted file mode 100644
index ac01022..0000000
--- a/src/org/antlr/codegen/CTarget.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
- 
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.tool.Grammar;
-
-import java.io.IOException;
-import java.util.ArrayList;
-        
-public class CTarget extends Target {
-    
-        ArrayList strings = new ArrayList();
- 
-        protected void genRecognizerFile(Tool tool,
-									CodeGenerator generator,
-									Grammar grammar,
-									StringTemplate outputFileST)
-		throws IOException
-	{
-                // Before we write this, and cause it to generate its string,
-                // we need to add all the string literals that we are going to match
-                //
-                outputFileST.setAttribute("literals", strings);
-                //System.out.println(outputFileST.toStructureString());
-		String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
-		generator.write(outputFileST, fileName);
-	}
-                
-	protected void genRecognizerHeaderFile(Tool tool,
-										   CodeGenerator generator,
-										   Grammar grammar,
-										   StringTemplate headerFileST,
-										   String extName)
-		throws IOException
-	{
-            generator.write(headerFileST, grammar.name+ Grammar.grammarTypeToFileNameSuffix[grammar.type] +extName);
-	}
-        
-        protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
-										   CodeGenerator generator,
-										   Grammar grammar,
-										   StringTemplate recognizerST,
-										   StringTemplate cyclicDFAST)
-	{
-		return recognizerST;
-	}
-        
- 	/** Is scope in @scope::name {action} valid for this kind of grammar?
-	 *  Targets like C++ may want to allow new scopes like headerfile or
-	 *  some such.  The action names themselves are not policed at the
-	 *  moment so targets can add template actions w/o having to recompile
-	 *  ANTLR.
-	 */
-	public boolean isValidActionScope(int grammarType, String scope) {
-		switch (grammarType) {
-			case Grammar.LEXER :
-				if ( scope.equals("lexer") ) {return true;}
-                                if ( scope.equals("header") ) {return true;}
-                                if ( scope.equals("includes") ) {return true;}
-                                if ( scope.equals("preincludes") ) {return true;}
-                                if ( scope.equals("overrides") ) {return true;}
-				break;
-			case Grammar.PARSER :
-				if ( scope.equals("parser") ) {return true;}
-                                if ( scope.equals("header") ) {return true;}
-                                if ( scope.equals("includes") ) {return true;}
-                                if ( scope.equals("preincludes") ) {return true;}
-                                if ( scope.equals("overrides") ) {return true;}
-				break;
-			case Grammar.COMBINED :
-				if ( scope.equals("parser") ) {return true;}
-				if ( scope.equals("lexer") ) {return true;}
-                                if ( scope.equals("header") ) {return true;}
-                                if ( scope.equals("includes") ) {return true;}
-                                if ( scope.equals("preincludes") ) {return true;}
-                                if ( scope.equals("overrides") ) {return true;}
-				break;
-			case Grammar.TREE_PARSER :
-				if ( scope.equals("treeparser") ) {return true;}
-                                if ( scope.equals("header") ) {return true;}
-                                if ( scope.equals("includes") ) {return true;}
-                                if ( scope.equals("preincludes") ) {return true;}
-                                if ( scope.equals("overrides") ) {return true;}
-				break;
-		}
-		return false;
-	}
-        
-        public String getTargetCharLiteralFromANTLRCharLiteral(
-		CodeGenerator generator,
-		String literal)
-	{
-                
-                if  (literal.startsWith("'\\u") )
-                {
-                    literal = "0x" +literal.substring(3, 7);
-                }
-                else
-                {
-                    int c = literal.charAt(1);
-                      
-                    if  (c < 32 || c > 127) {
-                        literal  =  "0x" + Integer.toHexString(c);
-                    }
-                }
-                
-                return literal;
-	}
-        
-	/** Convert from an ANTLR string literal found in a grammar file to
-	 *  an equivalent string literal in the C target.
-         *  Because we msut support Unicode character sets and have chosen
-         *  to have the lexer match UTF32 characters, then we must encode
-         *  string matches to use 32 bit character arrays. Here then we
-         *  must produce the C array and cater for the case where the 
-         *  lexer has been eoncded with a string such as "xyz\n", which looks
-         *  slightly incogrous to me but is not incorrect.
-	 */
-	public String getTargetStringLiteralFromANTLRStringLiteral(
-		CodeGenerator generator,
-		String literal)
-	{
-            int             index;
-            int             outc;
-            String          bytes;
-            StringBuffer    buf     = new StringBuffer();
-            
-            buf.append("{ ");
-            
-            // We need ot lose any escaped characters of the form \x and just
-            // replace them with their actual values as well as lose the surrounding
-            // quote marks.
-            //
-            for (int i = 1; i< literal.length()-1; i++)
-            {
-                buf.append("0x");
-                                
-                if  (literal.charAt(i) == '\\') 
-                {
-                    i++; // Assume that there is a next character, this will just yield
-                         // invalid strings if not, which is what the input would be of course - invalid
-                    switch (literal.charAt(i))
-                    {
-                        case 'u':
-                        case 'U':
-                            buf.append(literal.substring(i+1, i+5));  // Already a hex string
-                            i = i + 5;                                // Move to next string/char/escape
-                            break;
-                            
-                        case    'n':
-                        case    'N':
-                            
-                            buf.append("0A");
-                            break;
-                            
-                        case    'r':
-                        case    'R':
-                            
-                            buf.append("0D");
-                            break;
-                            
-                        case    't':
-                        case    'T':
-                            
-                            buf.append("09");
-                            break;
-                        
-                        case    'b':
-                        case    'B':
-                            
-                            buf.append("08");
-                            break;
-                            
-                        case    'f':
-                        case    'F':
-                            
-                            buf.append("0C");
-                            break;
-                            
-                        default:
-                            
-                            // Anything else is what it is!
-                            //
-                            buf.append(Integer.toHexString((int)literal.charAt(i)).toUpperCase());
-                            break;
-                    }
-                }
-                else
-                {
-                    buf.append(Integer.toHexString((int)literal.charAt(i)).toUpperCase());
-                }
-                buf.append(", ");               
-            }
-            buf.append(" ANTLR3_STRING_TERMINATOR}");
-            
-            bytes   = buf.toString();            
-            index   = strings.indexOf(bytes);
-            
-            if  (index == -1)
-            {
-                strings.add(bytes);
-                index = strings.indexOf(bytes);
-            }
-             
-            String strref = "lit_" + String.valueOf(index+1);
-
-            return strref;
-	}
-       
-}
-
diff --git a/src/org/antlr/codegen/CodeGenTreeWalker.java b/src/org/antlr/codegen/CodeGenTreeWalker.java
deleted file mode 100644
index 35be1e2..0000000
--- a/src/org/antlr/codegen/CodeGenTreeWalker.java
+++ /dev/null
@@ -1,3132 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "codegen.g" -> "CodeGenTreeWalker.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.codegen;
-    import org.antlr.tool.*;
-    import org.antlr.analysis.*;
-    import org.antlr.misc.*;
-	import java.util.*;
-	import org.antlr.stringtemplate.*;
-    import antlr.TokenWithIndex;
-    import antlr.CommonToken;
-
-import antlr.TreeParser;
-import antlr.Token;
-import antlr.collections.AST;
-import antlr.RecognitionException;
-import antlr.ANTLRException;
-import antlr.NoViableAltException;
-import antlr.MismatchedTokenException;
-import antlr.SemanticException;
-import antlr.collections.impl.BitSet;
-import antlr.ASTPair;
-import antlr.collections.impl.ASTArray;
-
-
-/** Walk a grammar and generate code by gradually building up
- *  a bigger and bigger StringTemplate.
- *
- *  Terence Parr
- *  University of San Francisco
- *  June 15, 2004
- */
-public class CodeGenTreeWalker extends antlr.TreeParser       implements CodeGenTreeWalkerTokenTypes
- {
-
-	protected static final int RULE_BLOCK_NESTING_LEVEL = 0;
-	protected static final int OUTER_REWRITE_NESTING_LEVEL = 0;
-
-    protected String currentRuleName = null;
-    protected int blockNestingLevel = 0;
-    protected int rewriteBlockNestingLevel = 0;
-	protected int outerAltNum = 0;
-    protected StringTemplate currentBlockST = null;
-    protected boolean currentAltHasASTRewrite = false;
-    protected int rewriteTreeNestingLevel = 0;
-    protected Set rewriteRuleRefs = null;
-
-    public void reportError(RecognitionException ex) {
-		Token token = null;
-		if ( ex instanceof MismatchedTokenException ) {
-			token = ((MismatchedTokenException)ex).token;
-		}
-		else if ( ex instanceof NoViableAltException ) {
-			token = ((NoViableAltException)ex).token;
-		}
-        ErrorManager.syntaxError(
-            ErrorManager.MSG_SYNTAX_ERROR,
-            grammar,
-            token,
-            "codegen: "+ex.toString(),
-            ex);
-    }
-
-    public void reportError(String s) {
-        System.out.println("codegen: error: " + s);
-    }
-
-    protected CodeGenerator generator;
-    protected Grammar grammar;
-    protected StringTemplateGroup templates;
-
-    /** The overall lexer/parser template; simulate dynamically scoped
-     *  attributes by making this an instance var of the walker.
-     */
-    protected StringTemplate recognizerST;
-
-    protected StringTemplate outputFileST;
-    protected StringTemplate headerFileST;
-
-    protected String outputOption = "";
-
-	protected StringTemplate getWildcardST(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
-		String name = "wildcard";
-		if ( grammar.type==Grammar.LEXER ) {
-			name = "wildcardChar";
-		}
-		return getTokenElementST(name, name, elementAST, ast_suffix, label);
-	}
-
-	protected StringTemplate getRuleElementST(String name,
-										      String elementName,
-											  GrammarAST elementAST,
-    										  GrammarAST ast_suffix,
-    										  String label)
-	{
-		String suffix = getSTSuffix(ast_suffix,label);
-		name += suffix;
-		// if we're building trees and there is no label, gen a label
-		// unless we're in a synpred rule.
-		Rule r = grammar.getRule(currentRuleName);
-		if ( (grammar.buildAST()||suffix.length()>0) && label==null &&
-		     (r==null || !r.isSynPred) )
-		{
-			// we will need a label to do the AST or tracking, make one
-			label = generator.createUniqueLabel(elementName);
-			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
-			grammar.defineRuleRefLabel(currentRuleName, labelTok, elementAST);
-		}
-		StringTemplate elementST = templates.getInstanceOf(name);
-		if ( label!=null ) {
-			elementST.setAttribute("label", label);
-		}
-		return elementST;
-	}
-
-	protected StringTemplate getTokenElementST(String name,
-											   String elementName,
-											   GrammarAST elementAST,
-											   GrammarAST ast_suffix,
-											   String label)
-	{
-		String suffix = getSTSuffix(ast_suffix,label);
-		name += suffix;
-		// if we're building trees and there is no label, gen a label
-		// unless we're in a synpred rule.
-		Rule r = grammar.getRule(currentRuleName);
-		if ( (grammar.buildAST()||suffix.length()>0) && label==null &&
-		     (r==null || !r.isSynPred) )
-		{
-			label = generator.createUniqueLabel(elementName);
-			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
-			grammar.defineTokenRefLabel(currentRuleName, labelTok, elementAST);
-		}
-		StringTemplate elementST = templates.getInstanceOf(name);
-		if ( label!=null ) {
-			elementST.setAttribute("label", label);
-		}
-		return elementST;
-	}
-
-    public boolean isListLabel(String label) {
-		boolean hasListLabel=false;
-		if ( label!=null ) {
-			Rule r = grammar.getRule(currentRuleName);
-			String stName = null;
-			if ( r!=null ) {
-				Grammar.LabelElementPair pair = r.getLabel(label);
-				if ( pair!=null &&
-					 (pair.type==Grammar.TOKEN_LIST_LABEL||
-					  pair.type==Grammar.RULE_LIST_LABEL) )
-				{
-					hasListLabel=true;
-				}
-			}
-		}
-        return hasListLabel;
-    }
-
-	/** Return a non-empty template name suffix if the token is to be
-	 *  tracked, added to a tree, or both.
-	 */
-	protected String getSTSuffix(GrammarAST ast_suffix, String label) {
-		if ( grammar.type==Grammar.LEXER ) {
-			return "";
-		}
-		// handle list label stuff; make element use "Track"
-
-		String astPart = "";
-		String operatorPart = "";
-		String rewritePart = "";
-		String listLabelPart = "";
-		if ( grammar.buildAST() ) {
-			astPart = "AST";
-		}
-		if ( ast_suffix!=null ) {
-			if ( ast_suffix.getType()==ANTLRParser.ROOT ) {
-    			operatorPart = "RuleRoot";
-    		}
-    		else if ( ast_suffix.getType()==ANTLRParser.BANG ) {
-    			operatorPart = "Bang";
-    		}
-   		}
-		if ( currentAltHasASTRewrite ) {
-			rewritePart = "Track";
-		}
-		if ( isListLabel(label) ) {
-			listLabelPart = "AndListLabel";
-		}
-		String STsuffix = operatorPart+rewritePart+listLabelPart;
-		//System.out.println("suffix = "+STsuffix);
-
-    	return STsuffix;
-	}
-
-    /** Convert rewrite AST lists to target labels list */
-    protected List<String> getTokenTypesAsTargetLabels(Set<GrammarAST> refs) {
-        if ( refs==null || refs.size()==0 ) {
-            return null;
-        }
-        List<String> labels = new ArrayList<String>(refs.size());
-        for (GrammarAST t : refs) {
-            String label;
-            if ( t.getType()==ANTLRParser.RULE_REF ) {
-                label = t.getText();
-            }
-            else if ( t.getType()==ANTLRParser.LABEL ) {
-                label = t.getText();
-            }
-            else {
-                // must be char or string literal
-                label = generator.getTokenTypeAsTargetLabel(
-                            grammar.getTokenType(t.getText()));
-            }
-            labels.add(label);
-        }
-        return labels;
-    }
-
-    protected void init(Grammar g) {
-        this.grammar = g;
-        this.generator = grammar.getCodeGenerator();
-        this.templates = generator.getTemplates();
-    }
-public CodeGenTreeWalker() {
-	tokenNames = _tokenNames;
-}
-
-	public final void grammar(AST _t,
-		Grammar g,
-        StringTemplate recognizerST,
-        StringTemplate outputFileST,
-        StringTemplate headerFileST
-	) throws RecognitionException {
-		
-		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		init(g);
-		this.recognizerST = recognizerST;
-		this.outputFileST = outputFileST;
-		this.headerFileST = headerFileST;
-		String superClass = (String)g.getOption("superClass");
-		outputOption = (String)g.getOption("output");
-		recognizerST.setAttribute("superClass", superClass);
-		if ( g.type!=Grammar.LEXER ) {
-				recognizerST.setAttribute("ASTLabelType", g.getOption("ASTLabelType"));
-			}
-		if ( g.type==Grammar.TREE_PARSER && g.getOption("ASTLabelType")==null ) {
-				ErrorManager.grammarWarning(ErrorManager.MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR,
-										   g,
-										   null,
-										   g.name);
-			}
-		if ( g.type!=Grammar.TREE_PARSER ) {
-				recognizerST.setAttribute("labelType", g.getOption("TokenLabelType"));
-			}
-			recognizerST.setAttribute("numRules", grammar.getRules().size());
-			outputFileST.setAttribute("numRules", grammar.getRules().size());
-			headerFileST.setAttribute("numRules", grammar.getRules().size());
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LEXER_GRAMMAR:
-			{
-				AST __t3 = _t;
-				GrammarAST tmp1_AST_in = (GrammarAST)_t;
-				match(_t,LEXER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t3;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PARSER_GRAMMAR:
-			{
-				AST __t4 = _t;
-				GrammarAST tmp2_AST_in = (GrammarAST)_t;
-				match(_t,PARSER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t4;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TREE_GRAMMAR:
-			{
-				AST __t5 = _t;
-				GrammarAST tmp3_AST_in = (GrammarAST)_t;
-				match(_t,TREE_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t5;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case COMBINED_GRAMMAR:
-			{
-				AST __t6 = _t;
-				GrammarAST tmp4_AST_in = (GrammarAST)_t;
-				match(_t,COMBINED_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t6;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void grammarSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST name = null;
-		GrammarAST cmt = null;
-		
-		try {      // for error handling
-			name = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case DOC_COMMENT:
-			{
-				cmt = (GrammarAST)_t;
-				match(_t,DOC_COMMENT);
-				_t = _t.getNextSibling();
-				
-						 outputFileST.setAttribute("docComment", cmt.getText());
-						 headerFileST.setAttribute("docComment", cmt.getText());
-						
-				break;
-			}
-			case OPTIONS:
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			
-					String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
-			String n = name.getText()+suffix;
-					recognizerST.setAttribute("name", n);
-					outputFileST.setAttribute("name", n);
-					headerFileST.setAttribute("name", n);
-					recognizerST.setAttribute("scopes", grammar.getGlobalScopes());
-					headerFileST.setAttribute("scopes", grammar.getGlobalScopes());
-					
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				AST __t12 = _t;
-				GrammarAST tmp5_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getFirstChild();
-				GrammarAST tmp6_AST_in = (GrammarAST)_t;
-				if ( _t==null ) throw new MismatchedTokenException();
-				_t = _t.getNextSibling();
-				_t = __t12;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKENS:
-			{
-				AST __t14 = _t;
-				GrammarAST tmp7_AST_in = (GrammarAST)_t;
-				match(_t,TOKENS);
-				_t = _t.getFirstChild();
-				GrammarAST tmp8_AST_in = (GrammarAST)_t;
-				if ( _t==null ) throw new MismatchedTokenException();
-				_t = _t.getNextSibling();
-				_t = __t14;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop16:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==SCOPE)) {
-					attrScope(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop16;
-				}
-				
-			} while (true);
-			}
-			{
-			_loop18:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp9_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop18;
-				}
-				
-			} while (true);
-			}
-			rules(_t,recognizerST);
-			_t = _retTree;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void attrScope(AST _t) throws RecognitionException {
-		
-		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t8 = _t;
-			GrammarAST tmp10_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			GrammarAST tmp11_AST_in = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			GrammarAST tmp12_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t8;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rules(AST _t,
-		StringTemplate recognizerST
-	) throws RecognitionException {
-		
-		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StringTemplate rST;
-		
-		
-		try {      // for error handling
-			{
-			int _cnt22=0;
-			_loop22:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==RULE)) {
-					{
-					
-								String ruleName = _t.getFirstChild().getText();
-								Rule r = grammar.getRule(ruleName);
-								
-					if (_t==null) _t=ASTNULL;
-					if (((_t.getType()==RULE))&&(!r.isSynPred || grammar.synPredNamesUsedInDFA.contains(ruleName))) {
-						rST=rule(_t);
-						_t = _retTree;
-						
-										if ( rST!=null ) {
-											recognizerST.setAttribute("rules", rST);
-											outputFileST.setAttribute("rules", rST);
-											headerFileST.setAttribute("rules", rST);
-										}
-										
-					}
-					else if ((_t.getType()==RULE)) {
-						GrammarAST tmp13_AST_in = (GrammarAST)_t;
-						match(_t,RULE);
-						_t = _t.getNextSibling();
-					}
-					else {
-						throw new NoViableAltException(_t);
-					}
-					
-					}
-				}
-				else {
-					if ( _cnt22>=1 ) { break _loop22; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt22++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StringTemplate  rule(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST mod = null;
-		
-		String r;
-		String initAction = null;
-		StringTemplate b;
-			// get the dfa for the BLOCK
-		GrammarAST block=rule_AST_in.getFirstChildWithType(BLOCK);
-		DFA dfa=block.getLookaheadDFA();
-			// init blockNestingLevel so it's block level RULE_BLOCK_NESTING_LEVEL
-			// for alts of rule
-			blockNestingLevel = RULE_BLOCK_NESTING_LEVEL-1;
-			Rule ruleDescr = grammar.getRule(rule_AST_in.getFirstChild().getText());
-		
-			// For syn preds, we don't want any AST code etc... in there.
-			// Save old templates ptr and restore later.  Base templates include Dbg.
-			StringTemplateGroup saveGroup = templates;
-			if ( ruleDescr.isSynPred ) {
-				templates = generator.getBaseTemplates();
-			}
-		
-		
-		try {      // for error handling
-			AST __t24 = _t;
-			GrammarAST tmp14_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			r=id.getText(); currentRuleName = r;
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				mod = _t==ASTNULL ? null : (GrammarAST)_t;
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			AST __t26 = _t;
-			GrammarAST tmp15_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				GrammarAST tmp16_AST_in = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t26;
-			_t = _t.getNextSibling();
-			AST __t28 = _t;
-			GrammarAST tmp17_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				GrammarAST tmp18_AST_in = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t28;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				AST __t31 = _t;
-				GrammarAST tmp19_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getFirstChild();
-				GrammarAST tmp20_AST_in = (GrammarAST)_t;
-				if ( _t==null ) throw new MismatchedTokenException();
-				_t = _t.getNextSibling();
-				_t = __t31;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop34:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp21_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop34;
-				}
-				
-			} while (true);
-			}
-			b=block(_t,"ruleBlock", dfa);
-			_t = _retTree;
-			
-						String description =
-							grammar.grammarTreeToString(rule_AST_in.getFirstChildWithType(BLOCK),
-			false);
-						description =
-			generator.target.getTargetStringLiteralFromString(description);
-						b.setAttribute("description", description);
-						// do not generate lexer rules in combined grammar
-						String stName = null;
-						if ( ruleDescr.isSynPred ) {
-							stName = "synpredRule";
-						}
-						else if ( grammar.type==Grammar.LEXER ) {
-							if ( r.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
-								stName = "tokensRule";
-							}
-							else {
-								stName = "lexerRule";
-							}
-						}
-						else {
-							if ( !(grammar.type==Grammar.COMBINED &&
-								 Character.isUpperCase(r.charAt(0))) )
-							{
-								stName = "rule";
-							}
-						}
-						code = templates.getInstanceOf(stName);
-						if ( code.getName().equals("rule") ) {
-							code.setAttribute("emptyRule",
-								Boolean.valueOf(grammar.isEmptyRule(block)));
-						}
-						code.setAttribute("ruleDescriptor", ruleDescr);
-						String memo = (String)rule_AST_in.getOption("memoize");
-						if ( memo==null ) {
-							memo = (String)grammar.getOption("memoize");
-						}
-						if ( memo!=null && memo.equals("true") &&
-						     (stName.equals("rule")||stName.equals("lexerRule")) )
-						{
-				code.setAttribute("memoize",
-					Boolean.valueOf(memo!=null && memo.equals("true")));
-			}
-						
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t,code);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp22_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			_t = __t24;
-			_t = _t.getNextSibling();
-			
-			if ( code!=null ) {
-						if ( grammar.type==Grammar.LEXER ) {
-					    	boolean naked =
-					    		r.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
-					    	    (mod!=null&&mod.getText().equals(Grammar.FRAGMENT_RULE_MODIFIER));
-					    	code.setAttribute("nakedBlock", Boolean.valueOf(naked));
-						}
-						else {
-							description =
-								grammar.grammarTreeToString(rule_AST_in,false);
-							description =
-							    generator.target.getTargetStringLiteralFromString(description);
-							code.setAttribute("description", description);
-						}
-						Rule theRule = grammar.getRule(r);
-						generator.translateActionAttributeReferencesForSingleScope(
-							theRule,
-							theRule.getActions()
-						);
-						code.setAttribute("ruleName", r);
-						code.setAttribute("block", b);
-						if ( initAction!=null ) {
-							code.setAttribute("initAction", initAction);
-						}
-			}
-					templates = saveGroup;
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final void modifier(AST _t) throws RecognitionException {
-		
-		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_protected:
-			{
-				GrammarAST tmp23_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_protected);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_public:
-			{
-				GrammarAST tmp24_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_public);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_private:
-			{
-				GrammarAST tmp25_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_private);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case FRAGMENT:
-			{
-				GrammarAST tmp26_AST_in = (GrammarAST)_t;
-				match(_t,FRAGMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ruleScopeSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t38 = _t;
-			GrammarAST tmp27_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ACTION:
-			{
-				GrammarAST tmp28_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			case ID:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop41:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ID)) {
-					GrammarAST tmp29_AST_in = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop41;
-				}
-				
-			} while (true);
-			}
-			_t = __t38;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StringTemplate  block(AST _t,
-		String blockTemplateName, DFA dfa
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StringTemplate decision = null;
-		if ( dfa!=null ) {
-		code = templates.getInstanceOf(blockTemplateName);
-		decision = generator.genLookaheadDecision(recognizerST,dfa);
-		code.setAttribute("decision", decision);
-		code.setAttribute("decisionNumber", dfa.getDecisionNumber());
-				code.setAttribute("maxK",dfa.getMaxLookaheadDepth());
-				code.setAttribute("maxAlt",dfa.getNumberOfAlts());
-		}
-		else {
-		code = templates.getInstanceOf(blockTemplateName+"SingleAlt");
-		}
-		blockNestingLevel++;
-		code.setAttribute("blockLevel", blockNestingLevel);
-		code.setAttribute("enclosingBlockLevel", blockNestingLevel-1);
-		StringTemplate alt = null;
-		StringTemplate rew = null;
-		StringTemplate sb = null;
-		GrammarAST r = null;
-		int altNum = 1;
-			if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
-		this.outerAltNum=1;
-		}
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			if (((_t.getType()==BLOCK))&&(block_AST_in.getSetValue()!=null)) {
-				sb=setBlock(_t);
-				_t = _retTree;
-				
-				code.setAttribute("alts",sb);
-				blockNestingLevel--;
-				
-			}
-			else if ((_t.getType()==BLOCK)) {
-				AST __t43 = _t;
-				GrammarAST tmp30_AST_in = (GrammarAST)_t;
-				match(_t,BLOCK);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case OPTIONS:
-				{
-					GrammarAST tmp31_AST_in = (GrammarAST)_t;
-					match(_t,OPTIONS);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case ALT:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				{
-				int _cnt46=0;
-				_loop46:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==ALT)) {
-						alt=alternative(_t);
-						_t = _retTree;
-						r=(GrammarAST)_t;
-						rew=rewrite(_t);
-						_t = _retTree;
-						
-						if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
-							this.outerAltNum++;
-						}
-						// add the rewrite code as just another element in the alt :)
-								  if ( rew!=null ) {
-								  	alt.setAttribute("elements.{el,line,pos}",
-								  		rew, Utils.integer(r.getLine()), Utils.integer(r.getColumn()));
-								  }
-								  // add this alt to the list of alts for this block
-						code.setAttribute("alts",alt);
-						alt.setAttribute("altNum", Utils.integer(altNum));
-						alt.setAttribute("outerAlt",
-						Boolean.valueOf(blockNestingLevel==RULE_BLOCK_NESTING_LEVEL));
-						altNum++;
-						
-					}
-					else {
-						if ( _cnt46>=1 ) { break _loop46; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt46++;
-				} while (true);
-				}
-				GrammarAST tmp32_AST_in = (GrammarAST)_t;
-				match(_t,EOB);
-				_t = _t.getNextSibling();
-				_t = __t43;
-				_t = _t.getNextSibling();
-				blockNestingLevel--;
-			}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final void exceptionGroup(AST _t,
-		StringTemplate ruleST
-	) throws RecognitionException {
-		
-		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			{
-				{
-				int _cnt50=0;
-				_loop50:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==LITERAL_catch)) {
-						exceptionHandler(_t,ruleST);
-						_t = _retTree;
-					}
-					else {
-						if ( _cnt50>=1 ) { break _loop50; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt50++;
-				} while (true);
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case LITERAL_finally:
-				{
-					finallyClause(_t,ruleST);
-					_t = _retTree;
-					break;
-				}
-				case EOR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				break;
-			}
-			case LITERAL_finally:
-			{
-				finallyClause(_t,ruleST);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StringTemplate  setBlock(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST setBlock_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST s = null;
-		
-		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
-		Rule r = grammar.getRule(currentRuleName);
-		currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
-		if ( currentAltHasASTRewrite ) {
-		r.trackTokenReferenceInAlt(setBlock_AST_in, outerAltNum);
-		}
-		}
-		
-		
-		try {      // for error handling
-			s = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getNextSibling();
-			
-			StringTemplate setcode =
-			getTokenElementST("matchSet", "set", s, null, null);
-			int i = ((TokenWithIndex)s.getToken()).getIndex();
-					setcode.setAttribute("elementIndex", i);
-					if ( grammar.type!=Grammar.LEXER ) {
-						generator.generateLocalFOLLOW(s,"set",currentRuleName,i);
-			}
-			setcode.setAttribute("s",
-			generator.genSetExpr(templates,s.getSetValue(),1,false));
-			StringTemplate altcode=templates.getInstanceOf("alt");
-					altcode.setAttribute("elements.{el,line,pos}",
-									     setcode,
-			Utils.integer(s.getLine()),
-			Utils.integer(s.getColumn())
-			);
-			altcode.setAttribute("altNum", Utils.integer(1));
-			altcode.setAttribute("outerAlt",
-			Boolean.valueOf(blockNestingLevel==RULE_BLOCK_NESTING_LEVEL));
-			if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
-			altcode.setAttribute("autoAST", Boolean.valueOf(true));
-			}
-			code = altcode;
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  alternative(AST _t) throws RecognitionException {
-		StringTemplate code=templates.getInstanceOf("alt");
-		
-		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST a = null;
-		
-		/*
-		// TODO: can we use Rule.altsWithRewrites???
-		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
-			GrammarAST aRewriteNode = #alternative.findFirstType(REWRITE);
-			if ( grammar.buildAST() &&
-				 (aRewriteNode!=null||
-				 (#alternative.getNextSibling()!=null &&
-				  #alternative.getNextSibling().getType()==REWRITE)) )
-			{
-				currentAltHasASTRewrite = true;
-			}
-			else {
-				currentAltHasASTRewrite = false;
-			}
-		}
-		*/
-		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
-		Rule r = grammar.getRule(currentRuleName);
-		currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
-		}
-		String description = grammar.grammarTreeToString(alternative_AST_in, false);
-		description = generator.target.getTargetStringLiteralFromString(description);
-		code.setAttribute("description", description);
-		if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
-			code.setAttribute("autoAST", Boolean.valueOf(true));
-		}
-		StringTemplate e;
-		
-		
-		try {      // for error handling
-			AST __t57 = _t;
-			a = _t==ASTNULL ? null :(GrammarAST)_t;
-			match(_t,ALT);
-			_t = _t.getFirstChild();
-			{
-			int _cnt59=0;
-			_loop59:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.getType()==ROOT||_t.getType()==RULE_REF||_t.g [...]
-					GrammarAST elAST=(GrammarAST)_t;
-					e=element(_t,null,null);
-					_t = _retTree;
-					
-								if ( e!=null ) {
-										code.setAttribute("elements.{el,line,pos}",
-														  e,
-														  Utils.integer(elAST.getLine()),
-														  Utils.integer(elAST.getColumn())
-														 );
-								}
-								
-				}
-				else {
-					if ( _cnt59>=1 ) { break _loop59; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt59++;
-			} while (true);
-			}
-			GrammarAST tmp33_AST_in = (GrammarAST)_t;
-			match(_t,EOA);
-			_t = _t.getNextSibling();
-			_t = __t57;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST r = null;
-		GrammarAST pred = null;
-		
-		StringTemplate alt;
-		if ( rewrite_AST_in.getType()==REWRITE ) {
-			if ( generator.grammar.buildTemplate() ) {
-				code = templates.getInstanceOf("rewriteTemplate");
-			}
-			else {
-				code = templates.getInstanceOf("rewriteCode");
-				code.setAttribute("treeLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
-				code.setAttribute("rewriteBlockLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
-		code.setAttribute("referencedElementsDeep",
-		getTokenTypesAsTargetLabels(rewrite_AST_in.rewriteRefsDeep));
-		Set<String> tokenLabels =
-		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.TOKEN_LABEL);
-		Set<String> tokenListLabels =
-		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.TOKEN_LIST_LABEL);
-		Set<String> ruleLabels =
-		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.RULE_LABEL);
-		Set<String> ruleListLabels =
-		grammar.getLabels(rewrite_AST_in.rewriteRefsDeep, Grammar.RULE_LIST_LABEL);
-		// just in case they ref $r for "previous value", make a stream
-		// from retval.tree
-		StringTemplate retvalST = templates.getInstanceOf("prevRuleRootRef");
-		ruleLabels.add(retvalST.toString());
-		code.setAttribute("referencedTokenLabels", tokenLabels);
-		code.setAttribute("referencedTokenListLabels", tokenListLabels);
-		code.setAttribute("referencedRuleLabels", ruleLabels);
-		code.setAttribute("referencedRuleListLabels", ruleListLabels);
-			}
-		}
-		
-		
-		try {      // for error handling
-			{
-			_loop95:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==REWRITE)) {
-					rewriteRuleRefs = new HashSet();
-					AST __t93 = _t;
-					r = _t==ASTNULL ? null :(GrammarAST)_t;
-					match(_t,REWRITE);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case SEMPRED:
-					{
-						pred = (GrammarAST)_t;
-						match(_t,SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case ALT:
-					case TEMPLATE:
-					case ACTION:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					alt=rewrite_alternative(_t);
-					_t = _retTree;
-					_t = __t93;
-					_t = _t.getNextSibling();
-					
-					rewriteBlockNestingLevel = OUTER_REWRITE_NESTING_LEVEL;
-								List predChunks = null;
-								if ( pred!=null ) {
-									//predText = #pred.getText();
-							predChunks = generator.translateAction(currentRuleName,pred);
-								}
-								String description =
-								    grammar.grammarTreeToString(r,false);
-								description = generator.target.getTargetStringLiteralFromString(description);
-								code.setAttribute("alts.{pred,alt,description}",
-												  predChunks,
-												  alt,
-												  description);
-								pred=null;
-								
-				}
-				else {
-					break _loop95;
-				}
-				
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final void exceptionHandler(AST _t,
-		StringTemplate ruleST
-	) throws RecognitionException {
-		
-		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t53 = _t;
-			GrammarAST tmp34_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_catch);
-			_t = _t.getFirstChild();
-			GrammarAST tmp35_AST_in = (GrammarAST)_t;
-			match(_t,ARG_ACTION);
-			_t = _t.getNextSibling();
-			GrammarAST tmp36_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t53;
-			_t = _t.getNextSibling();
-			
-				List chunks = generator.translateAction(currentRuleName,tmp36_AST_in);
-				ruleST.setAttribute("exceptions.{decl,action}",tmp35_AST_in.getText(),chunks);
-				
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void finallyClause(AST _t,
-		StringTemplate ruleST
-	) throws RecognitionException {
-		
-		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t55 = _t;
-			GrammarAST tmp37_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_finally);
-			_t = _t.getFirstChild();
-			GrammarAST tmp38_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t55;
-			_t = _t.getNextSibling();
-			
-				List chunks = generator.translateAction(currentRuleName,tmp38_AST_in);
-				ruleST.setAttribute("finally",chunks);
-				
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StringTemplate  element(AST _t,
-		GrammarAST label, GrammarAST astSuffix
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST n = null;
-		GrammarAST alabel = null;
-		GrammarAST label2 = null;
-		GrammarAST a = null;
-		GrammarAST b = null;
-		GrammarAST sp = null;
-		GrammarAST gsp = null;
-		
-		IntSet elements=null;
-		GrammarAST ast = null;
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				AST __t61 = _t;
-				GrammarAST tmp39_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getFirstChild();
-				code=element(_t,label,tmp39_AST_in);
-				_t = _retTree;
-				_t = __t61;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				AST __t62 = _t;
-				GrammarAST tmp40_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getFirstChild();
-				code=element(_t,label,tmp40_AST_in);
-				_t = _retTree;
-				_t = __t62;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case NOT:
-			{
-				AST __t63 = _t;
-				n = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				code=notElement(_t,n, label, astSuffix);
-				_t = _retTree;
-				_t = __t63;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t64 = _t;
-				GrammarAST tmp41_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				alabel = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				code=element(_t,alabel,astSuffix);
-				_t = _retTree;
-				_t = __t64;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PLUS_ASSIGN:
-			{
-				AST __t65 = _t;
-				GrammarAST tmp42_AST_in = (GrammarAST)_t;
-				match(_t,PLUS_ASSIGN);
-				_t = _t.getFirstChild();
-				label2 = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				code=element(_t,label2,astSuffix);
-				_t = _retTree;
-				_t = __t65;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t66 = _t;
-				GrammarAST tmp43_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				a = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				b = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t66;
-				_t = _t.getNextSibling();
-				code = templates.getInstanceOf("charRangeRef");
-						 String low =
-						 	generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,a.getText());
-						 String high =
-						 	generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,b.getText());
-				code.setAttribute("a", low);
-				code.setAttribute("b", high);
-				if ( label!=null ) {
-				code.setAttribute("label", label.getText());
-				}
-				
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				code=tree(_t);
-				_t = _retTree;
-				break;
-			}
-			case ACTION:
-			{
-				code=element_action(_t);
-				_t = _retTree;
-				break;
-			}
-			case GATED_SEMPRED:
-			case SEMPRED:
-			{
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case SEMPRED:
-				{
-					sp = (GrammarAST)_t;
-					match(_t,SEMPRED);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case GATED_SEMPRED:
-				{
-					gsp = (GrammarAST)_t;
-					match(_t,GATED_SEMPRED);
-					_t = _t.getNextSibling();
-					sp=gsp;
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				
-				code = templates.getInstanceOf("validateSemanticPredicate");
-				code.setAttribute("pred", generator.translateAction(currentRuleName,sp));
-						String description =
-							generator.target.getTargetStringLiteralFromString(sp.getText());
-						code.setAttribute("description", description);
-				
-				break;
-			}
-			case SYN_SEMPRED:
-			{
-				GrammarAST tmp44_AST_in = (GrammarAST)_t;
-				match(_t,SYN_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BACKTRACK_SEMPRED:
-			{
-				GrammarAST tmp45_AST_in = (GrammarAST)_t;
-				match(_t,BACKTRACK_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case EPSILON:
-			{
-				GrammarAST tmp46_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-				if (_t==null) _t=ASTNULL;
-				if ((((_t.getType() >= BLOCK && _t.getType() <= POSITIVE_CLOSURE)))&&(element_AST_in.getSetValue()==null)) {
-					code=ebnf(_t);
-					_t = _retTree;
-				}
-				else if ((_t.getType()==BLOCK||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==WILDCARD)) {
-					code=atom(_t,label, astSuffix);
-					_t = _retTree;
-				}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  notElement(AST _t,
-		GrammarAST n, GrammarAST label, GrammarAST astSuffix
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST notElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST assign_c = null;
-		GrammarAST assign_s = null;
-		GrammarAST assign_t = null;
-		GrammarAST assign_st = null;
-		
-		IntSet elements=null;
-		String labelText = null;
-		if ( label!=null ) {
-		labelText = label.getText();
-		}
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case CHAR_LITERAL:
-			{
-				assign_c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				
-				int ttype=0;
-				if ( grammar.type==Grammar.LEXER ) {
-				ttype = Grammar.getCharValueFromGrammarCharLiteral(assign_c.getText());
-				}
-				else {
-				ttype = grammar.getTokenType(assign_c.getText());
-				}
-				elements = grammar.complement(ttype);
-				
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				assign_s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				
-				int ttype=0;
-				if ( grammar.type==Grammar.LEXER ) {
-				// TODO: error!
-				}
-				else {
-				ttype = grammar.getTokenType(assign_s.getText());
-				}
-				elements = grammar.complement(ttype);
-				
-				break;
-			}
-			case TOKEN_REF:
-			{
-				assign_t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				
-				int ttype = grammar.getTokenType(assign_t.getText());
-				elements = grammar.complement(ttype);
-				
-				break;
-			}
-			case BLOCK:
-			{
-				assign_st = (GrammarAST)_t;
-				match(_t,BLOCK);
-				_t = _t.getNextSibling();
-				
-				elements = assign_st.getSetValue();
-				elements = grammar.complement(elements);
-				
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			
-			code = getTokenElementST("matchSet",
-			"set",
-			(GrammarAST)n.getFirstChild(),
-			astSuffix,
-			labelText);
-			code.setAttribute("s",generator.genSetExpr(templates,elements,1,false));
-			int i = ((TokenWithIndex)n.getToken()).getIndex();
-			code.setAttribute("elementIndex", i);
-			if ( grammar.type!=Grammar.LEXER ) {
-			generator.generateLocalFOLLOW(n,"set",currentRuleName,i);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  ebnf(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		DFA dfa=null;
-		GrammarAST b = (GrammarAST)ebnf_AST_in.getFirstChild();
-		GrammarAST eob = (GrammarAST)b.getLastChild(); // loops will use EOB DFA
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case BLOCK:
-			{
-				dfa = ebnf_AST_in.getLookaheadDFA();
-				code=block(_t,"block", dfa);
-				_t = _retTree;
-				break;
-			}
-			case OPTIONAL:
-			{
-				dfa = ebnf_AST_in.getLookaheadDFA();
-				AST __t73 = _t;
-				GrammarAST tmp47_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				code=block(_t,"optionalBlock", dfa);
-				_t = _retTree;
-				_t = __t73;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CLOSURE:
-			{
-				dfa = eob.getLookaheadDFA();
-				AST __t74 = _t;
-				GrammarAST tmp48_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				code=block(_t,"closureBlock", dfa);
-				_t = _retTree;
-				_t = __t74;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				dfa = eob.getLookaheadDFA();
-				AST __t75 = _t;
-				GrammarAST tmp49_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				code=block(_t,"positiveClosureBlock", dfa);
-				_t = _retTree;
-				_t = __t75;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			
-					String description = grammar.grammarTreeToString(ebnf_AST_in, false);
-					description = generator.target.getTargetStringLiteralFromString(description);
-				code.setAttribute("description", description);
-				
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  atom(AST _t,
-		GrammarAST label, GrammarAST astSuffix
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST r = null;
-		GrammarAST rarg = null;
-		GrammarAST t = null;
-		GrammarAST targ = null;
-		GrammarAST c = null;
-		GrammarAST s = null;
-		GrammarAST w = null;
-		
-		String labelText=null;
-		if ( label!=null ) {
-		labelText = label.getText();
-		}
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case RULE_REF:
-			{
-				AST __t83 = _t;
-				r = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,RULE_REF);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ARG_ACTION:
-				{
-					rarg = (GrammarAST)_t;
-					match(_t,ARG_ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t83;
-				_t = _t.getNextSibling();
-				
-				grammar.checkRuleReference(r, rarg, currentRuleName);
-				Rule rdef = grammar.getRule(r.getText());
-				// don't insert label=r() if $label.attr not used, no ret value, ...
-				if ( !rdef.getHasReturnValue() ) {
-				labelText = null;
-				}
-				code = getRuleElementST("ruleRef", r.getText(), r, astSuffix, labelText);
-						code.setAttribute("rule", r.getText());
-				
-						if ( rarg!=null ) {
-							List args = generator.translateAction(currentRuleName,rarg);
-							code.setAttribute("args", args);
-						}
-				int i = ((TokenWithIndex)r.getToken()).getIndex();
-						code.setAttribute("elementIndex", i);
-						generator.generateLocalFOLLOW(r,r.getText(),currentRuleName,i);
-						r.code = code;
-				
-				break;
-			}
-			case TOKEN_REF:
-			{
-				AST __t85 = _t;
-				t = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ARG_ACTION:
-				{
-					targ = (GrammarAST)_t;
-					match(_t,ARG_ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t85;
-				_t = _t.getNextSibling();
-				
-				grammar.checkRuleReference(t, targ, currentRuleName);
-						   if ( grammar.type==Grammar.LEXER ) {
-								if ( grammar.getTokenType(t.getText())==Label.EOF ) {
-									code = templates.getInstanceOf("lexerMatchEOF");
-								}
-							    else {
-									code = templates.getInstanceOf("lexerRuleRef");
-				if ( isListLabel(labelText) ) {
-				code = templates.getInstanceOf("lexerRuleRefAndListLabel");
-				}
-									code.setAttribute("rule", t.getText());
-									if ( targ!=null ) {
-										List args = generator.translateAction(currentRuleName,targ);
-										code.setAttribute("args", args);
-									}
-								}
-				int i = ((TokenWithIndex)t.getToken()).getIndex();
-							    code.setAttribute("elementIndex", i);
-							    if ( label!=null ) code.setAttribute("label", labelText);
-						   }
-						   else {
-								code = getTokenElementST("tokenRef", t.getText(), t, astSuffix, labelText);
-								String tokenLabel =
-								   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
-								code.setAttribute("token",tokenLabel);
-				int i = ((TokenWithIndex)t.getToken()).getIndex();
-							    code.setAttribute("elementIndex", i);
-							    generator.generateLocalFOLLOW(t,tokenLabel,currentRuleName,i);
-						   }
-						   t.code = code;
-						
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				
-						if ( grammar.type==Grammar.LEXER ) {
-							code = templates.getInstanceOf("charRef");
-							code.setAttribute("char",
-							   generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,c.getText()));
-							if ( label!=null ) {
-								code.setAttribute("label", labelText);
-							}
-						}
-						else { // else it's a token type reference
-							code = getTokenElementST("tokenRef", "char_literal", c, astSuffix, labelText);
-							String tokenLabel = generator.getTokenTypeAsTargetLabel(grammar.getTokenType(c.getText()));
-							code.setAttribute("token",tokenLabel);
-				int i = ((TokenWithIndex)c.getToken()).getIndex();
-							code.setAttribute("elementIndex", i);
-							generator.generateLocalFOLLOW(c,tokenLabel,currentRuleName,i);
-						}
-				
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				
-						if ( grammar.type==Grammar.LEXER ) {
-							code = templates.getInstanceOf("lexerStringRef");
-							code.setAttribute("string",
-							   generator.target.getTargetStringLiteralFromANTLRStringLiteral(generator,s.getText()));
-							if ( label!=null ) {
-								code.setAttribute("label", labelText);
-							}
-						}
-						else { // else it's a token type reference
-							code = getTokenElementST("tokenRef", "string_literal", s, astSuffix, labelText);
-							String tokenLabel =
-							   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(s.getText()));
-							code.setAttribute("token",tokenLabel);
-				int i = ((TokenWithIndex)s.getToken()).getIndex();
-							code.setAttribute("elementIndex", i);
-							generator.generateLocalFOLLOW(s,tokenLabel,currentRuleName,i);
-						}
-						
-				break;
-			}
-			case WILDCARD:
-			{
-				w = (GrammarAST)_t;
-				match(_t,WILDCARD);
-				_t = _t.getNextSibling();
-				
-						code = getWildcardST(w,astSuffix,labelText);
-						code.setAttribute("elementIndex", ((TokenWithIndex)w.getToken()).getIndex());
-						
-				break;
-			}
-			case BLOCK:
-			{
-				code=set(_t,label,astSuffix);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  tree(AST _t) throws RecognitionException {
-		StringTemplate code=templates.getInstanceOf("tree");
-		
-		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StringTemplate el=null, act=null;
-		GrammarAST elAST=null, actAST=null;
-		NFAState afterDOWN = (NFAState)tree_AST_in.NFATreeDownState.transition(0).target;
-		LookaheadSet s = grammar.LOOK(afterDOWN);
-		if ( s.member(Label.UP) ) {
-			// nullable child list if we can see the UP as the next token
-			// we need an "if ( input.LA(1)==Token.DOWN )" gate around
-			// the child list.
-			code.setAttribute("nullableChildList", "true");
-		}
-		
-		
-		try {      // for error handling
-			AST __t77 = _t;
-			GrammarAST tmp50_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			elAST=(GrammarAST)_t;
-			el=element(_t,null,null);
-			_t = _retTree;
-			
-			code.setAttribute("root.{el,line,pos}",
-										  el,
-										  Utils.integer(elAST.getLine()),
-										  Utils.integer(elAST.getColumn())
-										  );
-			
-			{
-			_loop79:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ACTION)) {
-					actAST=(GrammarAST)_t;
-					act=element_action(_t);
-					_t = _retTree;
-					
-					code.setAttribute("actionsAfterRoot.{el,line,pos}",
-					act,
-					Utils.integer(actAST.getLine()),
-					Utils.integer(actAST.getColumn())
-					);
-					
-				}
-				else {
-					break _loop79;
-				}
-				
-			} while (true);
-			}
-			{
-			_loop81:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.getType()==ROOT||_t.getType()==RULE_REF||_t.g [...]
-					elAST=(GrammarAST)_t;
-					el=element(_t,null,null);
-					_t = _retTree;
-					
-								 code.setAttribute("children.{el,line,pos}",
-												  el,
-												  Utils.integer(elAST.getLine()),
-												  Utils.integer(elAST.getColumn())
-												  );
-								
-				}
-				else {
-					break _loop81;
-				}
-				
-			} while (true);
-			}
-			_t = __t77;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  element_action(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST element_action_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST act = null;
-		
-		try {      // for error handling
-			act = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			
-			code = templates.getInstanceOf("execAction");
-			code.setAttribute("action", generator.translateAction(currentRuleName,act));
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  set(AST _t,
-		GrammarAST label, GrammarAST astSuffix
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST set_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST s = null;
-		
-		String labelText=null;
-		if ( label!=null ) {
-		labelText = label.getText();
-		}
-		
-		
-		try {      // for error handling
-			s = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getNextSibling();
-			
-			code = getTokenElementST("matchSet", "set", s, astSuffix, labelText);
-			int i = ((TokenWithIndex)s.getToken()).getIndex();
-					code.setAttribute("elementIndex", i);
-					if ( grammar.type!=Grammar.LEXER ) {
-						generator.generateLocalFOLLOW(s,"set",currentRuleName,i);
-			}
-			code.setAttribute("s", generator.genSetExpr(templates,s.getSetValue(),1,false));
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final void ast_suffix(AST _t) throws RecognitionException {
-		
-		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				GrammarAST tmp51_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				GrammarAST tmp52_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void setElement(AST _t) throws RecognitionException {
-		
-		GrammarAST setElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST c = null;
-		GrammarAST t = null;
-		GrammarAST s = null;
-		GrammarAST c1 = null;
-		GrammarAST c2 = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TOKEN_REF:
-			{
-				t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t90 = _t;
-				GrammarAST tmp53_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				c1 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				c2 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t90;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StringTemplate  rewrite_alternative(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST a = null;
-		
-		StringTemplate el,st;
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			if (((_t.getType()==ALT))&&(generator.grammar.buildAST())) {
-				AST __t99 = _t;
-				a = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,ALT);
-				_t = _t.getFirstChild();
-				code=templates.getInstanceOf("rewriteElementList");
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case OPTIONAL:
-				case CLOSURE:
-				case POSITIVE_CLOSURE:
-				case LABEL:
-				case ACTION:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case TOKEN_REF:
-				case RULE_REF:
-				case TREE_BEGIN:
-				{
-					{
-					int _cnt102=0;
-					_loop102:
-					do {
-						if (_t==null) _t=ASTNULL;
-						if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
-							GrammarAST elAST=(GrammarAST)_t;
-							el=rewrite_element(_t);
-							_t = _retTree;
-							code.setAttribute("elements.{el,line,pos}",
-												 					el,
-														  		Utils.integer(elAST.getLine()),
-														  		Utils.integer(elAST.getColumn())
-												 					);
-												
-						}
-						else {
-							if ( _cnt102>=1 ) { break _loop102; } else {throw new NoViableAltException(_t);}
-						}
-						
-						_cnt102++;
-					} while (true);
-					}
-					break;
-				}
-				case EPSILON:
-				{
-					GrammarAST tmp54_AST_in = (GrammarAST)_t;
-					match(_t,EPSILON);
-					_t = _t.getNextSibling();
-					code.setAttribute("elements.{el,line,pos}",
-												   templates.getInstanceOf("rewriteEmptyAlt"),
-												   Utils.integer(a.getLine()),
-												   Utils.integer(a.getColumn())
-										 			   );
-									
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				GrammarAST tmp55_AST_in = (GrammarAST)_t;
-				match(_t,EOA);
-				_t = _t.getNextSibling();
-				_t = __t99;
-				_t = _t.getNextSibling();
-			}
-			else if (((_t.getType()==ALT||_t.getType()==TEMPLATE||_t.getType()==ACTION))&&(generator.grammar.buildTemplate())) {
-				code=rewrite_template(_t);
-				_t = _retTree;
-			}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite_block(AST _t,
-		String blockTemplateName
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		rewriteBlockNestingLevel++;
-		code = templates.getInstanceOf(blockTemplateName);
-		StringTemplate save_currentBlockST = currentBlockST;
-		currentBlockST = code;
-		code.setAttribute("rewriteBlockLevel", rewriteBlockNestingLevel);
-		StringTemplate alt=null;
-		
-		
-		try {      // for error handling
-			AST __t97 = _t;
-			GrammarAST tmp56_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			
-			currentBlockST.setAttribute("referencedElementsDeep",
-			getTokenTypesAsTargetLabels(tmp56_AST_in.rewriteRefsDeep));
-			currentBlockST.setAttribute("referencedElements",
-			getTokenTypesAsTargetLabels(tmp56_AST_in.rewriteRefsShallow));
-			
-			alt=rewrite_alternative(_t);
-			_t = _retTree;
-			GrammarAST tmp57_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t97;
-			_t = _t.getNextSibling();
-			
-				code.setAttribute("alt", alt);
-				rewriteBlockNestingLevel--;
-				currentBlockST = save_currentBlockST;
-				
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite_element(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		IntSet elements=null;
-		GrammarAST ast = null;
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LABEL:
-			case ACTION:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			{
-				code=rewrite_atom(_t,false);
-				_t = _retTree;
-				break;
-			}
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			{
-				code=rewrite_ebnf(_t);
-				_t = _retTree;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				code=rewrite_tree(_t);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite_template(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_template_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST ind = null;
-		GrammarAST arg = null;
-		GrammarAST a = null;
-		GrammarAST act = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ALT:
-			{
-				AST __t117 = _t;
-				GrammarAST tmp58_AST_in = (GrammarAST)_t;
-				match(_t,ALT);
-				_t = _t.getFirstChild();
-				GrammarAST tmp59_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				GrammarAST tmp60_AST_in = (GrammarAST)_t;
-				match(_t,EOA);
-				_t = _t.getNextSibling();
-				_t = __t117;
-				_t = _t.getNextSibling();
-				code=templates.getInstanceOf("rewriteEmptyTemplate");
-				break;
-			}
-			case TEMPLATE:
-			{
-				AST __t118 = _t;
-				GrammarAST tmp61_AST_in = (GrammarAST)_t;
-				match(_t,TEMPLATE);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ID:
-				{
-					id = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case ACTION:
-				{
-					ind = (GrammarAST)_t;
-					match(_t,ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				
-						   if ( id!=null && id.getText().equals("template") ) {
-						   		code = templates.getInstanceOf("rewriteInlineTemplate");
-						   }
-						   else if ( id!=null ) {
-						   		code = templates.getInstanceOf("rewriteExternalTemplate");
-						   		code.setAttribute("name", id.getText());
-						   }
-						   else if ( ind!=null ) { // must be %({expr})(args)
-						   		code = templates.getInstanceOf("rewriteIndirectTemplate");
-								List chunks=generator.translateAction(currentRuleName,ind);
-						   		code.setAttribute("expr", chunks);
-						   }
-						
-				AST __t120 = _t;
-				GrammarAST tmp62_AST_in = (GrammarAST)_t;
-				match(_t,ARGLIST);
-				_t = _t.getFirstChild();
-				{
-				_loop123:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==ARG)) {
-						AST __t122 = _t;
-						GrammarAST tmp63_AST_in = (GrammarAST)_t;
-						match(_t,ARG);
-						_t = _t.getFirstChild();
-						arg = (GrammarAST)_t;
-						match(_t,ID);
-						_t = _t.getNextSibling();
-						a = (GrammarAST)_t;
-						match(_t,ACTION);
-						_t = _t.getNextSibling();
-						
-						// must set alt num here rather than in define.g
-						// because actions like %foo(name={$ID.text}) aren't
-						// broken up yet into trees.
-										   a.outerAltNum = this.outerAltNum;
-								   		   List chunks = generator.translateAction(currentRuleName,a);
-								   		   code.setAttribute("args.{name,value}", arg.getText(), chunks);
-								   		
-						_t = __t122;
-						_t = _t.getNextSibling();
-					}
-					else {
-						break _loop123;
-					}
-					
-				} while (true);
-				}
-				_t = __t120;
-				_t = _t.getNextSibling();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case DOUBLE_QUOTE_STRING_LITERAL:
-				{
-					GrammarAST tmp64_AST_in = (GrammarAST)_t;
-					match(_t,DOUBLE_QUOTE_STRING_LITERAL);
-					_t = _t.getNextSibling();
-					
-					String sl = tmp64_AST_in.getText();
-								 String t = sl.substring(1,sl.length()-1); // strip quotes
-								 t = generator.target.getTargetStringLiteralFromString(t);
-					code.setAttribute("template",t);
-					
-					break;
-				}
-				case DOUBLE_ANGLE_STRING_LITERAL:
-				{
-					GrammarAST tmp65_AST_in = (GrammarAST)_t;
-					match(_t,DOUBLE_ANGLE_STRING_LITERAL);
-					_t = _t.getNextSibling();
-					
-					String sl = tmp65_AST_in.getText();
-								 String t = sl.substring(2,sl.length()-2); // strip double angle quotes
-								 t = generator.target.getTargetStringLiteralFromString(t);
-					code.setAttribute("template",t);
-					
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t118;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ACTION:
-			{
-				act = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				
-				// set alt num for same reason as ARGLIST above
-				act.outerAltNum = this.outerAltNum;
-						code=templates.getInstanceOf("rewriteAction");
-						code.setAttribute("action",
-										  generator.translateAction(currentRuleName,act));
-						
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite_atom(AST _t,
-		boolean isRoot
-	) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST r = null;
-		GrammarAST arg = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case RULE_REF:
-			{
-				r = (GrammarAST)_t;
-				match(_t,RULE_REF);
-				_t = _t.getNextSibling();
-				
-					String ruleRefName = r.getText();
-					String stName = "rewriteRuleRef";
-					if ( isRoot ) {
-						stName += "Root";
-					}
-					code = templates.getInstanceOf(stName);
-					code.setAttribute("rule", ruleRefName);
-					if ( grammar.getRule(ruleRefName)==null ) {
-							ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
-													  grammar,
-													  ((GrammarAST)(r)).getToken(),
-													  ruleRefName);
-						code = new StringTemplate(); // blank; no code gen
-					}
-					else if ( grammar.getRule(currentRuleName)
-							     .getRuleRefsInAlt(ruleRefName,outerAltNum)==null )
-						{
-							ErrorManager.grammarError(ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS,
-													  grammar,
-													  ((GrammarAST)(r)).getToken(),
-													  ruleRefName);
-						code = new StringTemplate(); // blank; no code gen
-					}
-					else {
-						// track all rule refs as we must copy 2nd ref to rule and beyond
-						if ( !rewriteRuleRefs.contains(ruleRefName) ) {
-					    		rewriteRuleRefs.add(ruleRefName);
-						}
-						}
-					
-				break;
-			}
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			{
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case TOKEN_REF:
-				{
-					AST __t114 = _t;
-					GrammarAST tmp66_AST_in = (GrammarAST)_t;
-					match(_t,TOKEN_REF);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case ARG_ACTION:
-					{
-						arg = (GrammarAST)_t;
-						match(_t,ARG_ACTION);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t114;
-					_t = _t.getNextSibling();
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					GrammarAST tmp67_AST_in = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case STRING_LITERAL:
-				{
-					GrammarAST tmp68_AST_in = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				
-					String tokenName = rewrite_atom_AST_in.getText();
-					String stName = "rewriteTokenRef";
-					Rule rule = grammar.getRule(currentRuleName);
-					Set tokenRefsInAlt = rule.getTokenRefsInAlt(outerAltNum);
-					boolean imaginary = !tokenRefsInAlt.contains(tokenName);
-					if ( imaginary ) {
-						stName = "rewriteImaginaryTokenRef";
-					}
-					if ( isRoot ) {
-						stName += "Root";
-					}
-					code = templates.getInstanceOf(stName);
-					if ( arg!=null ) {
-							List args = generator.translateAction(currentRuleName,arg);
-							code.setAttribute("args", args);
-					}
-						code.setAttribute("elementIndex", ((TokenWithIndex)rewrite_atom_AST_in.getToken()).getIndex());
-						int ttype = grammar.getTokenType(tokenName);
-						String tok = generator.getTokenTypeAsTargetLabel(ttype);
-					code.setAttribute("token", tok);
-					if ( grammar.getTokenType(tokenName)==Label.INVALID ) {
-							ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE,
-													  grammar,
-													  ((GrammarAST)(rewrite_atom_AST_in)).getToken(),
-													  tokenName);
-						code = new StringTemplate(); // blank; no code gen
-					}
-					
-				break;
-			}
-			case LABEL:
-			{
-				GrammarAST tmp69_AST_in = (GrammarAST)_t;
-				match(_t,LABEL);
-				_t = _t.getNextSibling();
-				
-					String labelName = tmp69_AST_in.getText();
-					Rule rule = grammar.getRule(currentRuleName);
-					Grammar.LabelElementPair pair = rule.getLabel(labelName);
-					if ( labelName.equals(currentRuleName) ) {
-						// special case; ref to old value via $rule
-						StringTemplate labelST = templates.getInstanceOf("prevRuleRootRef");
-						code = templates.getInstanceOf("rewriteRuleLabelRef"+(isRoot?"Root":""));
-						code.setAttribute("label", labelST);
-					}
-					else if ( pair==null ) {
-							ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE,
-													  grammar,
-													  ((GrammarAST)(tmp69_AST_in)).getToken(),
-													  labelName);
-							code = new StringTemplate();
-					}
-					else {
-							String stName = null;
-							switch ( pair.type ) {
-								case Grammar.TOKEN_LABEL :
-									stName = "rewriteTokenLabelRef";
-									break;
-								case Grammar.RULE_LABEL :
-									stName = "rewriteRuleLabelRef";
-									break;
-								case Grammar.TOKEN_LIST_LABEL :
-									stName = "rewriteTokenListLabelRef";
-									break;
-								case Grammar.RULE_LIST_LABEL :
-									stName = "rewriteRuleListLabelRef";
-									break;
-							}
-							if ( isRoot ) {
-								stName += "Root";
-							}
-							code = templates.getInstanceOf(stName);
-							code.setAttribute("label", labelName);
-						}
-					
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp70_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				
-				// actions in rewrite rules yield a tree object
-				String actText = tmp70_AST_in.getText();
-				List chunks = generator.translateAction(currentRuleName,tmp70_AST_in);
-						code = templates.getInstanceOf("rewriteNodeAction"+(isRoot?"Root":""));
-						code.setAttribute("action", chunks);
-				
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite_ebnf(AST _t) throws RecognitionException {
-		StringTemplate code=null;
-		
-		GrammarAST rewrite_ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONAL:
-			{
-				AST __t105 = _t;
-				GrammarAST tmp71_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				code=rewrite_block(_t,"rewriteOptionalBlock");
-				_t = _retTree;
-				_t = __t105;
-				_t = _t.getNextSibling();
-				
-						String description = grammar.grammarTreeToString(rewrite_ebnf_AST_in, false);
-						description = generator.target.getTargetStringLiteralFromString(description);
-						code.setAttribute("description", description);
-						
-				break;
-			}
-			case CLOSURE:
-			{
-				AST __t106 = _t;
-				GrammarAST tmp72_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				code=rewrite_block(_t,"rewriteClosureBlock");
-				_t = _retTree;
-				_t = __t106;
-				_t = _t.getNextSibling();
-				
-						String description = grammar.grammarTreeToString(rewrite_ebnf_AST_in, false);
-						description = generator.target.getTargetStringLiteralFromString(description);
-						code.setAttribute("description", description);
-						
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				AST __t107 = _t;
-				GrammarAST tmp73_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				code=rewrite_block(_t,"rewritePositiveClosureBlock");
-				_t = _retTree;
-				_t = __t107;
-				_t = _t.getNextSibling();
-				
-						String description = grammar.grammarTreeToString(rewrite_ebnf_AST_in, false);
-						description = generator.target.getTargetStringLiteralFromString(description);
-						code.setAttribute("description", description);
-						
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	public final StringTemplate  rewrite_tree(AST _t) throws RecognitionException {
-		StringTemplate code=templates.getInstanceOf("rewriteTree");
-		
-		GrammarAST rewrite_tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		rewriteTreeNestingLevel++;
-		code.setAttribute("treeLevel", rewriteTreeNestingLevel);
-		code.setAttribute("enclosingTreeLevel", rewriteTreeNestingLevel-1);
-		StringTemplate r, el;
-		GrammarAST elAST=null;
-		
-		
-		try {      // for error handling
-			AST __t109 = _t;
-			GrammarAST tmp74_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			elAST=(GrammarAST)_t;
-			r=rewrite_atom(_t,true);
-			_t = _retTree;
-			code.setAttribute("root.{el,line,pos}",
-										   r,
-										   Utils.integer(elAST.getLine()),
-										   Utils.integer(elAST.getColumn())
-										  );
-						
-			{
-			_loop111:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
-					elAST=(GrammarAST)_t;
-					el=rewrite_element(_t);
-					_t = _retTree;
-					
-								  code.setAttribute("children.{el,line,pos}",
-												    el,
-												    Utils.integer(elAST.getLine()),
-												    Utils.integer(elAST.getColumn())
-												    );
-								
-				}
-				else {
-					break _loop111;
-				}
-				
-			} while (true);
-			}
-			_t = __t109;
-			_t = _t.getNextSibling();
-			
-					String description = grammar.grammarTreeToString(rewrite_tree_AST_in, false);
-					description = generator.target.getTargetStringLiteralFromString(description);
-					code.setAttribute("description", description);
-				rewriteTreeNestingLevel--;
-					
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return code;
-	}
-	
-	
-	public static final String[] _tokenNames = {
-		"<0>",
-		"EOF",
-		"<2>",
-		"NULL_TREE_LOOKAHEAD",
-		"\"options\"",
-		"\"tokens\"",
-		"\"parser\"",
-		"LEXER",
-		"RULE",
-		"BLOCK",
-		"OPTIONAL",
-		"CLOSURE",
-		"POSITIVE_CLOSURE",
-		"SYNPRED",
-		"RANGE",
-		"CHAR_RANGE",
-		"EPSILON",
-		"ALT",
-		"EOR",
-		"EOB",
-		"EOA",
-		"ID",
-		"ARG",
-		"ARGLIST",
-		"RET",
-		"LEXER_GRAMMAR",
-		"PARSER_GRAMMAR",
-		"TREE_GRAMMAR",
-		"COMBINED_GRAMMAR",
-		"INITACTION",
-		"LABEL",
-		"TEMPLATE",
-		"\"scope\"",
-		"GATED_SEMPRED",
-		"SYN_SEMPRED",
-		"BACKTRACK_SEMPRED",
-		"\"fragment\"",
-		"ACTION",
-		"DOC_COMMENT",
-		"SEMI",
-		"\"lexer\"",
-		"\"tree\"",
-		"\"grammar\"",
-		"AMPERSAND",
-		"COLON",
-		"RCURLY",
-		"ASSIGN",
-		"STRING_LITERAL",
-		"CHAR_LITERAL",
-		"INT",
-		"STAR",
-		"TOKEN_REF",
-		"\"protected\"",
-		"\"public\"",
-		"\"private\"",
-		"BANG",
-		"ARG_ACTION",
-		"\"returns\"",
-		"\"throws\"",
-		"COMMA",
-		"LPAREN",
-		"OR",
-		"RPAREN",
-		"\"catch\"",
-		"\"finally\"",
-		"PLUS_ASSIGN",
-		"SEMPRED",
-		"IMPLIES",
-		"ROOT",
-		"RULE_REF",
-		"NOT",
-		"TREE_BEGIN",
-		"QUESTION",
-		"PLUS",
-		"WILDCARD",
-		"REWRITE",
-		"DOLLAR",
-		"DOUBLE_QUOTE_STRING_LITERAL",
-		"DOUBLE_ANGLE_STRING_LITERAL",
-		"WS",
-		"COMMENT",
-		"SL_COMMENT",
-		"ML_COMMENT",
-		"OPEN_ELEMENT_OPTION",
-		"CLOSE_ELEMENT_OPTION",
-		"ESC",
-		"DIGIT",
-		"XDIGIT",
-		"NESTED_ARG_ACTION",
-		"NESTED_ACTION",
-		"ACTION_CHAR_LITERAL",
-		"ACTION_STRING_LITERAL",
-		"ACTION_ESC",
-		"WS_LOOP",
-		"INTERNAL_RULE_REF",
-		"WS_OPT",
-		"SRC"
-	};
-	
-	}
-	
diff --git a/src/org/antlr/codegen/CodeGenTreeWalker.smap b/src/org/antlr/codegen/CodeGenTreeWalker.smap
deleted file mode 100644
index c2dbf69..0000000
--- a/src/org/antlr/codegen/CodeGenTreeWalker.smap
+++ /dev/null
@@ -1,2419 +0,0 @@
-SMAP
-CodeGenTreeWalker.java
-G
-*S G
-*F
-+ 0 codegen.g
-codegen.g
-*L
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-1:32
-1:33
-1:34
-1:35
-1:36
-1:37
-58:62
-59:63
-61:65
-62:66
-63:67
-64:68
-65:69
-66:70
-67:71
-68:72
-70:74
-71:75
-72:76
-73:77
-74:78
-75:79
-76:80
-77:81
-78:82
-79:83
-80:84
-81:85
-82:86
-83:87
-84:88
-86:90
-87:91
-88:92
-90:94
-91:95
-92:96
-94:98
-95:99
-96:100
-97:101
-99:103
-100:104
-102:106
-104:108
-105:109
-106:110
-107:111
-108:112
-109:113
-110:114
-112:116
-113:117
-114:118
-115:119
-116:120
-117:121
-118:122
-119:123
-120:124
-121:125
-122:126
-123:127
-124:128
-125:129
-126:130
-127:131
-128:132
-129:133
-130:134
-131:135
-132:136
-133:137
-134:138
-135:139
-136:140
-138:142
-139:143
-140:144
-141:145
-142:146
-143:147
-144:148
-145:149
-146:150
-147:151
-148:152
-149:153
-150:154
-151:155
-152:156
-153:157
-154:158
-155:159
-156:160
-157:161
-158:162
-159:163
-160:164
-161:165
-163:167
-164:168
-165:169
-166:170
-167:171
-168:172
-169:173
-170:174
-171:175
-172:176
-173:177
-174:178
-175:179
-176:180
-177:181
-178:182
-179:183
-181:185
-182:186
-183:187
-184:188
-185:189
-186:190
-187:191
-188:192
-190:194
-191:195
-192:196
-193:197
-194:198
-195:199
-196:200
-197:201
-198:202
-199:203
-200:204
-201:205
-202:206
-203:207
-204:208
-205:209
-206:210
-207:211
-208:212
-209:213
-210:214
-211:215
-212:216
-214:218
-215:219
-217:221
-218:222
-219:223
-220:224
-221:225
-222:226
-223:227
-224:228
-225:229
-226:230
-227:231
-228:232
-229:233
-230:234
-231:235
-232:236
-233:237
-234:238
-235:239
-236:240
-237:241
-238:242
-239:243
-241:245
-242:246
-243:247
-244:248
-245:249
-248:254
-248:255
-248:256
-248:257
-248:258
-248:259
-248:287
-248:345
-248:346
-248:347
-248:348
-248:349
-248:350
-248:351
-249:263
-250:264
-251:265
-252:266
-253:267
-254:268
-255:269
-256:270
-257:271
-258:272
-259:273
-260:274
-261:275
-262:276
-263:277
-264:278
-265:279
-266:280
-267:281
-268:282
-269:283
-270:284
-276:289
-276:290
-276:291
-276:292
-276:293
-276:294
-276:295
-276:296
-276:297
-276:298
-276:299
-276:300
-276:339
-276:340
-276:341
-276:342
-276:343
-277:303
-277:304
-277:305
-277:306
-277:307
-277:308
-277:309
-277:310
-277:311
-277:312
-278:315
-278:316
-278:317
-278:318
-278:319
-278:320
-278:321
-278:322
-278:323
-278:324
-280:327
-280:328
-280:329
-280:330
-280:331
-280:332
-280:333
-280:334
-280:335
-280:336
-284:496
-284:500
-284:513
-284:514
-284:515
-284:516
-284:517
-284:518
-284:519
-285:501
-285:502
-285:503
-285:504
-285:505
-285:506
-285:507
-285:508
-285:509
-285:510
-285:511
-285:512
-288:353
-288:359
-288:488
-288:489
-288:490
-288:491
-288:492
-288:493
-288:494
-289:356
-289:360
-289:361
-289:362
-290:357
-290:364
-290:365
-290:366
-290:367
-290:368
-290:369
-290:370
-290:385
-290:386
-290:387
-290:388
-290:389
-292:372
-293:373
-297:392
-298:393
-299:394
-300:395
-301:396
-302:397
-303:398
-305:401
-305:402
-305:403
-305:404
-305:405
-305:406
-305:407
-305:408
-305:409
-305:410
-305:411
-305:412
-305:413
-305:423
-305:424
-305:425
-305:426
-305:427
-306:430
-306:431
-306:432
-306:433
-306:434
-306:435
-306:436
-306:437
-306:438
-306:439
-306:440
-306:441
-306:442
-306:451
-306:452
-306:453
-306:454
-306:455
-307:457
-307:458
-307:459
-307:460
-307:461
-307:462
-307:463
-307:464
-307:465
-307:466
-307:467
-307:469
-307:470
-308:471
-308:472
-308:473
-308:474
-308:475
-308:476
-308:477
-308:478
-308:479
-308:480
-308:481
-308:482
-308:484
-308:485
-309:486
-309:487
-312:521
-312:522
-312:523
-312:530
-312:572
-312:573
-312:574
-312:575
-312:576
-312:577
-312:578
-313:527
-316:532
-316:533
-316:534
-316:535
-316:536
-316:553
-316:558
-316:559
-316:560
-316:561
-316:564
-316:565
-316:566
-316:567
-316:569
-316:570
-316:571
-317:539
-318:540
-323:542
-323:543
-323:544
-323:545
-325:547
-326:548
-327:549
-328:550
-329:551
-331:554
-331:555
-331:556
-331:557
-336:580
-336:581
-336:606
-336:852
-336:853
-336:854
-336:855
-336:856
-336:857
-336:858
-336:859
-337:587
-338:588
-339:589
-340:590
-341:591
-342:592
-343:593
-344:594
-345:595
-346:596
-348:598
-349:599
-350:600
-351:601
-352:602
-353:603
-356:584
-356:607
-356:608
-356:609
-356:610
-356:611
-356:612
-356:613
-356:614
-356:822
-356:823
-357:585
-357:616
-357:617
-357:618
-357:619
-357:620
-357:621
-357:622
-357:623
-357:624
-357:625
-357:632
-357:633
-357:634
-357:635
-357:636
-358:638
-358:639
-358:640
-358:641
-358:643
-358:644
-358:645
-358:646
-358:647
-358:648
-358:649
-358:656
-358:657
-358:658
-358:659
-358:660
-358:662
-358:663
-359:664
-359:665
-359:666
-359:667
-359:669
-359:670
-359:671
-359:672
-359:673
-359:674
-359:675
-359:682
-359:683
-359:684
-359:685
-359:686
-359:688
-359:689
-360:691
-360:692
-360:693
-360:694
-360:695
-360:696
-360:697
-360:698
-360:699
-360:700
-360:701
-360:702
-360:703
-360:712
-360:713
-360:714
-360:715
-360:716
-361:719
-361:720
-361:721
-361:722
-361:723
-361:724
-361:732
-361:733
-361:734
-361:735
-361:736
-362:738
-362:739
-362:740
-362:741
-362:742
-362:743
-362:744
-362:745
-362:746
-362:747
-362:748
-362:749
-362:751
-362:752
-363:753
-363:754
-365:756
-366:757
-367:758
-368:759
-369:760
-370:761
-371:762
-372:763
-373:764
-374:765
-375:766
-376:767
-377:768
-378:769
-379:770
-380:771
-381:772
-382:773
-383:774
-384:775
-385:776
-386:777
-387:778
-388:779
-389:780
-390:781
-391:782
-392:783
-393:784
-394:785
-395:786
-396:787
-397:788
-398:789
-399:790
-400:791
-401:792
-402:793
-403:794
-404:795
-405:796
-406:797
-409:800
-409:801
-409:802
-409:803
-409:804
-409:805
-409:806
-409:813
-409:814
-409:815
-409:816
-409:817
-410:819
-410:820
-410:821
-413:825
-414:826
-415:827
-416:828
-417:829
-418:830
-419:831
-420:832
-421:833
-422:834
-423:835
-424:836
-425:837
-426:838
-427:839
-428:840
-429:841
-430:842
-431:843
-432:844
-433:845
-434:846
-435:847
-436:848
-437:849
-438:850
-442:861
-442:865
-442:866
-442:867
-442:896
-442:897
-442:898
-442:899
-442:900
-442:901
-442:902
-442:903
-442:904
-442:905
-442:906
-442:907
-443:868
-443:869
-443:870
-443:871
-443:872
-444:875
-444:876
-444:877
-444:878
-444:879
-445:882
-445:883
-445:884
-445:885
-445:886
-446:889
-446:890
-446:891
-446:892
-446:893
-449:909
-449:913
-449:956
-449:957
-449:958
-449:959
-449:960
-449:961
-449:962
-450:914
-450:915
-450:916
-450:917
-450:919
-450:920
-450:921
-450:922
-450:923
-450:924
-450:925
-450:933
-450:934
-450:935
-450:936
-450:937
-450:939
-450:940
-450:941
-450:942
-450:943
-450:944
-450:945
-450:946
-450:947
-450:948
-450:949
-450:950
-450:952
-450:953
-450:954
-450:955
-453:964
-453:965
-453:966
-453:967
-453:996
-453:1005
-453:1072
-453:1073
-453:1074
-453:1075
-453:1077
-453:1078
-453:1079
-453:1080
-453:1081
-453:1082
-453:1083
-453:1084
-454:971
-455:972
-456:973
-457:974
-458:975
-459:976
-460:977
-461:978
-462:979
-463:980
-464:981
-465:982
-466:983
-467:984
-468:985
-469:986
-470:987
-471:988
-472:989
-473:990
-474:991
-475:992
-476:993
-480:997
-480:998
-480:999
-480:1000
-482:1002
-483:1003
-486:1006
-486:1007
-486:1008
-486:1009
-486:1010
-486:1069
-486:1070
-487:1012
-487:1013
-487:1014
-487:1015
-487:1016
-487:1017
-487:1018
-487:1025
-487:1026
-487:1027
-487:1028
-487:1029
-488:1032
-488:1033
-488:1034
-488:1035
-488:1036
-488:1037
-488:1038
-488:1039
-488:1040
-488:1041
-488:1058
-488:1059
-488:1060
-488:1061
-488:1063
-488:1064
-488:1065
-490:1043
-491:1044
-492:1045
-493:1046
-494:1047
-495:1048
-496:1049
-497:1050
-498:1051
-499:1052
-500:1053
-501:1054
-502:1055
-503:1056
-506:1066
-506:1067
-506:1068
-508:1071
-511:1153
-511:1154
-511:1168
-511:1196
-511:1197
-511:1198
-511:1199
-511:1200
-511:1201
-511:1202
-511:1203
-512:1159
-513:1160
-514:1161
-515:1162
-516:1163
-517:1164
-518:1165
-521:1157
-521:1169
-521:1170
-521:1171
-523:1173
-524:1174
-525:1175
-526:1176
-527:1177
-528:1178
-529:1179
-530:1180
-531:1181
-532:1182
-533:1183
-534:1184
-535:1185
-536:1186
-537:1187
-538:1188
-539:1189
-540:1190
-541:1191
-542:1192
-543:1193
-544:1194
-548:1086
-548:1087
-548:1088
-548:1092
-548:1093
-548:1094
-548:1140
-548:1141
-548:1142
-548:1143
-548:1144
-548:1145
-548:1146
-548:1147
-548:1148
-548:1149
-548:1150
-548:1151
-549:1095
-549:1096
-549:1098
-549:1099
-549:1100
-549:1101
-549:1102
-549:1103
-549:1104
-549:1105
-549:1106
-549:1107
-549:1108
-549:1110
-549:1111
-549:1112
-549:1114
-549:1115
-549:1116
-549:1117
-549:1118
-549:1119
-549:1126
-549:1127
-549:1128
-549:1129
-549:1130
-550:1134
-550:1135
-550:1136
-550:1137
-553:1392
-553:1393
-553:1394
-553:1398
-553:1415
-553:1416
-553:1417
-553:1418
-553:1419
-553:1420
-553:1421
-554:1399
-554:1400
-554:1401
-554:1402
-554:1403
-554:1404
-554:1405
-554:1406
-554:1407
-554:1408
-554:1409
-554:1410
-556:1412
-557:1413
-561:1423
-561:1424
-561:1425
-561:1429
-561:1443
-561:1444
-561:1445
-561:1446
-561:1447
-561:1448
-561:1449
-562:1430
-562:1431
-562:1432
-562:1433
-562:1434
-562:1435
-562:1436
-562:1437
-562:1438
-564:1440
-565:1441
-569:1205
-569:1206
-569:1240
-569:1276
-569:1277
-569:1278
-569:1279
-569:1280
-569:1281
-569:1282
-569:1283
-570:1211
-571:1212
-572:1213
-573:1214
-574:1215
-575:1216
-576:1217
-577:1218
-578:1219
-579:1220
-580:1221
-581:1222
-582:1223
-583:1224
-584:1225
-585:1226
-586:1227
-587:1228
-588:1229
-589:1230
-590:1231
-591:1232
-592:1233
-593:1234
-594:1235
-595:1236
-596:1237
-599:1209
-599:1241
-599:1242
-599:1243
-599:1244
-599:1274
-599:1275
-600:1246
-600:1247
-600:1248
-600:1249
-600:1250
-600:1251
-600:1263
-600:1264
-600:1265
-600:1266
-600:1268
-600:1269
-600:1270
-601:1252
-601:1253
-603:1255
-604:1256
-605:1257
-606:1258
-607:1259
-608:1260
-609:1261
-612:1271
-612:1272
-612:1273
-616:1451
-616:1452
-616:1453
-616:1454
-616:1469
-616:1470
-616:1471
-616:1634
-616:1639
-616:1643
-616:1644
-616:1645
-616:1646
-616:1647
-616:1648
-616:1649
-616:1650
-616:1651
-616:1652
-616:1653
-616:1654
-616:1655
-617:1465
-618:1466
-621:1472
-621:1473
-621:1474
-621:1475
-621:1476
-621:1477
-621:1478
-621:1479
-621:1480
-621:1481
-623:1484
-623:1485
-623:1486
-623:1487
-623:1488
-623:1489
-623:1490
-623:1491
-623:1492
-623:1493
-625:1457
-625:1496
-625:1497
-625:1498
-625:1499
-625:1500
-625:1501
-625:1502
-625:1503
-625:1504
-625:1505
-627:1458
-627:1508
-627:1509
-627:1510
-627:1511
-627:1512
-627:1513
-627:1514
-627:1515
-627:1516
-627:1517
-627:1518
-627:1519
-627:1520
-629:1459
-629:1523
-629:1524
-629:1525
-629:1526
-629:1527
-629:1528
-629:1529
-629:1530
-629:1531
-629:1532
-629:1533
-629:1534
-629:1535
-631:1460
-631:1461
-631:1538
-631:1539
-631:1540
-631:1541
-631:1542
-631:1543
-631:1544
-631:1545
-631:1546
-631:1547
-631:1548
-631:1549
-631:1550
-631:1551
-632:1552
-633:1553
-634:1554
-635:1555
-636:1556
-637:1557
-638:1558
-639:1559
-640:1560
-641:1561
-644:1635
-644:1636
-644:1637
-644:1638
-646:1640
-646:1641
-646:1642
-648:1565
-648:1566
-648:1567
-648:1568
-650:1571
-650:1572
-650:1573
-650:1574
-652:1462
-652:1463
-652:1577
-652:1578
-652:1579
-652:1581
-652:1582
-652:1583
-652:1584
-652:1585
-652:1586
-652:1587
-652:1590
-652:1591
-652:1592
-652:1593
-652:1594
-652:1595
-652:1598
-652:1599
-652:1600
-652:1601
-652:1602
-654:1605
-655:1606
-656:1607
-657:1608
-658:1609
-661:1613
-661:1614
-661:1615
-661:1616
-661:1617
-663:1620
-663:1621
-663:1622
-663:1623
-663:1624
-665:1627
-665:1628
-665:1629
-665:1630
-665:1631
-668:2146
-668:2147
-668:2152
-668:2160
-668:2161
-668:2162
-668:2163
-668:2164
-668:2165
-668:2166
-668:2167
-669:2150
-669:2153
-669:2154
-669:2155
-671:2157
-672:2158
-676:1657
-676:1658
-676:1659
-676:1660
-676:1675
-676:1754
-676:1755
-676:1756
-676:1757
-676:1758
-676:1759
-676:1760
-676:1761
-677:1668
-678:1669
-679:1670
-680:1671
-681:1672
-685:1663
-685:1677
-685:1678
-685:1679
-685:1680
-685:1681
-685:1682
-685:1683
-685:1735
-685:1736
-685:1737
-685:1738
-685:1739
-687:1685
-688:1686
-689:1687
-690:1688
-691:1689
-692:1690
-693:1691
-694:1692
-696:1664
-696:1696
-696:1697
-696:1698
-696:1699
-696:1700
-698:1702
-699:1703
-700:1704
-701:1705
-702:1706
-703:1707
-704:1708
-705:1709
-707:1665
-707:1713
-707:1714
-707:1715
-707:1716
-707:1717
-709:1719
-710:1720
-712:1666
-712:1724
-712:1725
-712:1726
-712:1727
-712:1728
-714:1730
-715:1731
-719:1742
-720:1743
-721:1744
-722:1745
-723:1746
-724:1747
-725:1748
-726:1749
-727:1750
-728:1751
-729:1752
-733:1763
-733:1764
-733:1773
-733:1834
-733:1835
-733:1836
-733:1837
-733:1838
-733:1839
-733:1840
-733:1841
-734:1768
-735:1769
-736:1770
-739:1775
-739:1776
-739:1777
-739:1778
-739:1779
-739:1823
-739:1824
-739:1825
-739:1826
-739:1827
-740:1780
-740:1781
-741:1784
-741:1785
-741:1786
-742:1787
-742:1788
-742:1789
-742:1790
-742:1791
-742:1792
-742:1793
-742:1794
-743:1797
-743:1798
-743:1799
-744:1800
-744:1801
-744:1802
-744:1803
-744:1804
-744:1805
-744:1806
-744:1807
-745:1810
-745:1811
-745:1812
-746:1813
-746:1814
-746:1815
-746:1816
-746:1817
-746:1818
-746:1819
-746:1820
-749:1830
-750:1831
-751:1832
-755:2059
-755:2060
-755:2076
-755:2137
-755:2138
-755:2139
-755:2140
-755:2141
-755:2142
-755:2143
-755:2144
-756:2064
-757:2065
-758:2066
-759:2067
-760:2068
-761:2069
-762:2070
-763:2071
-764:2072
-765:2073
-768:2077
-768:2078
-768:2079
-768:2080
-768:2081
-768:2135
-768:2136
-769:2082
-769:2083
-771:2085
-772:2086
-773:2087
-774:2088
-775:2089
-780:2091
-780:2092
-780:2093
-780:2106
-780:2107
-780:2108
-780:2109
-780:2111
-780:2112
-781:2094
-781:2095
-781:2096
-782:2097
-782:2098
-784:2100
-785:2101
-786:2102
-787:2103
-788:2104
-791:2113
-791:2114
-791:2115
-791:2116
-791:2117
-791:2118
-791:2128
-791:2129
-791:2130
-791:2131
-791:2133
-791:2134
-792:2119
-792:2120
-794:2122
-795:2123
-796:2124
-797:2125
-798:2126
-804:1843
-804:1844
-804:1845
-804:1846
-804:1863
-804:1864
-804:1865
-804:2045
-804:2046
-804:2047
-804:2048
-804:2049
-804:2050
-804:2051
-804:2052
-804:2053
-804:2054
-804:2055
-804:2056
-804:2057
-805:1857
-806:1858
-807:1859
-808:1860
-812:1849
-812:1850
-812:1866
-812:1867
-812:1868
-812:1869
-812:1870
-812:1871
-812:1873
-812:1874
-812:1875
-812:1876
-812:1877
-812:1878
-812:1879
-812:1886
-812:1887
-812:1888
-812:1889
-812:1890
-812:1892
-812:1893
-814:1895
-815:1896
-816:1897
-817:1898
-818:1899
-819:1900
-820:1901
-821:1902
-823:1904
-824:1905
-825:1906
-826:1907
-827:1908
-828:1909
-829:1910
-830:1911
-833:1851
-833:1852
-833:1915
-833:1916
-833:1917
-833:1918
-833:1919
-833:1920
-833:1922
-833:1923
-833:1924
-833:1925
-833:1926
-833:1927
-833:1928
-833:1935
-833:1936
-833:1937
-833:1938
-833:1939
-833:1941
-833:1942
-835:1944
-836:1945
-837:1946
-838:1947
-839:1948
-840:1949
-841:1950
-842:1951
-843:1952
-844:1953
-845:1954
-846:1955
-847:1956
-848:1957
-849:1958
-850:1959
-851:1960
-852:1961
-853:1962
-854:1963
-855:1964
-856:1965
-857:1966
-858:1967
-859:1968
-860:1969
-861:1970
-862:1971
-863:1972
-864:1973
-867:1853
-867:1977
-867:1978
-867:1979
-867:1980
-867:1981
-869:1983
-870:1984
-871:1985
-872:1986
-873:1987
-874:1988
-875:1989
-876:1990
-877:1991
-878:1992
-879:1993
-880:1994
-881:1995
-882:1996
-883:1997
-884:1998
-887:1854
-887:2002
-887:2003
-887:2004
-887:2005
-887:2006
-889:2008
-890:2009
-891:2010
-892:2011
-893:2012
-894:2013
-895:2014
-896:2015
-897:2016
-898:2017
-899:2018
-900:2019
-901:2020
-902:2021
-903:2022
-904:2023
-905:2024
-908:1855
-908:2028
-908:2029
-908:2030
-908:2031
-908:2032
-910:2034
-911:2035
-914:2039
-914:2040
-914:2041
-914:2042
-917:2205
-917:2209
-917:2210
-917:2211
-917:2226
-917:2227
-917:2228
-917:2229
-917:2230
-917:2231
-917:2232
-917:2233
-917:2234
-917:2235
-917:2236
-917:2237
-918:2212
-918:2213
-918:2214
-918:2215
-918:2216
-919:2219
-919:2220
-919:2221
-919:2222
-919:2223
-923:2169
-923:2170
-923:2171
-923:2172
-923:2183
-923:2196
-923:2197
-923:2198
-923:2199
-923:2200
-923:2201
-923:2202
-923:2203
-924:2177
-925:2178
-926:2179
-927:2180
-930:2175
-930:2184
-930:2185
-930:2186
-932:2188
-933:2189
-934:2190
-935:2191
-936:2192
-937:2193
-938:2194
-942:2239
-942:2248
-942:2249
-942:2250
-942:2288
-942:2289
-942:2290
-942:2291
-942:2292
-942:2293
-942:2294
-942:2295
-942:2296
-942:2297
-942:2298
-942:2299
-943:2242
-943:2251
-943:2252
-943:2253
-943:2254
-943:2255
-944:2243
-944:2258
-944:2259
-944:2260
-944:2261
-944:2262
-945:2244
-945:2265
-945:2266
-945:2267
-945:2268
-945:2269
-946:2245
-946:2246
-946:2272
-946:2273
-946:2274
-946:2275
-946:2276
-946:2277
-946:2278
-946:2279
-946:2280
-946:2281
-946:2282
-946:2283
-946:2284
-946:2285
-951:1285
-951:1286
-951:1323
-951:1383
-951:1384
-951:1385
-951:1386
-951:1387
-951:1388
-951:1389
-951:1390
-952:1292
-953:1293
-954:1294
-955:1295
-956:1296
-957:1297
-958:1298
-959:1299
-960:1300
-961:1301
-962:1302
-963:1303
-964:1304
-965:1305
-966:1306
-967:1307
-968:1308
-969:1309
-970:1310
-971:1311
-972:1312
-973:1313
-974:1314
-975:1315
-976:1316
-977:1317
-978:1318
-979:1319
-980:1320
-983:1324
-983:1325
-983:1326
-983:1376
-983:1377
-983:1378
-983:1379
-983:1381
-983:1382
-984:1327
-984:1328
-984:1329
-985:1289
-985:1290
-985:1330
-985:1331
-985:1332
-985:1333
-985:1335
-985:1336
-985:1337
-985:1338
-985:1339
-985:1340
-985:1341
-985:1350
-985:1351
-985:1352
-985:1353
-985:1354
-985:1356
-985:1357
-985:1358
-985:1359
-987:1361
-988:1362
-989:1363
-990:1364
-991:1365
-992:1366
-993:1367
-994:1368
-995:1369
-996:1370
-997:1371
-998:1372
-999:1373
-1000:1374
-1005:2399
-1005:2400
-1005:2401
-1005:2402
-1005:2414
-1005:2437
-1005:2438
-1005:2439
-1005:2440
-1005:2441
-1005:2442
-1005:2443
-1005:2444
-1006:2406
-1007:2407
-1008:2408
-1009:2409
-1010:2410
-1011:2411
-1014:2415
-1014:2416
-1014:2417
-1014:2418
-1014:2430
-1014:2431
-1016:2420
-1017:2421
-1018:2422
-1019:2423
-1021:2425
-1021:2426
-1022:2427
-1022:2428
-1022:2429
-1025:2433
-1026:2434
-1027:2435
-1031:2301
-1031:2302
-1031:2310
-1031:2381
-1031:2385
-1031:2386
-1031:2387
-1031:2388
-1031:2390
-1031:2391
-1031:2392
-1031:2393
-1031:2394
-1031:2395
-1031:2396
-1031:2397
-1032:2307
-1037:2305
-1037:2311
-1037:2312
-1037:2313
-1037:2314
-1037:2315
-1037:2316
-1037:2317
-1037:2379
-1037:2380
-1038:2319
-1038:2320
-1038:2321
-1038:2322
-1038:2323
-1038:2324
-1038:2325
-1038:2326
-1038:2327
-1038:2328
-1038:2329
-1038:2330
-1038:2331
-1038:2333
-1038:2334
-1038:2335
-1038:2336
-1038:2337
-1038:2338
-1038:2347
-1038:2348
-1038:2349
-1038:2350
-1038:2352
-1038:2353
-1038:2354
-1038:2370
-1038:2371
-1038:2372
-1038:2373
-1038:2374
-1039:2339
-1039:2340
-1040:2341
-1041:2342
-1042:2343
-1043:2344
-1044:2345
-1047:2357
-1047:2358
-1047:2359
-1047:2360
-1047:2361
-1048:2362
-1049:2363
-1050:2364
-1051:2365
-1052:2366
-1055:2376
-1055:2377
-1055:2378
-1057:2382
-1057:2383
-1057:2384
-1060:2446
-1060:2447
-1060:2455
-1060:2456
-1060:2457
-1060:2483
-1060:2484
-1060:2485
-1060:2486
-1060:2487
-1060:2488
-1060:2489
-1060:2490
-1060:2491
-1060:2492
-1060:2493
-1060:2494
-1060:2495
-1061:2451
-1062:2452
-1065:2458
-1065:2459
-1065:2460
-1065:2461
-1065:2462
-1065:2463
-1065:2464
-1065:2465
-1065:2466
-1067:2469
-1067:2470
-1067:2471
-1067:2472
-1067:2473
-1067:2474
-1069:2477
-1069:2478
-1069:2479
-1069:2480
-1072:2892
-1072:2893
-1072:2897
-1072:2898
-1072:2899
-1072:2951
-1072:2952
-1072:2953
-1072:2954
-1072:2955
-1072:2956
-1072:2957
-1072:2958
-1072:2959
-1072:2960
-1072:2961
-1072:2962
-1072:2963
-1073:2900
-1073:2901
-1073:2902
-1073:2903
-1073:2904
-1073:2905
-1073:2906
-1073:2907
-1073:2908
-1073:2909
-1075:2911
-1076:2912
-1077:2913
-1079:2917
-1079:2918
-1079:2919
-1079:2920
-1079:2921
-1079:2922
-1079:2923
-1079:2924
-1079:2925
-1079:2926
-1081:2928
-1082:2929
-1083:2930
-1085:2934
-1085:2935
-1085:2936
-1085:2937
-1085:2938
-1085:2939
-1085:2940
-1085:2941
-1085:2942
-1085:2943
-1087:2945
-1088:2946
-1089:2947
-1093:2965
-1093:2966
-1093:2977
-1093:3021
-1093:3022
-1093:3023
-1093:3024
-1093:3025
-1093:3026
-1093:3027
-1093:3028
-1094:2970
-1095:2971
-1096:2972
-1097:2973
-1098:2974
-1101:2978
-1101:2979
-1101:2980
-1101:2981
-1101:2982
-1101:3013
-1101:3014
-1102:2983
-1102:2984
-1103:2985
-1104:2986
-1105:2987
-1106:2988
-1107:2989
-1109:2991
-1109:2992
-1109:2993
-1109:2994
-1109:2995
-1109:2996
-1109:3006
-1109:3007
-1109:3008
-1109:3009
-1109:3011
-1109:3012
-1110:2997
-1110:2998
-1112:3000
-1113:3001
-1114:3002
-1115:3003
-1116:3004
-1121:3016
-1122:3017
-1123:3018
-1124:3019
-1128:2679
-1128:2680
-1128:2681
-1128:2682
-1128:2688
-1128:2689
-1128:2690
-1128:2878
-1128:2879
-1128:2880
-1128:2881
-1128:2882
-1128:2883
-1128:2884
-1128:2885
-1128:2886
-1128:2887
-1128:2888
-1128:2889
-1128:2890
-1129:2685
-1129:2691
-1129:2692
-1129:2693
-1129:2694
-1129:2695
-1131:2697
-1132:2698
-1133:2699
-1134:2700
-1135:2701
-1136:2702
-1137:2703
-1138:2704
-1139:2705
-1140:2706
-1141:2707
-1142:2708
-1143:2709
-1144:2710
-1145:2711
-1146:2712
-1147:2713
-1148:2714
-1149:2715
-1150:2716
-1151:2717
-1152:2718
-1153:2719
-1154:2720
-1155:2721
-1156:2722
-1157:2723
-1158:2724
-1159:2725
-1162:2686
-1162:2729
-1162:2730
-1162:2731
-1162:2732
-1162:2734
-1162:2735
-1162:2736
-1162:2737
-1162:2738
-1162:2739
-1162:2740
-1162:2741
-1162:2743
-1162:2744
-1162:2745
-1162:2746
-1162:2747
-1162:2748
-1162:2749
-1162:2756
-1162:2757
-1162:2758
-1162:2759
-1162:2760
-1162:2762
-1162:2763
-1162:2766
-1162:2767
-1162:2768
-1162:2769
-1162:2770
-1162:2773
-1162:2774
-1162:2775
-1162:2776
-1162:2777
-1162:2780
-1162:2781
-1162:2782
-1162:2783
-1162:2784
-1164:2787
-1165:2788
-1166:2789
-1167:2790
-1168:2791
-1169:2792
-1170:2793
-1171:2794
-1172:2795
-1173:2796
-1174:2797
-1175:2798
-1176:2799
-1177:2800
-1178:2801
-1179:2802
-1180:2803
-1181:2804
-1182:2805
-1183:2806
-1184:2807
-1185:2808
-1186:2809
-1187:2810
-1188:2811
-1189:2812
-1190:2813
-1193:2817
-1193:2818
-1193:2819
-1193:2820
-1193:2821
-1195:2823
-1196:2824
-1197:2825
-1198:2826
-1199:2827
-1200:2828
-1201:2829
-1202:2830
-1203:2831
-1204:2832
-1205:2833
-1206:2834
-1207:2835
-1208:2836
-1209:2837
-1210:2838
-1211:2839
-1212:2840
-1213:2841
-1214:2842
-1215:2843
-1216:2844
-1217:2845
-1218:2846
-1219:2847
-1220:2848
-1221:2849
-1222:2850
-1223:2851
-1224:2852
-1225:2853
-1226:2854
-1227:2855
-1228:2856
-1229:2857
-1230:2858
-1231:2859
-1232:2860
-1235:2864
-1235:2865
-1235:2866
-1235:2867
-1235:2868
-1237:2870
-1238:2871
-1239:2872
-1240:2873
-1241:2874
-1245:2497
-1245:2498
-1245:2507
-1245:2508
-1245:2509
-1245:2665
-1245:2666
-1245:2667
-1245:2668
-1245:2669
-1245:2670
-1245:2671
-1245:2672
-1245:2673
-1245:2674
-1245:2675
-1245:2676
-1245:2677
-1246:2510
-1246:2511
-1246:2512
-1246:2513
-1246:2514
-1246:2515
-1246:2516
-1246:2517
-1246:2518
-1246:2519
-1246:2520
-1246:2521
-1246:2522
-1246:2523
-1246:2524
-1247:2501
-1247:2502
-1247:2527
-1247:2528
-1247:2529
-1247:2530
-1247:2531
-1247:2532
-1247:2534
-1247:2535
-1247:2536
-1247:2537
-1247:2538
-1247:2539
-1247:2540
-1247:2543
-1247:2544
-1247:2545
-1247:2546
-1247:2547
-1247:2550
-1247:2551
-1247:2552
-1247:2553
-1247:2554
-1247:2647
-1247:2648
-1249:2557
-1250:2558
-1251:2559
-1252:2560
-1253:2561
-1254:2562
-1255:2563
-1256:2564
-1257:2565
-1258:2566
-1259:2567
-1260:2568
-1262:2570
-1262:2571
-1262:2572
-1262:2573
-1262:2606
-1262:2607
-1263:2503
-1263:2504
-1263:2574
-1263:2575
-1263:2576
-1263:2577
-1263:2578
-1263:2579
-1263:2580
-1263:2581
-1263:2582
-1263:2583
-1263:2584
-1263:2585
-1263:2586
-1263:2587
-1263:2588
-1263:2597
-1263:2598
-1263:2599
-1263:2600
-1263:2601
-1263:2602
-1263:2604
-1263:2605
-1265:2590
-1266:2591
-1267:2592
-1268:2593
-1269:2594
-1270:2595
-1275:2609
-1275:2610
-1275:2611
-1275:2612
-1275:2613
-1275:2614
-1275:2615
-1275:2641
-1275:2642
-1275:2643
-1275:2644
-1275:2645
-1277:2617
-1278:2618
-1279:2619
-1280:2620
-1282:2624
-1282:2625
-1282:2626
-1282:2627
-1282:2628
-1284:2630
-1285:2631
-1286:2632
-1287:2633
-1292:2505
-1292:2651
-1292:2652
-1292:2653
-1292:2654
-1292:2655
-1294:2657
-1295:2658
-1296:2659
-1297:2660
-1298:2661
-*E
diff --git a/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.java b/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.java
deleted file mode 100644
index 5ff363b..0000000
--- a/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.java
+++ /dev/null
@@ -1,135 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "codegen.g" -> "CodeGenTreeWalker.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.codegen;
-    import org.antlr.tool.*;
-    import org.antlr.analysis.*;
-    import org.antlr.misc.*;
-	import java.util.*;
-	import org.antlr.stringtemplate.*;
-    import antlr.TokenWithIndex;
-    import antlr.CommonToken;
-
-public interface CodeGenTreeWalkerTokenTypes {
-	int EOF = 1;
-	int NULL_TREE_LOOKAHEAD = 3;
-	int OPTIONS = 4;
-	int TOKENS = 5;
-	int PARSER = 6;
-	int LEXER = 7;
-	int RULE = 8;
-	int BLOCK = 9;
-	int OPTIONAL = 10;
-	int CLOSURE = 11;
-	int POSITIVE_CLOSURE = 12;
-	int SYNPRED = 13;
-	int RANGE = 14;
-	int CHAR_RANGE = 15;
-	int EPSILON = 16;
-	int ALT = 17;
-	int EOR = 18;
-	int EOB = 19;
-	int EOA = 20;
-	int ID = 21;
-	int ARG = 22;
-	int ARGLIST = 23;
-	int RET = 24;
-	int LEXER_GRAMMAR = 25;
-	int PARSER_GRAMMAR = 26;
-	int TREE_GRAMMAR = 27;
-	int COMBINED_GRAMMAR = 28;
-	int INITACTION = 29;
-	int LABEL = 30;
-	int TEMPLATE = 31;
-	int SCOPE = 32;
-	int GATED_SEMPRED = 33;
-	int SYN_SEMPRED = 34;
-	int BACKTRACK_SEMPRED = 35;
-	int FRAGMENT = 36;
-	int ACTION = 37;
-	int DOC_COMMENT = 38;
-	int SEMI = 39;
-	int LITERAL_lexer = 40;
-	int LITERAL_tree = 41;
-	int LITERAL_grammar = 42;
-	int AMPERSAND = 43;
-	int COLON = 44;
-	int RCURLY = 45;
-	int ASSIGN = 46;
-	int STRING_LITERAL = 47;
-	int CHAR_LITERAL = 48;
-	int INT = 49;
-	int STAR = 50;
-	int TOKEN_REF = 51;
-	int LITERAL_protected = 52;
-	int LITERAL_public = 53;
-	int LITERAL_private = 54;
-	int BANG = 55;
-	int ARG_ACTION = 56;
-	int LITERAL_returns = 57;
-	int LITERAL_throws = 58;
-	int COMMA = 59;
-	int LPAREN = 60;
-	int OR = 61;
-	int RPAREN = 62;
-	int LITERAL_catch = 63;
-	int LITERAL_finally = 64;
-	int PLUS_ASSIGN = 65;
-	int SEMPRED = 66;
-	int IMPLIES = 67;
-	int ROOT = 68;
-	int RULE_REF = 69;
-	int NOT = 70;
-	int TREE_BEGIN = 71;
-	int QUESTION = 72;
-	int PLUS = 73;
-	int WILDCARD = 74;
-	int REWRITE = 75;
-	int DOLLAR = 76;
-	int DOUBLE_QUOTE_STRING_LITERAL = 77;
-	int DOUBLE_ANGLE_STRING_LITERAL = 78;
-	int WS = 79;
-	int COMMENT = 80;
-	int SL_COMMENT = 81;
-	int ML_COMMENT = 82;
-	int OPEN_ELEMENT_OPTION = 83;
-	int CLOSE_ELEMENT_OPTION = 84;
-	int ESC = 85;
-	int DIGIT = 86;
-	int XDIGIT = 87;
-	int NESTED_ARG_ACTION = 88;
-	int NESTED_ACTION = 89;
-	int ACTION_CHAR_LITERAL = 90;
-	int ACTION_STRING_LITERAL = 91;
-	int ACTION_ESC = 92;
-	int WS_LOOP = 93;
-	int INTERNAL_RULE_REF = 94;
-	int WS_OPT = 95;
-	int SRC = 96;
-}
diff --git a/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.txt b/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.txt
deleted file mode 100644
index b600f94..0000000
--- a/src/org/antlr/codegen/CodeGenTreeWalkerTokenTypes.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): codegen.g -> CodeGenTreeWalkerTokenTypes.txt$
-CodeGenTreeWalker    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
diff --git a/src/org/antlr/codegen/templates/C/Dbg.stg b/src/org/antlr/codegen/templates/C/Dbg.stg
deleted file mode 100644
index 0121588..0000000
--- a/src/org/antlr/codegen/templates/C/Dbg.stg
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template overrides to add debugging to normal Java output;
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-group CDbg;
-
- at outputFile.imports() ::= <<
-<@super.imports()>
-import org.antlr.runtime.debug.*;
->>
-
- at genericParser.members() ::= <<
-public static final String[] ruleNames = new String[] {
-    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
-};<\n>
-public int ruleLevel = 0;
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-public <name>(<inputStreamType> input) {
-<if(profile)>
-        this(input, new Profiler(null));
-        Profiler p = (Profiler)dbg;
-        p.setParser(this);
-<else>
-        super(input);
-<endif><\n>
-<if(memoize)>
-        ruleMemo = new Map[<numRules>+1];<\n><! index from 1..n !>
-<endif>
-}
-<if(profile)>
-public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-    ((Profiler)dbg).examineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
-    return super.alreadyParsedRule(input, ruleIndex);
-}<\n>
-public void memoize(IntStream input,
-                    int ruleIndex,
-                    int ruleStartIndex)
-{
-    ((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
-    super.memoize(input, ruleIndex, ruleStartIndex);
-}<\n>
-<endif>
-public <name>(<inputStreamType> input, DebugEventListener dbg) {
-    super(input, dbg);
-}<\n>
-protected boolean evalPredicate(boolean result, String predicate) {
-    dbg.semanticPredicate(result, predicate);
-    return result;
-}<\n>
->>
-
- at genericParser.superClassName() ::= "Debug<@super.superClassName()>"
-
- at rule.preamble() ::= <<
-try { dbg.enterRule("<ruleName>");
-if ( ruleLevel==0 ) {dbg.commence();}
-ruleLevel++;
-dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
->>
-
- at rule.postamble() ::= <<
-dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
-}
-finally {
-    dbg.exitRule("<ruleName>");
-    ruleLevel--;
-    if ( ruleLevel==0 ) {dbg.terminate();}
-}<\n>
->>
-
- at synpred.start() ::= "dbg.beginBacktrack(backtracking);"
-
- at synpred.stop() ::= "dbg.endBacktrack(backtracking, success);"
-
-// Common debug event triggers used by region overrides below
-
-enterSubRule() ::=
-    "try { dbg.enterSubRule(<decisionNumber>);<\n>"
-
-exitSubRule() ::=
-    "} finally {dbg.exitSubRule(<decisionNumber>);}<\n>"
-
-enterDecision() ::=
-    "try { dbg.enterDecision(<decisionNumber>);<\n>"
-
-exitDecision() ::=
-    "} finally {dbg.exitDecision(<decisionNumber>);}<\n>"
-
-enterAlt(n) ::= "dbg.enterAlt(<n>);<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
- at block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
- at block.postdecision() ::= "<exitDecision()>"
-
- at block.postbranch() ::= "<exitSubRule()>"
-
- at ruleBlock.predecision() ::= "<enterDecision()>"
-
- at ruleBlock.postdecision() ::= "<exitDecision()>"
-
- at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
- at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
- at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
- at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
- at positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
- at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
- at positiveClosureBlock.earlyExitException() ::=
-    "dbg.recognitionException(eee);<\n>"
-
- at closureBlock.preloop() ::= "<enterSubRule()>"
-
- at closureBlock.postloop() ::= "<exitSubRule()>"
-
- at closureBlock.predecision() ::= "<enterDecision()>"
-
- at closureBlock.postdecision() ::= "<exitDecision()>"
-
- at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
-
- at element.prematch() ::=
-    "dbg.location(<it.line>,<it.pos>);"
-
- at matchSet.mismatchedSetException() ::=
-    "dbg.recognitionException(mse);"
-
- at dfaState.noViableAltException() ::= "dbg.recognitionException(nvae);"
-
- at dfaStateSwitch.noViableAltException() ::= "dbg.recognitionException(nvae);"
-
-dfaDecision(decisionNumber,description) ::= <<
-try {
-    isCyclicDecision = true;
-    <super.dfaDecision(...)>
-}
-catch (NoViableAltException nvae) {
-    dbg.recognitionException(nvae);
-    throw nvae;
-}
->>
-
- at cyclicDFA.errorMethod() ::= <<
-public void error(NoViableAltException nvae) {
-    dbg.recognitionException(nvae);
-}
->>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-evalPredicate(<pred>,"<description>")
->>
diff --git a/src/org/antlr/codegen/templates/Python/AST.stg b/src/org/antlr/codegen/templates/Python/AST.stg
deleted file mode 100644
index 2125271..0000000
--- a/src/org/antlr/codegen/templates/Python/AST.stg
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* in sync with Java/AST.stg revision 36 */
-
-group AST;
-
-finishedBacktracking(block) ::= <<
-<if(backtracking)>
-if self.backtracking == 0:
-    <block>
-<else>
-<block>
-<endif>
->>
-
- at outputFile.imports() ::= <<
-<@super.imports()>
-<if(!TREE_PARSER)><! tree parser would already have imported !>
-from antlr3.tree import *<\n>
-<endif>
->>
-
- at genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-self.adaptor = CommonTreeAdaptor()<\n>
->>
-
- at returnScope.ruleReturnMembers() ::= <<
-self.tree = None
->>
-
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-root_0 = None<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<ruleDescriptor.tokenLabels:{<it.label.text>_tree = None}; separator="\n">
-<ruleDescriptor.tokenListLabels:{<it.label.text>_tree = None}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{stream_<it> = RewriteRuleTokenStream(self.adaptor, "token <it>")}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "rule <it>")}; separator="\n">
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<finishedBacktracking({
-retval.tree = self.adaptor.rulePostProcessing(root_0)
-self.adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)})>
-<endif>
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  These should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
- at alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-root_0 = self.adaptor.nil()<\n>
-<else>
-<endif>
-<endif>
->>
-
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-
-<label>_tree = self.adaptor.createWithPayload(<label>)
-self.adaptor.addChild(root_0, <label>_tree)<\n>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<finishedBacktracking({
-<label>_tree = self.adaptor.createWithPayload(<label>)
-root_0 = self.adaptor.becomeRoot(<label>_tree, root_0)})>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefBang(...)>
-<listLabel(...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(...)>
->>
-
-/** ID but track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<finishedBacktracking({stream_<token>.add(<label>)})>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefTrack(...)>
-<listLabel(...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated
-// rather than just added on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={<finishedBacktracking({self.adaptor.addChild(root_0, self.adaptor.createWithPayload(<label>))})>})>
->>
-
-matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
-
-matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={<finishedBacktracking({<!FIXME(40,untested)!>root_0 = self.adaptor.becomeRoot(self.adaptor.createWithPayload(<label>), root_0)})>})>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<finishedBacktracking({self.adaptor.addChild(root_0, <label>.tree)})>
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<finishedBacktracking({root_0 = self.adaptor.becomeRoot(<label>.tree, root_0)})>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<finishedBacktracking({stream_<rule>.add(<label>.tree)})>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefTrack(...)>
-<listLabel(...)>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefBang(...)>
-<listLabel(...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(...)>
->>
-
-// WILDCARD AST
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<finishedBacktracking({
-<label>_tree = self.adaptor.createWithPayload(<label>)
-self.adaptor.addChild(root_0, <label>_tree)})>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<finishedBacktracking({
-<label>_tree = self.adaptor.createWithPayload(<label>)
-root_0 = self.adaptor.becomeRoot(<label>_tree, root_0)})>
->>
-
-// TODO: ugh, am i really missing the combinations for Track and ListLabel?
-// there's got to be a better way
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-# AST Rewrite
-# elements: <referencedElementsDeep; separator=", ">
-# token labels: <referencedTokenLabels; separator=", ">
-# rule labels: <referencedRuleLabels; separator=", ">
-# token list labels: <referencedTokenListLabels; separator=", ">
-# rule list labels: <referencedRuleListLabels; separator=", ">
-<finishedBacktracking({
-<prevRuleRootRef()>.tree = root_0
-<rewriteCodeLabels()>
-root_0 = self.adaptor.nil()
-<first(alts):rewriteAltFirst(); anchor>
-
-<rest(alts):{a | el<rewriteAltRest(a)>}; anchor, separator="\n\n">})>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{stream_<it> = RewriteRuleTokenStream(self.adaptor, "token <it>", <it>)};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{stream_<it> = RewriteRuleTokenStream(self.adaptor, "token <it>", list_<it>)};
-    separator="\n"
->
-<referencedRuleLabels
-    :{
-if <it> is not None:
-    stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "token <it>", <it>.tree)
-else:
-    stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "token <it>", None)
-};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{stream_<it> = RewriteRuleSubtreeStream(self.adaptor, "token <it>", list_<it>)};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather shallow like other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-# <fileName>:<description>
-if <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator=" or ">:
-    <alt>
-
-<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-# <fileName>:<description>
-while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
-    <alt>
-
-<referencedElements:{el | stream_<el>.reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-# <fileName>:<description>
-if not (<referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">):
-    raise RewriteEarlyExitException()
-
-while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
-    <alt>
-
-<referencedElements:{el | stream_<el>.reset()<\n>}>
->>
-
-rewriteAltRest(a) ::= <<
-<if(a.pred)>
-if <a.pred>:
-    # <a.description>
-    <a.alt>
-<else>
-se: <! little hack to get if .. elif .. else block right !>
-    # <a.description>
-    <a.alt>
-<endif>
->>
-
-rewriteAltFirst(a) ::= <<
-<if(a.pred)>
-if <a.pred>:
-    # <a.description>
-    <a.alt>
-<else>
-# <a.description>
-<a.alt>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = self.adaptor.nil()"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-# <fileName>:<description>
-root_<treeLevel> = self.adaptor.nil()
-<root:rewriteElement()>
-<children:rewriteElement()>
-self.adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen()>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,args) ::= <<
-self.adaptor.addChild(root_<treeLevel>, <if(args)>self.adaptor.createFromToken(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>)<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-self.adaptor.addChild(root_<treeLevel>, stream_<label>.next())<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-self.adaptor.addChild(root_<treeLevel>, stream_<label>.next())<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = self.adaptor.becomeRoot(stream_<label>.next(), root_<treeLevel>)<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,args) ::= <<
-root_<treeLevel> = self.adaptor.becomeRoot(<if(args)>self.adaptor.createFromToken(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>, root_<treeLevel>)<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
-<! need to call different adaptor.create*() methods depending of argument count !>
-self.adaptor.addChild(root_<treeLevel>, <if (!args)>self.adaptor.createFromType(<token>, "<token>")
-<else><if (!rest(args))>self.adaptor.createFromToken(<token>, <first(args)>)
-<else><if (!rest(rest(args)))>self.adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
-<endif>
-<endif>
-<endif>)<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
-<! need to call different adaptor.create*() methods depending of argument count !>
-root_<treeLevel> = self.adaptor.becomeRoot(<if (!args)>self.adaptor.createFromType(<token>, "<token>")
-<else><if (!rest(args))>self.adaptor.createFromToken(<token>, <first(args)>)
-<else><if (!rest(rest(args)))>self.adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
-<endif>
-<endif>
-<endif>, root_<treeLevel>)<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-<!FIXME(96,untested)!>
-root_0 = <action><\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  let's us refer to $rule to mean previous value.  I am reusing the
- *  variable 'tree' sitting in retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assign will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-self.adaptor.addChild(root_<treeLevel>, stream_<rule>.next())<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = self.adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>)<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-self.adaptor.addChild(root_<treeLevel>, <action>)<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = self.adaptor.becomeRoot(<action>, root_<treeLevel>)<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-self.adaptor.addChild(root_<treeLevel>, stream_<label>.next())<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-self.adaptor.addChild(root_<treeLevel>, stream_<label>.next().tree)<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = self.adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = self.adaptor.becomeRoot(stream_<label>.nextNode().tree, root_<treeLevel>)<\n>
->>
diff --git a/src/org/antlr/misc/Barrier.java b/src/org/antlr/misc/Barrier.java
deleted file mode 100644
index aa965e0..0000000
--- a/src/org/antlr/misc/Barrier.java
+++ /dev/null
@@ -1,35 +0,0 @@
-package org.antlr.misc;
-
-/**A very simple barrier wait.  Once a thread has requested a
- * wait on the barrier with waitForRelease, it cannot fool the
- * barrier into releasing by "hitting" the barrier multiple times--
- * the thread is blocked on the wait().
- */
-public class Barrier {
-    protected int threshold;
-    protected int count = 0;
-
-    public Barrier(int t) {
-        threshold = t;
-    }
-
-    public synchronized void waitForRelease()
-        throws InterruptedException
-    {
-        count++;
-        // The final thread to reach barrier resets barrier and
-        // releases all threads
-        if ( count==threshold ) {
-            // notify blocked threads that threshold has been reached
-            action(); // perform the requested operation
-            notifyAll();
-        }
-        else while ( count<threshold ) {
-            wait();
-        }
-    }
-
-    /** What to do when everyone reaches barrier */
-    public void action() {
-    }
-}
diff --git a/src/org/antlr/misc/Interval.java b/src/org/antlr/misc/Interval.java
deleted file mode 100644
index 76589ed..0000000
--- a/src/org/antlr/misc/Interval.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.misc;
-
-/** An immutable inclusive interval a..b */
-public class Interval {
-	public static final int INTERVAL_POOL_MAX_VALUE = 1000;
-	static Interval[] intervals = new Interval[INTERVAL_POOL_MAX_VALUE+1];
-
-    public int a;
-    public int b;
-
-    public Interval(int a, int b) { this.a=a; this.b=b; }
-
-	/** Interval objects are used readonly so share all with the
-	 *  same single value a==b up to some max size.  Use an array as a perfect hash.
-	 *  Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new
-	 *  Interval object with a..a in it.  On Java.g, 218623 IntervalSets
-	 *  have a..a (set with 1 element).
-	public static Interval create(int a, int b) {
-		if ( a!=b || a<0 || a>INTERVAL_POOL_MAX_VALUE ) {
-			return new Interval(a,b);
-		}
-		if ( intervals[a]==null ) {
-			intervals[a] = new Interval(a,a);
-		}
-		return intervals[a];
-	}
-	 ACK!  Fuond out that add() actually modifies intervals. :(
-	 */
-
-	public static Interval create(int a, int b) { return new Interval(a,b); }
-
-	public boolean equals(Object o) {
-		if ( o==null ) {
-			return false;
-		}
-        Interval other = (Interval)o;
-        return this.a==other.a && this.b==other.b;
-    }
-
-    /** Does this start completely before other? Disjoint */
-    public boolean startsBeforeDisjoint(Interval other) {
-        return this.a<other.a && this.b<other.a;
-    }
-
-    /** Does this start at or before other? Nondisjoint */
-    public boolean startsBeforeNonDisjoint(Interval other) {
-        return this.a<=other.a && this.b>=other.a;
-    }
-
-    /** Does this.a start after other.b? May or may not be disjoint */
-    public boolean startsAfter(Interval other) { return this.a>other.a; }
-
-    /** Does this start completely after other? Disjoint */
-    public boolean startsAfterDisjoint(Interval other) {
-        return this.a>other.b;
-    }
-
-    /** Does this start after other? NonDisjoint */
-    public boolean startsAfterNonDisjoint(Interval other) {
-        return this.a>other.a && this.a<=other.b; // this.b>=other.b implied
-    }
-
-    /** Are both ranges disjoint? I.e., no overlap? */
-    public boolean disjoint(Interval other) {
-        return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
-    }
-
-    /** Are two intervals adjacent such as 0..41 and 42..42? */
-    public boolean adjacent(Interval other) {
-        return this.a == other.b+1 || this.b == other.a-1;
-    }
-
-    public boolean properlyContains(Interval other) {
-        return other.a >= this.a && other.b <= this.b;
-    }
-
-    /** Return the interval computed from combining this and other */
-    public Interval union(Interval other) {
-        return new Interval(Math.min(a,other.a), Math.max(b,other.b));
-    }
-
-    /** Return the interval in common between this and o */
-    public Interval intersection(Interval other) {
-        return new Interval(Math.max(a,other.a), Math.min(b,other.b));
-    }
-
-    /** Return the interval with elements from this not in other;
-     *  other must not be totally enclosed (properly contained)
-     *  within this, which would result in two disjoint intervals
-     *  instead of the single one returned by this method.
-     */
-    public Interval differenceNotProperlyContained(Interval other) {
-        Interval diff = null;
-        // other.a to left of this.a (or same)
-        if ( other.startsBeforeNonDisjoint(this) ) {
-            diff = new Interval(Math.max(this.a,other.b+1),
-                                this.b);
-        }
-
-        // other.a to right of this.a
-        else if ( other.startsAfterNonDisjoint(this) ) {
-            diff = new Interval(this.a, other.a-1);
-        }
-        return diff;
-    }
-
-    public String toString() {
-        return a+".."+b;
-    }
-}
diff --git a/src/org/antlr/misc/MutableInteger.java b/src/org/antlr/misc/MutableInteger.java
deleted file mode 100644
index ae80407..0000000
--- a/src/org/antlr/misc/MutableInteger.java
+++ /dev/null
@@ -1,15 +0,0 @@
-package org.antlr.misc;
-
-/** Java won't let you modify an Integer; not sure how that's more
- *  efficient, but...here's one that let's you modify it.
- *  Frightening I have to implement this myself. Blech.
- */
-public class MutableInteger {
-	public int value;
-	public MutableInteger() {
-		this(0);
-	}
-	public MutableInteger(int value) {
-		this.value = value;
-	}
-}
diff --git a/src/org/antlr/test/BaseTest.java b/src/org/antlr/test/BaseTest.java
deleted file mode 100644
index f6903ee..0000000
--- a/src/org/antlr/test/BaseTest.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.test;
-
-import junit.framework.TestCase;
-import org.antlr.Tool;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Message;
-
-import java.io.*;
-import java.util.ArrayList;
-import java.util.List;
-
-public abstract class BaseTest extends TestCase {
-
-	public static final String jikes = null;//"/usr/bin/jikes";
-	public static final String pathSep = System.getProperty("path.separator");
-	public static final String CLASSPATH = System.getProperty("java.class.path");
-	public static final String tmpdir = new File(System.getProperty("java.io.tmpdir"), "antlr3").getAbsolutePath();
-
-	/** If error during execution, store stderr here */
-	protected String stderr;
-
-	protected Tool newTool() {
-		Tool tool = new Tool();
-		tool.setOutputDirectory(tmpdir);
-		return tool;
-	}
-
-	protected boolean compile(String fileName) {
-		String compiler = "javac";
-		String classpathOption = "-classpath";
-
-		if (jikes!=null) {
-			compiler = jikes;
-			classpathOption = "-bootclasspath";
-		}
-
-		String[] args = new String[] {
-					compiler, "-d", tmpdir,
-					classpathOption, tmpdir+pathSep+CLASSPATH,
-					tmpdir+"/"+fileName
-		};
-		String cmdLine = compiler+" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+fileName;
-		//System.out.println("compile: "+cmdLine);
-		File outputDir = new File(tmpdir);
-		try {
-			Process process =
-				Runtime.getRuntime().exec(args, null, outputDir);
-			StreamVacuum stdout = new StreamVacuum(process.getInputStream());
-			StreamVacuum stderr = new StreamVacuum(process.getErrorStream());
-			stdout.start();
-			stderr.start();
-			process.waitFor();
-			if ( stdout.toString().length()>0 ) {
-				System.err.println("compile stderr from: "+cmdLine);
-				System.err.println(stdout);
-			}
-			if ( stderr.toString().length()>0 ) {
-				System.err.println("compile stderr from: "+cmdLine);
-				System.err.println(stderr);
-			}
-			int ret = process.exitValue();
-			return ret==0;
-		}
-		catch (Exception e) {
-			System.err.println("can't exec compilation");
-			e.printStackTrace(System.err);
-			return false;
-		}
-	}
-
-	/** Return true if all is ok, no errors */
-	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) {
-		boolean allIsWell = true;
-		mkdir(tmpdir);
-		writeFile(tmpdir, fileName, grammarStr);
-		try {
-			final List options = new ArrayList();
-			if ( debug ) {
-				options.add("-debug");
-			}
-			options.add("-o");
-			options.add(tmpdir);
-			options.add("-lib");
-			options.add(tmpdir);
-			options.add(new File(tmpdir,grammarFileName).toString());
-			final String[] optionsA = new String[options.size()];
-			options.toArray(optionsA);
-			final ErrorQueue equeue = new ErrorQueue();
-			ErrorManager.setErrorListener(equeue);
-			Tool antlr = new Tool(optionsA);
-			antlr.process();
-			if ( equeue.errors.size()>0 ) {
-				allIsWell = false;
-				System.err.println("antlr reports errors from "+options);
-				for (int i = 0; i < equeue.errors.size(); i++) {
-					Message msg = (Message) equeue.errors.get(i);
-					System.err.println(msg);
-				}
-			}
-		}
-		catch (Exception e) {
-			allIsWell = false;
-			System.err.println("problems building grammar: "+e);
-			e.printStackTrace(System.err);
-		}
-		return allIsWell;
-	}
-
-	protected String execParser(String grammarFileName,
-									String grammarStr,
-									String parserName,
-									String lexerName,
-									String startRuleName,
-									String input, boolean debug)
-	{
-		eraseFiles(".class");
-		eraseFiles(".java");
-
-		rawGenerateAndBuildRecognizer(grammarFileName,
-									  grammarStr,
-									  parserName,
-									  lexerName,
-									  debug);
-		writeFile(tmpdir, "input", input);
-		boolean parserBuildsTrees =
-			grammarStr.indexOf("output=AST")>=0 ||
-			grammarStr.indexOf("output = AST")>=0;
-		boolean parserBuildsTemplate =
-			grammarStr.indexOf("output=template")>=0 ||
-			grammarStr.indexOf("output = template")>=0;
-		return rawExecRecognizer(parserName,
-								 null,
-								 lexerName,
-								 startRuleName,
-								 null,
-								 parserBuildsTrees,
-								 parserBuildsTemplate,
-								 debug);
-	}
-
-	protected String execTreeParser(String parserGrammarFileName,
-										String parserGrammarStr,
-										String parserName,
-										String treeParserGrammarFileName,
-										String treeParserGrammarStr,
-										String treeParserName,
-										String lexerName,
-										String parserStartRuleName,
-										String treeParserStartRuleName,
-										String input)
-	{
-		return execTreeParser(parserGrammarFileName,
-							  parserGrammarStr,
-							  parserName,
-							  treeParserGrammarFileName,
-							  treeParserGrammarStr,
-							  treeParserName,
-							  lexerName,
-							  parserStartRuleName,
-							  treeParserStartRuleName,
-							  input,
-							  false);
-	}
-
-	protected String execTreeParser(String parserGrammarFileName,
-										String parserGrammarStr,
-										String parserName,
-										String treeParserGrammarFileName,
-										String treeParserGrammarStr,
-										String treeParserName,
-										String lexerName,
-										String parserStartRuleName,
-										String treeParserStartRuleName,
-										String input,
-										boolean debug)
-	{
-		eraseFiles(".class");
-		eraseFiles(".java");
-
-		// build the parser
-		rawGenerateAndBuildRecognizer(parserGrammarFileName,
-									  parserGrammarStr,
-									  parserName,
-									  lexerName,
-									  debug);
-
-		// build the tree parser
-		rawGenerateAndBuildRecognizer(treeParserGrammarFileName,
-									  treeParserGrammarStr,
-									  treeParserName,
-									  lexerName,
-									  debug);
-
-		writeFile(tmpdir, "input", input);
-
-		boolean parserBuildsTrees = parserGrammarStr.indexOf("output=AST")>=0;
-		boolean parserBuildsTemplate = parserGrammarStr.indexOf("output=template")>=0;
-
-		return rawExecRecognizer(parserName,
-								 treeParserName,
-								 lexerName,
-								 parserStartRuleName,
-								 treeParserStartRuleName,
-								 parserBuildsTrees,
-								 parserBuildsTemplate,
-								 debug);
-	}
-
-	/** Return true if all is well */
-	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
-													String grammarStr,
-													String parserName,
-													String lexerName,
-													boolean debug)
-	{
-		boolean allIsWell =
-			antlr(grammarFileName, grammarFileName, grammarStr, debug);
-		if ( lexerName!=null ) {
-			boolean ok;
-			if ( parserName!=null ) {
-				ok = compile(parserName+".java");
-				if ( !ok ) { allIsWell = false; }
-			}
-			ok = compile(lexerName+".java");
-			if ( !ok ) { allIsWell = false; }
-		}
-		else {
-			boolean ok = compile(parserName+".java");
-			if ( !ok ) { allIsWell = false; }
-		}
-		return allIsWell;
-	}
-
-	protected String rawExecRecognizer(String parserName,
-											  String treeParserName,
-											  String lexerName,
-											  String parserStartRuleName,
-											  String treeParserStartRuleName,
-											  boolean parserBuildsTrees,
-											  boolean parserBuildsTemplate,
-											  boolean debug)
-	{
-		if ( parserBuildsTrees ) {
-			writeTreeTestFile(parserName,
-							  treeParserName,
-							  lexerName,
-							  parserStartRuleName,
-							  treeParserStartRuleName,
-							  debug);
-		}
-		else if ( parserBuildsTemplate ) {
-			writeTemplateTestFile(parserName,
-								  lexerName,
-								  parserStartRuleName,
-								  debug);
-		}
-		else {
-			writeTestFile(parserName,
-						  lexerName,
-						  parserStartRuleName,
-						  debug);
-		}
-
-		compile("Test.java");
-		try {
-			String[] args = new String[] {
-				"java", "-classpath", CLASSPATH+pathSep+tmpdir,
-				"Test", new File(tmpdir, "input").getAbsolutePath()
-			};
-			String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath();
-			//System.out.println("execParser: "+cmdLine);
-			this.stderr = null;
-			Process process =
-				Runtime.getRuntime().exec(args, null, new File(tmpdir));
-			StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream());
-			StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream());
-			stdoutVacuum.start();
-			stderrVacuum.start();
-			process.waitFor();
-			stdoutVacuum.join();
-			stderrVacuum.join();
-			String output = null;
-			output = stdoutVacuum.toString();
-			if ( stderrVacuum.toString().length()>0 ) {
-				this.stderr = stderrVacuum.toString();
-				System.err.println("exec parser stderrVacuum: "+ stderrVacuum);
-			}
-			return output;
-		}
-		catch (Exception e) {
-			System.err.println("can't exec parser");
-			e.printStackTrace(System.err);
-		}
-		return null;
-	}
-
-	public static class StreamVacuum implements Runnable {
-		StringBuffer buf = new StringBuffer();
-		BufferedReader in;
-		Thread sucker;
-		public StreamVacuum(InputStream in) {
-			this.in = new BufferedReader( new InputStreamReader(in) );
-		}
-		public void start() {
-			sucker = new Thread(this);
-			sucker.start();
-		}
-		public void run() {
-			try {
-				String line = in.readLine();
-				while (line!=null) {
-					buf.append(line);
-					buf.append('\n');
-					line = in.readLine();
-				}
-			}
-			catch (IOException ioe) {
-				System.err.println("can't read output from process");
-			}
-		}
-		/** wait for the thread to finish */
-		public void join() throws InterruptedException {
-			sucker.join();
-		}
-		public String toString() {
-			return buf.toString();
-		}
-	}
-
-	protected void writeFile(String dir, String fileName, String content) {
-		try {
-			File f = new File(dir, fileName);
-			FileWriter w = new FileWriter(f);
-			BufferedWriter bw = new BufferedWriter(w);
-			bw.write(content);
-			bw.close();
-			w.close();
-		}
-		catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
-	protected void mkdir(String dir) {
-		File f = new File(dir);
-		f.mkdirs();
-	}
-
-	protected void writeTestFile(String parserName,
-									 String lexerName,
-									 String parserStartRuleName,
-									 boolean debug)
-	{
-		StringTemplate outputFileST = new StringTemplate(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.runtime.tree.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        $lexerName$ lex = new $lexerName$(input);\n" +
-			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
-			"        $createParser$\n"+
-			"        parser.$parserStartRuleName$();\n" +
-			"    }\n" +
-			"}"
-			);
-		StringTemplate createParserST =
-			new StringTemplate(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new StringTemplate(
-				"        $parserName$ parser = new $parserName$(tokens);\n");
-		}
-		outputFileST.setAttribute("createParser", createParserST);
-		outputFileST.setAttribute("parserName", parserName);
-		outputFileST.setAttribute("lexerName", lexerName);
-		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.toString());
-	}
-
-	protected void writeTreeTestFile(String parserName,
-										 String treeParserName,
-										 String lexerName,
-										 String parserStartRuleName,
-										 String treeParserStartRuleName,
-										 boolean debug)
-	{
-		StringTemplate outputFileST = new StringTemplate(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.runtime.tree.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        $lexerName$ lex = new $lexerName$(input);\n" +
-			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
-			"        $createParser$\n"+
-			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
-			"        $if(!treeParserStartRuleName)$\n" +
-			"        if ( r.tree!=null )\n" +
-			"            System.out.println(((Tree)r.tree).toStringTree());\n" +
-			"        $else$\n" +
-			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
-			"        nodes.setTokenStream(tokens);\n" +
-			"        $treeParserName$ walker = new $treeParserName$(nodes);\n" +
-			"        walker.$treeParserStartRuleName$();\n" +
-			"        $endif$\n" +
-			"    }\n" +
-			"}"
-			);
-		StringTemplate createParserST =
-			new StringTemplate(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new StringTemplate(
-				"        $parserName$ parser = new $parserName$(tokens);\n");
-		}
-		outputFileST.setAttribute("createParser", createParserST);
-		outputFileST.setAttribute("parserName", parserName);
-		outputFileST.setAttribute("treeParserName", treeParserName);
-		outputFileST.setAttribute("lexerName", lexerName);
-		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
-		outputFileST.setAttribute("treeParserStartRuleName", treeParserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.toString());
-	}
-
-	protected void writeTemplateTestFile(String parserName,
-											 String lexerName,
-											 String parserStartRuleName,
-											 boolean debug)
-	{
-		StringTemplate outputFileST = new StringTemplate(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.stringtemplate.*;\n" +
-			"import org.antlr.stringtemplate.language.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"import java.io.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    static String templates =\n" +
-			"    		\"group test;\"+" +
-			"    		\"foo(x,y) ::= \\\"<x> <y>\\\"\";\n"+
-			"    static StringTemplateGroup group ="+
-			"    		new StringTemplateGroup(new StringReader(templates)," +
-			"					AngleBracketTemplateLexer.class);"+
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        $lexerName$ lex = new $lexerName$(input);\n" +
-			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
-			"        $createParser$\n"+
-			"		 parser.setTemplateLib(group);\n"+
-			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
-			"        if ( r.st!=null )\n" +
-			"            System.out.print(r.st.toString());\n" +
-			"	 	 else\n" +
-			"            System.out.print(\"\");\n" +
-			"    }\n" +
-			"}"
-			);
-		StringTemplate createParserST =
-			new StringTemplate(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new StringTemplate(
-				"        $parserName$ parser = new $parserName$(tokens);\n");
-		}
-		outputFileST.setAttribute("createParser", createParserST);
-		outputFileST.setAttribute("parserName", parserName);
-		outputFileST.setAttribute("lexerName", lexerName);
-		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.toString());
-	}
-
-	protected void eraseFiles(final String filesEndingWith) {
-		File tmpdirF = new File(tmpdir);
-		String[] files = tmpdirF.list();
-		for(int i = 0; files!=null && i < files.length; i++) {
-			if ( files[i].endsWith(filesEndingWith) ) {
-        		new File(tmpdir+"/"+files[i]).delete();
-			}
-		}
-	}
-
-	public String getFirstLineOfException() {
-		if ( this.stderr==null ) {
-			return null;
-		}
-		String[] lines = this.stderr.split("\n");
-		String prefix="Exception in thread \"main\" ";
-		return lines[0].substring(prefix.length(),lines[0].length());
-	}
-}
diff --git a/src/org/antlr/test/DebugTestRewriteAST.java b/src/org/antlr/test/DebugTestRewriteAST.java
deleted file mode 100644
index 156edeb..0000000
--- a/src/org/antlr/test/DebugTestRewriteAST.java
+++ /dev/null
@@ -1,6 +0,0 @@
-package org.antlr.test;
-
-public class DebugTestRewriteAST extends TestRewriteAST {
-	public DebugTestRewriteAST() {debug=true;}
-}
-
diff --git a/src/org/antlr/test/ErrorQueue.java b/src/org/antlr/test/ErrorQueue.java
deleted file mode 100644
index c75e900..0000000
--- a/src/org/antlr/test/ErrorQueue.java
+++ /dev/null
@@ -1,41 +0,0 @@
-package org.antlr.test;
-
-import org.antlr.tool.ANTLRErrorListener;
-import org.antlr.tool.Message;
-import org.antlr.tool.ToolMessage;
-
-import java.util.List;
-import java.util.LinkedList;
-
-public class ErrorQueue implements ANTLRErrorListener {
-	List infos = new LinkedList();
-	List errors = new LinkedList();
-	List warnings = new LinkedList();
-
-	public void info(String msg) {
-		infos.add(msg);
-	}
-
-	public void error(Message msg) {
-		errors.add(msg);
-	}
-
-	public void warning(Message msg) {
-		warnings.add(msg);
-	}
-
-	public void error(ToolMessage msg) {
-		errors.add(msg);
-	}
-
-	public int size() {
-		return infos.size() + errors.size() + warnings.size();
-	}
-
-	public String toString() {
-		return "infos: "+infos+
-			"errors: "+errors+
-			"warnings: "+warnings;
-	}
-}
-
diff --git a/src/org/antlr/test/TestCommonTreeNodeStream.java b/src/org/antlr/test/TestCommonTreeNodeStream.java
deleted file mode 100644
index b191194..0000000
--- a/src/org/antlr/test/TestCommonTreeNodeStream.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.test;
-
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.antlr.runtime.tree.Tree;
-
-/** Tests specific to CommonTreeNodeStream */
-public class TestCommonTreeNodeStream extends TestTreeNodeStream {
-	public void testPushPop() throws Exception {
-		// ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
-		// stream has 9 real + 8 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		r0.addChild(r1);
-		Tree r2 = new CommonTree(new CommonToken(104));
-		r2.addChild(new CommonTree(new CommonToken(105)));
-		r0.addChild(r2);
-		Tree r3 = new CommonTree(new CommonToken(106));
-		r3.addChild(new CommonTree(new CommonToken(107)));
-		r0.addChild(r3);
-		r0.addChild(new CommonTree(new CommonToken(108)));
-		r0.addChild(new CommonTree(new CommonToken(109)));
-
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
-		String expecting = " 101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3";
-		String found = stream.toString();
-		assertEquals(expecting, found);
-
-		// Assume we want to hit node 107 and then "call 102" then return
-
-		int indexOf102 = 2;
-		int indexOf107 = 12;
-		for (int k=1; k<=indexOf107; k++) { // consume til 107 node
-			stream.consume();
-		}
-		// CALL 102
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		stream.push(indexOf102);
-		assertEquals(102, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 102
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume DN
-		assertEquals(103, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 103
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		// RETURN
-		stream.pop();
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-	}
-
-	public void testNestedPushPop() throws Exception {
-		// ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
-		// stream has 9 real + 8 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		r0.addChild(r1);
-		Tree r2 = new CommonTree(new CommonToken(104));
-		r2.addChild(new CommonTree(new CommonToken(105)));
-		r0.addChild(r2);
-		Tree r3 = new CommonTree(new CommonToken(106));
-		r3.addChild(new CommonTree(new CommonToken(107)));
-		r0.addChild(r3);
-		r0.addChild(new CommonTree(new CommonToken(108)));
-		r0.addChild(new CommonTree(new CommonToken(109)));
-
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
-
-		// Assume we want to hit node 107 and then "call 102", which
-		// calls 104, then return
-
-		int indexOf102 = 2;
-		int indexOf107 = 12;
-		for (int k=1; k<=indexOf107; k++) { // consume til 107 node
-			stream.consume();
-		}
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		// CALL 102
-		stream.push(indexOf102);
-		assertEquals(102, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 102
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume DN
-		assertEquals(103, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 103
-
-		// CALL 104
-		int indexOf104 = 6;
-		stream.push(indexOf104);
-		assertEquals(104, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 102
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume DN
-		assertEquals(105, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 103
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		// RETURN (to UP node in 102 subtree)
-		stream.pop();
-
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		// RETURN (to empty stack)
-		stream.pop();
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-	}
-
-	public void testPushPopFromEOF() throws Exception {
-		// ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
-		// stream has 9 real + 8 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		r0.addChild(r1);
-		Tree r2 = new CommonTree(new CommonToken(104));
-		r2.addChild(new CommonTree(new CommonToken(105)));
-		r0.addChild(r2);
-		Tree r3 = new CommonTree(new CommonToken(106));
-		r3.addChild(new CommonTree(new CommonToken(107)));
-		r0.addChild(r3);
-		r0.addChild(new CommonTree(new CommonToken(108)));
-		r0.addChild(new CommonTree(new CommonToken(109)));
-
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
-
-		while ( stream.LA(1)!=Token.EOF ) {
-			stream.consume();
-		}
-		int indexOf102 = 2;
-		int indexOf104 = 6;
-		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
-
-		// CALL 102
-		stream.push(indexOf102);
-		assertEquals(102, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 102
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume DN
-		assertEquals(103, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 103
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		// RETURN (to empty stack)
-		stream.pop();
-		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
-
-		// CALL 104
-		stream.push(indexOf104);
-		assertEquals(104, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 102
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume DN
-		assertEquals(105, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 103
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		// RETURN (to empty stack)
-		stream.pop();
-		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
-	}
-
-	public void testStackStretch() throws Exception {
-		// make more than INITIAL_CALL_STACK_SIZE pushes
-		Tree r0 = new CommonTree(new CommonToken(101));
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
-		// go 1 over initial size
-		for (int i=1; i<=CommonTreeNodeStream.INITIAL_CALL_STACK_SIZE+1; i++) {
-			stream.push(i);
-		}
-		assertEquals(10, stream.pop());
-		assertEquals(9, stream.pop());
-	}
-
-}
diff --git a/src/org/antlr/test/TestMessages.java b/src/org/antlr/test/TestMessages.java
deleted file mode 100644
index e527b04..0000000
--- a/src/org/antlr/test/TestMessages.java
+++ /dev/null
@@ -1,46 +0,0 @@
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.codegen.ActionTranslatorLexer;
-import org.antlr.tool.*;
-
-
-public class TestMessages extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestMessages() {
-	}
-
-
-	public void testMessageStringificationIsConsistent() throws Exception {
-		String action = "$other.tree = null;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-			"options { output = AST;}" +
-			"otherrule\n" +
-			"    : 'y' ;" +
-			"rule\n" +
-			"    : other=otherrule {" + action +"}\n" +
-			"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
-																	"rule",
-																	new antlr.CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
-		Object expectedArg = "other";
-		Object expectedArg2 = "tree";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		String expectedMessageString = expectedMessage.toString();
-		assertEquals(expectedMessageString, expectedMessage.toString());
-	}
-}
diff --git a/src/org/antlr/test/TestTokenRewriteStream.java b/src/org/antlr/test/TestTokenRewriteStream.java
deleted file mode 100644
index 18d1190..0000000
--- a/src/org/antlr/test/TestTokenRewriteStream.java
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.test;
-
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-import org.antlr.runtime.TokenRewriteStream;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Interpreter;
-
-public class TestTokenRewriteStream extends BaseTest {
-
-    /** Public default constructor used by TestRig */
-    public TestTokenRewriteStream() {
-    }
-
-	public void testInsertBeforeIndex0() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertBefore(0, "0");
-		String result = tokens.toString();
-		String expecting = "0abc";
-		assertEquals(result, expecting);
-	}
-
-	public void testInsertAfterLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertAfter(2, "x");
-		String result = tokens.toString();
-		String expecting = "abcx";
-		assertEquals(result, expecting);
-	}
-
-	public void test2InsertBeforeAfterMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertBefore(1, "x");
-		tokens.insertAfter(1, "x");
-		String result = tokens.toString();
-		String expecting = "axbxc";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceIndex0() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(0, "x");
-		String result = tokens.toString();
-		String expecting = "xbc";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, "x");
-		String result = tokens.toString();
-		String expecting = "abx";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(1, "x");
-		String result = tokens.toString();
-		String expecting = "axc";
-		assertEquals(result, expecting);
-	}
-
-	public void test2ReplaceMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(1, "x");
-		tokens.replace(1, "y");
-		String result = tokens.toString();
-		String expecting = "ayc";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThenDeleteMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(1, "x");
-		tokens.delete(1);
-		String result = tokens.toString();
-		String expecting = "ac";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThenInsertSameIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(0, "x");
-		tokens.insertBefore(0, "0");
-		String result = tokens.toString();
-		String expecting = "0xbc";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThen2InsertSameIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(0, "x");
-		tokens.insertBefore(0, "y");
-		tokens.insertBefore(0, "z");
-		String result = tokens.toString();
-		String expecting = "zyxbc";
-		assertEquals(result, expecting);
-	}
-
-	public void testInsertThenReplaceSameIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertBefore(0, "0");
-		tokens.replace(0, "x");
-		String result = tokens.toString();
-		String expecting = "0xbc";
-		assertEquals(result, expecting);
-	}
-
-	public void test2InsertMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertBefore(1, "x");
-		tokens.insertBefore(1, "y");
-		String result = tokens.toString();
-		String expecting = "ayxbc";
-		assertEquals(result, expecting);
-	}
-
-	public void test2InsertThenReplaceIndex0() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertBefore(0, "x");
-		tokens.insertBefore(0, "y");
-		tokens.replace(0, "z");
-		String result = tokens.toString();
-		String expecting = "yxzbc";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThenInsertBeforeLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, "x");
-		tokens.insertBefore(2, "y");
-		String result = tokens.toString();
-		String expecting = "abyx";
-		assertEquals(result, expecting);
-	}
-
-	public void testInsertThenReplaceLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.insertBefore(2, "y");
-		tokens.replace(2, "x");
-		String result = tokens.toString();
-		String expecting = "abyx";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThenInsertAfterLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, "x");
-		tokens.insertAfter(2, "y");
-		String result = tokens.toString();
-		String expecting = "abxy";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceRangeThenInsertInMiddle() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "x");
-		tokens.insertBefore(3, "y"); // no effect; can't insert in middle of replaced region
-		String result = tokens.toString();
-		String expecting = "abxba";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "x");
-		tokens.insertBefore(2, "y");
-		String result = tokens.toString();
-		String expecting = "abyxba";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "x");
-		tokens.insertBefore(4, "y"); // no effect; within range of a replace
-		String result = tokens.toString();
-		String expecting = "abxba";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "x");
-		tokens.insertAfter(4, "y");
-		String result = tokens.toString();
-		String expecting = "abxyba";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceAll() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(0, 6, "x");
-		String result = tokens.toString();
-		String expecting = "x";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceSubsetThenFetch() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "xyz");
-		String result = tokens.toString(0,6);
-		String expecting = "abxyzba";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThenReplaceSuperset() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "xyz");
-		tokens.replace(2, 5, "foo"); // kills previous replace
-		String result = tokens.toString();
-		String expecting = "abfooa";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 4, "xyz");
-		tokens.replace(1, 3, "foo"); // executes first since 1<2; then ignores replace at 2 as it skips over 1..3
-		String result = tokens.toString();
-		String expecting = "afoocba";
-		assertEquals(result, expecting);
-	}
-
-	public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.LT(1); // fill buffer
-		tokens.replace(2, 2, "xyz");
-		tokens.replace(0, 3, "foo");
-		String result = tokens.toString();
-		String expecting = "fooa";
-		assertEquals(result, expecting);
-	}
-
-}
diff --git a/src/org/antlr/test/TestUnBufferedTreeNodeStream.java b/src/org/antlr/test/TestUnBufferedTreeNodeStream.java
deleted file mode 100644
index 8baaeee..0000000
--- a/src/org/antlr/test/TestUnBufferedTreeNodeStream.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.test;
-
-import org.antlr.runtime.tree.*;
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-
-/**
- * Created by IntelliJ IDEA.
- * User: parrt
- * Date: Dec 22, 2006
- * Time: 11:47:55 AM
- * To change this template use File | Settings | File Templates.
- */
-public class TestUnBufferedTreeNodeStream extends TestTreeNodeStream {
-
-	public TreeNodeStream newStream(Object t) {
-		return new UnBufferedTreeNodeStream(t);
-	}
-
-	public void testBufferOverflow() throws Exception {
-		StringBuffer buf = new StringBuffer();
-		StringBuffer buf2 = new StringBuffer();
-		// make ^(101 102 ... n)
-		Tree t = new CommonTree(new CommonToken(101));
-		buf.append(" 101");
-		buf2.append(" 101");
-		buf2.append(" ");
-		buf2.append(Token.DOWN);
-		for (int i=0; i<= UnBufferedTreeNodeStream.INITIAL_LOOKAHEAD_BUFFER_SIZE+10; i++) {
-			t.addChild(new CommonTree(new CommonToken(102+i)));
-			buf.append(" ");
-			buf.append(102+i);
-			buf2.append(" ");
-			buf2.append(102+i);
-		}
-		buf2.append(" ");
-		buf2.append(Token.UP);
-
-		TreeNodeStream stream = newStream(t);
-		String expecting = buf.toString();
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = buf2.toString();
-		found = stream.toString();
-		assertEquals(expecting, found);
-	}
-
-	/** Test what happens when tail hits the end of the buffer, but there
-	 *  is more room left.  Specifically that would mean that head is not
-	 *  at 0 but has advanced somewhere to the middle of the lookahead
-	 *  buffer.
-	 *
-	 *  Use consume() to advance N nodes into lookahead.  Then use LT()
-	 *  to load at least INITIAL_LOOKAHEAD_BUFFER_SIZE-N nodes so the
-	 *  buffer has to wrap.
-	 */
-	public void testBufferWrap() throws Exception {
-		int N = 10;
-		// make tree with types: 1 2 ... INITIAL_LOOKAHEAD_BUFFER_SIZE+N
-		Tree t = new CommonTree((Token)null);
-		for (int i=0; i<UnBufferedTreeNodeStream.INITIAL_LOOKAHEAD_BUFFER_SIZE+N; i++) {
-			t.addChild(new CommonTree(new CommonToken(i+1)));
-		}
-
-		// move head to index N
-		TreeNodeStream stream = newStream(t);
-		for (int i=1; i<=N; i++) { // consume N
-			Tree node = (Tree)stream.LT(1);
-			assertEquals(i, node.getType());
-			stream.consume();
-		}
-
-		// now use LT to lookahead past end of buffer
-		int remaining = UnBufferedTreeNodeStream.INITIAL_LOOKAHEAD_BUFFER_SIZE-N;
-		int wrapBy = 4; // wrap around by 4 nodes
-		assertTrue("bad test code; wrapBy must be less than N", wrapBy<N);
-		for (int i=1; i<=remaining+wrapBy; i++) { // wrap past end of buffer
-			Tree node = (Tree)stream.LT(i); // look ahead to ith token
-			assertEquals(N + i, node.getType());
-		}
-	}
-
-}
diff --git a/src/org/antlr/tool/ANTLRLexer.java b/src/org/antlr/tool/ANTLRLexer.java
deleted file mode 100644
index 5e94d77..0000000
--- a/src/org/antlr/tool/ANTLRLexer.java
+++ /dev/null
@@ -1,1794 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRLexer.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.tool;
-import java.util.*;
-import java.io.*;
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import antlr.*;
-
-import java.io.InputStream;
-import antlr.TokenStreamException;
-import antlr.TokenStreamIOException;
-import antlr.TokenStreamRecognitionException;
-import antlr.CharStreamException;
-import antlr.CharStreamIOException;
-import antlr.ANTLRException;
-import java.io.Reader;
-import java.util.Hashtable;
-import antlr.CharScanner;
-import antlr.InputBuffer;
-import antlr.ByteBuffer;
-import antlr.CharBuffer;
-import antlr.Token;
-import antlr.CommonToken;
-import antlr.RecognitionException;
-import antlr.NoViableAltForCharException;
-import antlr.MismatchedCharException;
-import antlr.TokenStream;
-import antlr.ANTLRHashString;
-import antlr.LexerSharedInputState;
-import antlr.collections.impl.BitSet;
-import antlr.SemanticException;
-
-public class ANTLRLexer extends antlr.CharScanner implements ANTLRTokenTypes, TokenStream
- {
-
-    /** advance the current column number by one; don't do tabs.
-     *  we want char position in line to be sent to AntlrWorks.
-     */
-    public void tab() {
-		setColumn( getColumn()+1 );
-    }
-public ANTLRLexer(InputStream in) {
-	this(new ByteBuffer(in));
-}
-public ANTLRLexer(Reader in) {
-	this(new CharBuffer(in));
-}
-public ANTLRLexer(InputBuffer ib) {
-	this(new LexerSharedInputState(ib));
-}
-public ANTLRLexer(LexerSharedInputState state) {
-	super(state);
-	caseSensitiveLiterals = true;
-	setCaseSensitive(true);
-	literals = new Hashtable();
-	literals.put(new ANTLRHashString("lexer", this), new Integer(40));
-	literals.put(new ANTLRHashString("scope", this), new Integer(32));
-	literals.put(new ANTLRHashString("finally", this), new Integer(64));
-	literals.put(new ANTLRHashString("throws", this), new Integer(58));
-	literals.put(new ANTLRHashString("fragment", this), new Integer(36));
-	literals.put(new ANTLRHashString("private", this), new Integer(54));
-	literals.put(new ANTLRHashString("grammar", this), new Integer(42));
-	literals.put(new ANTLRHashString("tokens", this), new Integer(5));
-	literals.put(new ANTLRHashString("options", this), new Integer(4));
-	literals.put(new ANTLRHashString("parser", this), new Integer(6));
-	literals.put(new ANTLRHashString("tree", this), new Integer(41));
-	literals.put(new ANTLRHashString("protected", this), new Integer(52));
-	literals.put(new ANTLRHashString("returns", this), new Integer(57));
-	literals.put(new ANTLRHashString("public", this), new Integer(53));
-	literals.put(new ANTLRHashString("catch", this), new Integer(63));
-}
-
-public Token nextToken() throws TokenStreamException {
-	Token theRetToken=null;
-tryAgain:
-	for (;;) {
-		Token _token = null;
-		int _ttype = Token.INVALID_TYPE;
-		resetText();
-		try {   // for char stream error handling
-			try {   // for lexical error handling
-				switch ( LA(1)) {
-				case '\t':  case '\n':  case '\r':  case ' ':
-				{
-					mWS(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '/':
-				{
-					mCOMMENT(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '>':
-				{
-					mCLOSE_ELEMENT_OPTION(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '@':
-				{
-					mAMPERSAND(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case ',':
-				{
-					mCOMMA(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '?':
-				{
-					mQUESTION(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '(':
-				{
-					mLPAREN(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case ')':
-				{
-					mRPAREN(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case ':':
-				{
-					mCOLON(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '*':
-				{
-					mSTAR(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '-':
-				{
-					mREWRITE(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case ';':
-				{
-					mSEMI(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '!':
-				{
-					mBANG(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '|':
-				{
-					mOR(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '~':
-				{
-					mNOT(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '}':
-				{
-					mRCURLY(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '$':
-				{
-					mDOLLAR(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '\'':
-				{
-					mCHAR_LITERAL(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '"':
-				{
-					mDOUBLE_QUOTE_STRING_LITERAL(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '0':  case '1':  case '2':  case '3':
-				case '4':  case '5':  case '6':  case '7':
-				case '8':  case '9':
-				{
-					mINT(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '[':
-				{
-					mARG_ACTION(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case '{':
-				{
-					mACTION(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case 'A':  case 'B':  case 'C':  case 'D':
-				case 'E':  case 'F':  case 'G':  case 'H':
-				case 'I':  case 'J':  case 'K':  case 'L':
-				case 'M':  case 'N':  case 'O':  case 'P':
-				case 'Q':  case 'R':  case 'S':  case 'T':
-				case 'U':  case 'V':  case 'W':  case 'X':
-				case 'Y':  case 'Z':
-				{
-					mTOKEN_REF(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				case 'a':  case 'b':  case 'c':  case 'd':
-				case 'e':  case 'f':  case 'g':  case 'h':
-				case 'i':  case 'j':  case 'k':  case 'l':
-				case 'm':  case 'n':  case 'o':  case 'p':
-				case 'q':  case 'r':  case 's':  case 't':
-				case 'u':  case 'v':  case 'w':  case 'x':
-				case 'y':  case 'z':
-				{
-					mRULE_REF(true);
-					theRetToken=_returnToken;
-					break;
-				}
-				default:
-					if ((LA(1)=='^') && (LA(2)=='(')) {
-						mTREE_BEGIN(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='+') && (LA(2)=='=')) {
-						mPLUS_ASSIGN(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='=') && (LA(2)=='>')) {
-						mIMPLIES(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='.') && (LA(2)=='.')) {
-						mRANGE(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='<') && (LA(2)=='<')) {
-						mDOUBLE_ANGLE_STRING_LITERAL(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='<') && (true)) {
-						mOPEN_ELEMENT_OPTION(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='+') && (true)) {
-						mPLUS(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='=') && (true)) {
-						mASSIGN(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='^') && (true)) {
-						mROOT(true);
-						theRetToken=_returnToken;
-					}
-					else if ((LA(1)=='.') && (true)) {
-						mWILDCARD(true);
-						theRetToken=_returnToken;
-					}
-				else {
-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
-				}
-				}
-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
-				_ttype = _returnToken.getType();
-				_returnToken.setType(_ttype);
-				return _returnToken;
-			}
-			catch (RecognitionException e) {
-				throw new TokenStreamRecognitionException(e);
-			}
-		}
-		catch (CharStreamException cse) {
-			if ( cse instanceof CharStreamIOException ) {
-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
-			}
-			else {
-				throw new TokenStreamException(cse.getMessage());
-			}
-		}
-	}
-}
-
-	public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = WS;
-		int _saveIndex;
-		
-		{
-		switch ( LA(1)) {
-		case ' ':
-		{
-			match(' ');
-			break;
-		}
-		case '\t':
-		{
-			match('\t');
-			break;
-		}
-		case '\n':  case '\r':
-		{
-			{
-			switch ( LA(1)) {
-			case '\r':
-			{
-				match('\r');
-				break;
-			}
-			case '\n':
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-			}
-			}
-			}
-			match('\n');
-			if ( inputState.guessing==0 ) {
-				newline();
-			}
-			break;
-		}
-		default:
-		{
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		}
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = COMMENT;
-		int _saveIndex;
-		Token t=null;
-		
-		{
-		if ((LA(1)=='/') && (LA(2)=='/')) {
-			mSL_COMMENT(false);
-		}
-		else if ((LA(1)=='/') && (LA(2)=='*')) {
-			mML_COMMENT(true);
-			t=_returnToken;
-			if ( inputState.guessing==0 ) {
-				_ttype = t.getType();
-			}
-		}
-		else {
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = SL_COMMENT;
-		int _saveIndex;
-		
-		match("//");
-		{
-		boolean synPredMatched139 = false;
-		if (((LA(1)==' ') && (LA(2)=='$'))) {
-			int _m139 = mark();
-			synPredMatched139 = true;
-			inputState.guessing++;
-			try {
-				{
-				match(" $ANTLR");
-				}
-			}
-			catch (RecognitionException pe) {
-				synPredMatched139 = false;
-			}
-			rewind(_m139);
-inputState.guessing--;
-		}
-		if ( synPredMatched139 ) {
-			match(" $ANTLR ");
-			mSRC(false);
-			{
-			switch ( LA(1)) {
-			case '\r':
-			{
-				match('\r');
-				break;
-			}
-			case '\n':
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-			}
-			}
-			}
-			match('\n');
-		}
-		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
-			{
-			_loop142:
-			do {
-				// nongreedy exit test
-				if ((LA(1)=='\n'||LA(1)=='\r') && (true)) break _loop142;
-				if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-					matchNot(EOF_CHAR);
-				}
-				else {
-					break _loop142;
-				}
-				
-			} while (true);
-			}
-			{
-			switch ( LA(1)) {
-			case '\r':
-			{
-				match('\r');
-				break;
-			}
-			case '\n':
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-			}
-			}
-			}
-			match('\n');
-		}
-		else {
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		
-		}
-		if ( inputState.guessing==0 ) {
-			newline();
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ML_COMMENT;
-		int _saveIndex;
-		
-		match("/*");
-		{
-		if (((LA(1)=='*') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&( LA(2)!='/' )) {
-			match('*');
-			if ( inputState.guessing==0 ) {
-				_ttype = DOC_COMMENT;
-			}
-		}
-		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-		}
-		else {
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		
-		}
-		{
-		_loop148:
-		do {
-			// nongreedy exit test
-			if ((LA(1)=='*') && (LA(2)=='/')) break _loop148;
-			switch ( LA(1)) {
-			case '\r':
-			{
-				match('\r');
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			case '\n':
-			{
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			default:
-				if ((_tokenSet_0.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-					{
-					match(_tokenSet_0);
-					}
-				}
-			else {
-				break _loop148;
-			}
-			}
-		} while (true);
-		}
-		match("*/");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-/** Reset the file and line information; useful when the grammar
- *  has been generated so that errors are shown relative to the
- *  original file like the old C preprocessor used to do.
- */
-	protected final void mSRC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = SRC;
-		int _saveIndex;
-		Token file=null;
-		Token line=null;
-		
-		match("src");
-		match(' ');
-		mACTION_STRING_LITERAL(true);
-		file=_returnToken;
-		match(' ');
-		mINT(true);
-		line=_returnToken;
-		if ( inputState.guessing==0 ) {
-			
-					newline();
-					setFilename(file.getText().substring(1,file.getText().length()-1));
-					setLine(Integer.parseInt(line.getText())-1);  // -1 because SL_COMMENT will increment the line no. KR
-					_ttype = Token.SKIP; // don't let this go to the parser
-					
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mOPEN_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = OPEN_ELEMENT_OPTION;
-		int _saveIndex;
-		
-		match('<');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mCLOSE_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = CLOSE_ELEMENT_OPTION;
-		int _saveIndex;
-		
-		match('>');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mAMPERSAND(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = AMPERSAND;
-		int _saveIndex;
-		
-		match('@');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mCOMMA(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = COMMA;
-		int _saveIndex;
-		
-		match(',');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mQUESTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = QUESTION;
-		int _saveIndex;
-		
-		match('?');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mTREE_BEGIN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = TREE_BEGIN;
-		int _saveIndex;
-		
-		match("^(");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = LPAREN;
-		int _saveIndex;
-		
-		match('(');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = RPAREN;
-		int _saveIndex;
-		
-		match(')');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mCOLON(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = COLON;
-		int _saveIndex;
-		
-		match(':');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mSTAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = STAR;
-		int _saveIndex;
-		
-		match('*');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mPLUS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = PLUS;
-		int _saveIndex;
-		
-		match('+');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ASSIGN;
-		int _saveIndex;
-		
-		match('=');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mPLUS_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = PLUS_ASSIGN;
-		int _saveIndex;
-		
-		match("+=");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mIMPLIES(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = IMPLIES;
-		int _saveIndex;
-		
-		match("=>");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mREWRITE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = REWRITE;
-		int _saveIndex;
-		
-		match("->");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mSEMI(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = SEMI;
-		int _saveIndex;
-		
-		match(';');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mROOT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ROOT;
-		int _saveIndex;
-		
-		match('^');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mBANG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = BANG;
-		int _saveIndex;
-		
-		match('!');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = OR;
-		int _saveIndex;
-		
-		match('|');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mWILDCARD(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = WILDCARD;
-		int _saveIndex;
-		
-		match('.');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mRANGE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = RANGE;
-		int _saveIndex;
-		
-		match("..");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mNOT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = NOT;
-		int _saveIndex;
-		
-		match('~');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mRCURLY(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = RCURLY;
-		int _saveIndex;
-		
-		match('}');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mDOLLAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = DOLLAR;
-		int _saveIndex;
-		
-		match('$');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mCHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = CHAR_LITERAL;
-		int _saveIndex;
-		
-		match('\'');
-		{
-		_loop175:
-		do {
-			switch ( LA(1)) {
-			case '\\':
-			{
-				mESC(false);
-				break;
-			}
-			case '\n':
-			{
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			default:
-				if ((_tokenSet_1.member(LA(1)))) {
-					matchNot('\'');
-				}
-			else {
-				break _loop175;
-			}
-			}
-		} while (true);
-		}
-		match('\'');
-		if ( inputState.guessing==0 ) {
-			
-					StringBuffer s = Grammar.getUnescapedStringFromGrammarStringLiteral(new String(text.getBuffer(),_begin,text.length()-_begin));
-					if ( s.length()>1 ) {
-						_ttype = STRING_LITERAL;
-					}
-					
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ESC;
-		int _saveIndex;
-		
-		match('\\');
-		{
-		if ((LA(1)=='n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('n');
-		}
-		else if ((LA(1)=='r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('r');
-		}
-		else if ((LA(1)=='t') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('t');
-		}
-		else if ((LA(1)=='b') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('b');
-		}
-		else if ((LA(1)=='f') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('f');
-		}
-		else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('"');
-		}
-		else if ((LA(1)=='\'') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('\'');
-		}
-		else if ((LA(1)=='\\') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('\\');
-		}
-		else if ((LA(1)=='>') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			match('>');
-		}
-		else if (((LA(1) >= '0' && LA(1) <= '3')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			{
-			matchRange('0','3');
-			}
-			{
-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				{
-				matchRange('0','9');
-				}
-				{
-				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-					matchRange('0','9');
-				}
-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
-				}
-				else {
-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-				}
-				
-				}
-			}
-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
-			}
-			else {
-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-			}
-			
-			}
-		}
-		else if (((LA(1) >= '4' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			{
-			matchRange('4','7');
-			}
-			{
-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				{
-				matchRange('0','9');
-				}
-			}
-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
-			}
-			else {
-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-			}
-			
-			}
-		}
-		else if ((LA(1)=='u') && (_tokenSet_2.member(LA(2)))) {
-			match('u');
-			mXDIGIT(false);
-			mXDIGIT(false);
-			mXDIGIT(false);
-			mXDIGIT(false);
-		}
-		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-			matchNot(EOF_CHAR);
-		}
-		else {
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mDOUBLE_QUOTE_STRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = DOUBLE_QUOTE_STRING_LITERAL;
-		int _saveIndex;
-		
-		match('"');
-		{
-		_loop178:
-		do {
-			switch ( LA(1)) {
-			case '\\':
-			{
-				_saveIndex=text.length();
-				match('\\');
-				text.setLength(_saveIndex);
-				match('"');
-				break;
-			}
-			case '\n':
-			{
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			default:
-				if ((_tokenSet_3.member(LA(1)))) {
-					matchNot('"');
-				}
-			else {
-				break _loop178;
-			}
-			}
-		} while (true);
-		}
-		match('"');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mDOUBLE_ANGLE_STRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = DOUBLE_ANGLE_STRING_LITERAL;
-		int _saveIndex;
-		
-		match("<<");
-		{
-		_loop181:
-		do {
-			// nongreedy exit test
-			if ((LA(1)=='>') && (LA(2)=='>')) break _loop181;
-			if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-			}
-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				matchNot(EOF_CHAR);
-			}
-			else {
-				break _loop181;
-			}
-			
-		} while (true);
-		}
-		match(">>");
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = XDIGIT;
-		int _saveIndex;
-		
-		switch ( LA(1)) {
-		case '0':  case '1':  case '2':  case '3':
-		case '4':  case '5':  case '6':  case '7':
-		case '8':  case '9':
-		{
-			matchRange('0','9');
-			break;
-		}
-		case 'a':  case 'b':  case 'c':  case 'd':
-		case 'e':  case 'f':
-		{
-			matchRange('a','f');
-			break;
-		}
-		case 'A':  case 'B':  case 'C':  case 'D':
-		case 'E':  case 'F':
-		{
-			matchRange('A','F');
-			break;
-		}
-		default:
-		{
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = DIGIT;
-		int _saveIndex;
-		
-		matchRange('0','9');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = INT;
-		int _saveIndex;
-		
-		{
-		int _cnt195=0;
-		_loop195:
-		do {
-			if (((LA(1) >= '0' && LA(1) <= '9'))) {
-				matchRange('0','9');
-			}
-			else {
-				if ( _cnt195>=1 ) { break _loop195; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
-			}
-			
-			_cnt195++;
-		} while (true);
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ARG_ACTION;
-		int _saveIndex;
-		
-		mNESTED_ARG_ACTION(false);
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mNESTED_ARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = NESTED_ARG_ACTION;
-		int _saveIndex;
-		
-		_saveIndex=text.length();
-		match('[');
-		text.setLength(_saveIndex);
-		{
-		_loop199:
-		do {
-			switch ( LA(1)) {
-			case '[':
-			{
-				mNESTED_ARG_ACTION(false);
-				break;
-			}
-			case '\r':
-			{
-				match('\r');
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			case '\n':
-			{
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			case '"':
-			{
-				mACTION_STRING_LITERAL(false);
-				break;
-			}
-			default:
-				if ((_tokenSet_4.member(LA(1)))) {
-					matchNot(']');
-				}
-			else {
-				break _loop199;
-			}
-			}
-		} while (true);
-		}
-		_saveIndex=text.length();
-		match(']');
-		text.setLength(_saveIndex);
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mACTION_STRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ACTION_STRING_LITERAL;
-		int _saveIndex;
-		
-		match('"');
-		{
-		_loop211:
-		do {
-			switch ( LA(1)) {
-			case '\\':
-			{
-				mACTION_ESC(false);
-				break;
-			}
-			case '\n':
-			{
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			default:
-				if ((_tokenSet_3.member(LA(1)))) {
-					matchNot('"');
-				}
-			else {
-				break _loop211;
-			}
-			}
-		} while (true);
-		}
-		match('"');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ACTION;
-		int _saveIndex;
-		int actionLine=getLine(); int actionColumn = getColumn();
-		
-		mNESTED_ACTION(false);
-		{
-		if ((LA(1)=='?')) {
-			_saveIndex=text.length();
-			match('?');
-			text.setLength(_saveIndex);
-			if ( inputState.guessing==0 ) {
-				_ttype = SEMPRED;
-			}
-		}
-		else {
-		}
-		
-		}
-		if ( inputState.guessing==0 ) {
-			
-						Token t = makeToken(_ttype);
-						String action = new String(text.getBuffer(),_begin,text.length()-_begin);
-						action = action.substring(1,action.length()-1);
-						t.setText(action);
-						t.setLine(actionLine);			// set action line to start
-						t.setColumn(actionColumn);
-						_token = t;
-					
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mNESTED_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = NESTED_ACTION;
-		int _saveIndex;
-		
-		match('{');
-		{
-		_loop205:
-		do {
-			// nongreedy exit test
-			if ((LA(1)=='}') && (true)) break _loop205;
-			if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				{
-				switch ( LA(1)) {
-				case '\r':
-				{
-					match('\r');
-					match('\n');
-					if ( inputState.guessing==0 ) {
-						newline();
-					}
-					break;
-				}
-				case '\n':
-				{
-					match('\n');
-					if ( inputState.guessing==0 ) {
-						newline();
-					}
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-				}
-				}
-				}
-			}
-			else if ((LA(1)=='{') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				mNESTED_ACTION(false);
-			}
-			else if ((LA(1)=='\'') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				mACTION_CHAR_LITERAL(false);
-			}
-			else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
-				mCOMMENT(false);
-			}
-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				mACTION_STRING_LITERAL(false);
-			}
-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
-				matchNot(EOF_CHAR);
-			}
-			else {
-				break _loop205;
-			}
-			
-		} while (true);
-		}
-		match('}');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mACTION_CHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ACTION_CHAR_LITERAL;
-		int _saveIndex;
-		
-		match('\'');
-		{
-		_loop208:
-		do {
-			switch ( LA(1)) {
-			case '\\':
-			{
-				mACTION_ESC(false);
-				break;
-			}
-			case '\n':
-			{
-				match('\n');
-				if ( inputState.guessing==0 ) {
-					newline();
-				}
-				break;
-			}
-			default:
-				if ((_tokenSet_1.member(LA(1)))) {
-					matchNot('\'');
-				}
-			else {
-				break _loop208;
-			}
-			}
-		} while (true);
-		}
-		match('\'');
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mACTION_ESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = ACTION_ESC;
-		int _saveIndex;
-		
-		if ((LA(1)=='\\') && (LA(2)=='\'')) {
-			match("\\'");
-		}
-		else if ((LA(1)=='\\') && (LA(2)=='"')) {
-			match("\\\"");
-		}
-		else if ((LA(1)=='\\') && (_tokenSet_5.member(LA(2)))) {
-			match('\\');
-			{
-			match(_tokenSet_5);
-			}
-		}
-		else {
-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
-		}
-		
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mTOKEN_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = TOKEN_REF;
-		int _saveIndex;
-		
-		matchRange('A','Z');
-		{
-		_loop216:
-		do {
-			switch ( LA(1)) {
-			case 'a':  case 'b':  case 'c':  case 'd':
-			case 'e':  case 'f':  case 'g':  case 'h':
-			case 'i':  case 'j':  case 'k':  case 'l':
-			case 'm':  case 'n':  case 'o':  case 'p':
-			case 'q':  case 'r':  case 's':  case 't':
-			case 'u':  case 'v':  case 'w':  case 'x':
-			case 'y':  case 'z':
-			{
-				matchRange('a','z');
-				break;
-			}
-			case 'A':  case 'B':  case 'C':  case 'D':
-			case 'E':  case 'F':  case 'G':  case 'H':
-			case 'I':  case 'J':  case 'K':  case 'L':
-			case 'M':  case 'N':  case 'O':  case 'P':
-			case 'Q':  case 'R':  case 'S':  case 'T':
-			case 'U':  case 'V':  case 'W':  case 'X':
-			case 'Y':  case 'Z':
-			{
-				matchRange('A','Z');
-				break;
-			}
-			case '_':
-			{
-				match('_');
-				break;
-			}
-			case '0':  case '1':  case '2':  case '3':
-			case '4':  case '5':  case '6':  case '7':
-			case '8':  case '9':
-			{
-				matchRange('0','9');
-				break;
-			}
-			default:
-			{
-				break _loop216;
-			}
-			}
-		} while (true);
-		}
-		_ttype = testLiteralsTable(_ttype);
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	public final void mRULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = RULE_REF;
-		int _saveIndex;
-		
-			int t=0;
-		
-		
-		t=mINTERNAL_RULE_REF(false);
-		if ( inputState.guessing==0 ) {
-			_ttype=t;
-		}
-		{
-		if (( true )&&(t==OPTIONS)) {
-			mWS_LOOP(false);
-			{
-			if ((LA(1)=='{')) {
-				match('{');
-				if ( inputState.guessing==0 ) {
-					_ttype = OPTIONS;
-				}
-			}
-			else {
-			}
-			
-			}
-		}
-		else if (( true )&&(t==TOKENS)) {
-			mWS_LOOP(false);
-			{
-			if ((LA(1)=='{')) {
-				match('{');
-				if ( inputState.guessing==0 ) {
-					_ttype = TOKENS;
-				}
-			}
-			else {
-			}
-			
-			}
-		}
-		else {
-		}
-		
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final int  mINTERNAL_RULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int t;
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = INTERNAL_RULE_REF;
-		int _saveIndex;
-		
-			t = RULE_REF;
-		
-		
-		matchRange('a','z');
-		{
-		_loop226:
-		do {
-			switch ( LA(1)) {
-			case 'a':  case 'b':  case 'c':  case 'd':
-			case 'e':  case 'f':  case 'g':  case 'h':
-			case 'i':  case 'j':  case 'k':  case 'l':
-			case 'm':  case 'n':  case 'o':  case 'p':
-			case 'q':  case 'r':  case 's':  case 't':
-			case 'u':  case 'v':  case 'w':  case 'x':
-			case 'y':  case 'z':
-			{
-				matchRange('a','z');
-				break;
-			}
-			case 'A':  case 'B':  case 'C':  case 'D':
-			case 'E':  case 'F':  case 'G':  case 'H':
-			case 'I':  case 'J':  case 'K':  case 'L':
-			case 'M':  case 'N':  case 'O':  case 'P':
-			case 'Q':  case 'R':  case 'S':  case 'T':
-			case 'U':  case 'V':  case 'W':  case 'X':
-			case 'Y':  case 'Z':
-			{
-				matchRange('A','Z');
-				break;
-			}
-			case '_':
-			{
-				match('_');
-				break;
-			}
-			case '0':  case '1':  case '2':  case '3':
-			case '4':  case '5':  case '6':  case '7':
-			case '8':  case '9':
-			{
-				matchRange('0','9');
-				break;
-			}
-			default:
-			{
-				break _loop226;
-			}
-			}
-		} while (true);
-		}
-		if ( inputState.guessing==0 ) {
-			t = testLiteralsTable(t);
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-		return t;
-	}
-	
-	protected final void mWS_LOOP(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = WS_LOOP;
-		int _saveIndex;
-		
-		{
-		_loop223:
-		do {
-			switch ( LA(1)) {
-			case '\t':  case '\n':  case '\r':  case ' ':
-			{
-				mWS(false);
-				break;
-			}
-			case '/':
-			{
-				mCOMMENT(false);
-				break;
-			}
-			default:
-			{
-				break _loop223;
-			}
-			}
-		} while (true);
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	protected final void mWS_OPT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
-		int _ttype; Token _token=null; int _begin=text.length();
-		_ttype = WS_OPT;
-		int _saveIndex;
-		
-		{
-		if ((_tokenSet_6.member(LA(1)))) {
-			mWS(false);
-		}
-		else {
-		}
-		
-		}
-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
-			_token = makeToken(_ttype);
-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
-		}
-		_returnToken = _token;
-	}
-	
-	
-	private static final long[] mk_tokenSet_0() {
-		long[] data = new long[8];
-		data[0]=-9224L;
-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
-		return data;
-	}
-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
-	private static final long[] mk_tokenSet_1() {
-		long[] data = new long[8];
-		data[0]=-549755814920L;
-		data[1]=-268435457L;
-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
-		return data;
-	}
-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
-	private static final long[] mk_tokenSet_2() {
-		long[] data = { 287948901175001088L, 541165879422L, 0L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
-	private static final long[] mk_tokenSet_3() {
-		long[] data = new long[8];
-		data[0]=-17179870216L;
-		data[1]=-268435457L;
-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
-		return data;
-	}
-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
-	private static final long[] mk_tokenSet_4() {
-		long[] data = new long[8];
-		data[0]=-17179878408L;
-		data[1]=-671088641L;
-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
-		return data;
-	}
-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
-	private static final long[] mk_tokenSet_5() {
-		long[] data = new long[8];
-		data[0]=-566935683080L;
-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
-		return data;
-	}
-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
-	private static final long[] mk_tokenSet_6() {
-		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
-	
-	}
diff --git a/src/org/antlr/tool/ANTLRLexer.smap b/src/org/antlr/tool/ANTLRLexer.smap
deleted file mode 100644
index 21339b2..0000000
--- a/src/org/antlr/tool/ANTLRLexer.smap
+++ /dev/null
@@ -1,1203 +0,0 @@
-SMAP
-ANTLRLexer.java
-G
-*S G
-*F
-+ 0 antlr.g
-antlr.g
-*L
-0:113
-0:119
-0:125
-0:131
-0:137
-0:143
-0:149
-0:155
-0:161
-0:167
-0:173
-0:179
-0:185
-0:191
-0:197
-0:203
-0:209
-0:215
-0:221
-0:229
-0:235
-0:241
-0:253
-0:265
-0:271
-0:275
-0:279
-0:283
-0:287
-0:291
-0:295
-0:299
-0:303
-0:307
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-1:32
-1:33
-1:34
-1:35
-913:64
-914:65
-915:66
-916:67
-917:68
-918:69
-921:335
-921:336
-921:337
-921:338
-921:341
-921:342
-921:343
-921:344
-921:377
-921:378
-921:379
-921:380
-921:381
-921:383
-921:384
-921:385
-921:386
-921:387
-921:388
-922:347
-922:348
-922:349
-923:352
-923:353
-923:355
-923:356
-923:357
-923:358
-923:365
-923:366
-923:367
-923:368
-923:369
-923:371
-923:372
-923:373
-927:390
-927:391
-927:392
-927:393
-927:412
-927:413
-927:414
-927:415
-927:416
-927:417
-928:394
-928:397
-928:398
-928:399
-928:400
-928:401
-928:402
-928:403
-928:404
-928:406
-928:407
-928:408
-928:409
-932:419
-932:420
-932:421
-932:422
-932:506
-932:507
-932:508
-932:509
-932:510
-932:511
-933:424
-934:426
-934:427
-934:428
-934:429
-934:430
-934:431
-934:433
-934:435
-934:436
-934:437
-934:438
-934:439
-934:440
-934:441
-934:442
-934:443
-934:444
-934:446
-934:447
-934:448
-934:449
-934:456
-934:457
-934:458
-934:459
-934:460
-934:462
-934:463
-934:497
-934:498
-934:499
-934:500
-935:464
-935:465
-935:466
-935:467
-935:468
-935:469
-935:470
-935:471
-935:472
-935:473
-935:474
-935:475
-935:477
-935:478
-935:480
-935:481
-935:482
-935:483
-935:490
-935:491
-935:492
-935:493
-935:494
-935:496
-937:503
-937:504
-941:513
-941:514
-941:515
-941:516
-941:569
-941:570
-941:571
-941:572
-941:573
-941:574
-942:518
-943:520
-943:521
-943:522
-943:523
-943:525
-943:527
-943:528
-943:529
-943:530
-946:533
-946:534
-946:535
-946:536
-946:537
-946:538
-946:556
-946:561
-946:562
-946:563
-946:564
-946:565
-946:566
-946:567
-950:539
-950:540
-950:541
-950:542
-950:543
-950:544
-951:548
-951:549
-951:550
-951:551
-951:552
-952:557
-952:559
-954:568
-957:609
-957:610
-957:611
-957:612
-957:615
-957:616
-957:617
-957:618
-957:619
-957:620
-958:614
-961:622
-961:623
-961:624
-961:625
-961:628
-961:629
-961:630
-961:631
-961:632
-961:633
-962:627
-965:635
-965:636
-965:637
-965:638
-965:640
-965:641
-965:642
-965:643
-965:644
-965:645
-965:646
-967:648
-967:649
-967:650
-967:651
-967:653
-967:654
-967:655
-967:656
-967:657
-967:658
-967:659
-969:661
-969:662
-969:663
-969:664
-969:666
-969:667
-969:668
-969:669
-969:670
-969:671
-969:672
-971:674
-971:675
-971:676
-971:677
-971:679
-971:680
-971:681
-971:682
-971:683
-971:684
-971:685
-973:687
-973:688
-973:689
-973:690
-973:692
-973:693
-973:694
-973:695
-973:696
-973:697
-973:698
-975:700
-975:701
-975:702
-975:703
-975:705
-975:706
-975:707
-975:708
-975:709
-975:710
-975:711
-977:713
-977:714
-977:715
-977:716
-977:718
-977:719
-977:720
-977:721
-977:722
-977:723
-977:724
-979:726
-979:727
-979:728
-979:729
-979:731
-979:732
-979:733
-979:734
-979:735
-979:736
-979:737
-981:739
-981:740
-981:741
-981:742
-981:744
-981:745
-981:746
-981:747
-981:748
-981:749
-981:750
-983:752
-983:753
-983:754
-983:755
-983:757
-983:758
-983:759
-983:760
-983:761
-983:762
-983:763
-985:765
-985:766
-985:767
-985:768
-985:770
-985:771
-985:772
-985:773
-985:774
-985:775
-985:776
-987:778
-987:779
-987:780
-987:781
-987:783
-987:784
-987:785
-987:786
-987:787
-987:788
-987:789
-989:791
-989:792
-989:793
-989:794
-989:796
-989:797
-989:798
-989:799
-989:800
-989:801
-989:802
-991:804
-991:805
-991:806
-991:807
-991:809
-991:810
-991:811
-991:812
-991:813
-991:814
-991:815
-993:817
-993:818
-993:819
-993:820
-993:822
-993:823
-993:824
-993:825
-993:826
-993:827
-993:828
-995:830
-995:831
-995:832
-995:833
-995:835
-995:836
-995:837
-995:838
-995:839
-995:840
-995:841
-997:843
-997:844
-997:845
-997:846
-997:848
-997:849
-997:850
-997:851
-997:852
-997:853
-997:854
-999:856
-999:857
-999:858
-999:859
-999:861
-999:862
-999:863
-999:864
-999:865
-999:866
-999:867
-1001:869
-1001:870
-1001:871
-1001:872
-1001:874
-1001:875
-1001:876
-1001:877
-1001:878
-1001:879
-1001:880
-1003:882
-1003:883
-1003:884
-1003:885
-1003:887
-1003:888
-1003:889
-1003:890
-1003:891
-1003:892
-1003:893
-1005:895
-1005:896
-1005:897
-1005:898
-1005:900
-1005:901
-1005:902
-1005:903
-1005:904
-1005:905
-1005:906
-1007:908
-1007:909
-1007:910
-1007:911
-1007:913
-1007:914
-1007:915
-1007:916
-1007:917
-1007:918
-1007:919
-1009:921
-1009:922
-1009:923
-1009:924
-1009:963
-1009:964
-1009:965
-1009:966
-1009:967
-1009:968
-1010:926
-1010:927
-1010:928
-1010:929
-1010:930
-1010:931
-1010:932
-1010:933
-1010:936
-1010:937
-1010:938
-1010:939
-1010:940
-1010:944
-1010:945
-1010:946
-1010:947
-1010:948
-1010:949
-1010:950
-1010:951
-1010:952
-1010:953
-1010:954
-1011:955
-1012:957
-1013:958
-1014:959
-1015:960
-1019:1073
-1019:1074
-1019:1075
-1019:1076
-1019:1110
-1019:1111
-1019:1112
-1019:1113
-1019:1114
-1019:1115
-1020:1078
-1020:1079
-1020:1080
-1020:1081
-1020:1082
-1020:1083
-1020:1084
-1020:1085
-1020:1086
-1020:1087
-1020:1088
-1020:1091
-1020:1092
-1020:1093
-1020:1094
-1020:1095
-1020:1099
-1020:1100
-1020:1101
-1020:1102
-1020:1103
-1020:1104
-1020:1105
-1020:1106
-1020:1107
-1020:1108
-1020:1109
-1023:1117
-1023:1118
-1023:1119
-1023:1120
-1023:1144
-1023:1145
-1023:1146
-1023:1147
-1023:1148
-1023:1149
-1024:1122
-1024:1123
-1024:1124
-1024:1125
-1024:1126
-1024:1127
-1024:1128
-1024:1129
-1024:1130
-1024:1131
-1024:1133
-1024:1134
-1024:1135
-1024:1136
-1024:1137
-1024:1138
-1024:1139
-1024:1141
-1024:1142
-1024:1143
-1028:970
-1028:971
-1028:972
-1028:973
-1028:975
-1028:1066
-1028:1067
-1028:1068
-1028:1069
-1028:1070
-1028:1071
-1029:977
-1029:978
-1029:979
-1029:982
-1029:985
-1029:988
-1029:991
-1029:994
-1029:997
-1029:1000
-1029:1003
-1029:1032
-1029:1050
-1029:1057
-1029:1060
-1029:1061
-1029:1062
-1029:1063
-1030:980
-1030:981
-1031:983
-1031:984
-1032:986
-1032:987
-1033:989
-1033:990
-1034:992
-1034:993
-1035:995
-1035:996
-1036:998
-1036:999
-1037:1001
-1037:1002
-1038:1004
-1038:1006
-1039:1024
-1039:1026
-1039:1027
-1039:1028
-1039:1029
-1044:1009
-1044:1011
-1045:1016
-1045:1018
-1045:1019
-1045:1020
-1045:1021
-1050:1014
-1050:1015
-1053:1033
-1053:1035
-1054:1042
-1054:1044
-1054:1045
-1054:1046
-1054:1047
-1059:1038
-1059:1040
-1061:1051
-1061:1052
-1061:1053
-1061:1054
-1061:1055
-1061:1056
-1062:1058
-1062:1059
-1067:1188
-1067:1189
-1067:1190
-1067:1191
-1067:1194
-1067:1195
-1067:1196
-1067:1197
-1067:1198
-1067:1199
-1068:1193
-1072:1151
-1072:1152
-1072:1153
-1072:1154
-1072:1156
-1072:1176
-1072:1177
-1072:1178
-1072:1179
-1072:1180
-1072:1181
-1072:1182
-1072:1183
-1072:1184
-1072:1185
-1072:1186
-1073:1157
-1073:1158
-1073:1159
-1073:1160
-1073:1161
-1074:1164
-1074:1165
-1074:1166
-1074:1167
-1075:1170
-1075:1171
-1075:1172
-1075:1173
-1078:1201
-1078:1202
-1078:1203
-1078:1204
-1078:1207
-1078:1208
-1078:1209
-1078:1210
-1078:1211
-1078:1212
-1078:1213
-1078:1214
-1078:1215
-1078:1217
-1078:1218
-1078:1219
-1078:1220
-1078:1221
-1078:1222
-1078:1223
-1078:1224
-1078:1225
-1081:1227
-1081:1228
-1081:1229
-1081:1230
-1081:1233
-1081:1234
-1081:1235
-1081:1236
-1081:1237
-1081:1238
-1083:1232
-1087:1240
-1087:1241
-1087:1242
-1087:1243
-1087:1292
-1087:1293
-1087:1294
-1087:1295
-1087:1296
-1087:1297
-1088:1245
-1088:1246
-1088:1247
-1089:1248
-1089:1249
-1089:1250
-1089:1251
-1089:1279
-1089:1282
-1089:1283
-1089:1284
-1089:1285
-1089:1286
-1089:1287
-1089:1288
-1090:1252
-1090:1253
-1090:1254
-1091:1257
-1091:1258
-1091:1259
-1091:1260
-1091:1261
-1091:1262
-1092:1266
-1092:1267
-1092:1268
-1092:1269
-1092:1270
-1093:1274
-1093:1275
-1093:1276
-1094:1280
-1094:1281
-1096:1289
-1096:1290
-1096:1291
-1099:1340
-1099:1341
-1099:1342
-1099:1343
-1099:1344
-1099:1371
-1099:1372
-1099:1373
-1099:1374
-1099:1375
-1099:1376
-1101:1346
-1102:1348
-1102:1349
-1102:1350
-1102:1351
-1102:1352
-1102:1353
-1102:1355
-1102:1357
-1103:1360
-1104:1362
-1105:1363
-1106:1364
-1107:1365
-1108:1366
-1109:1367
-1110:1368
-1115:1378
-1115:1379
-1115:1380
-1115:1381
-1115:1438
-1115:1439
-1115:1440
-1115:1441
-1115:1442
-1115:1443
-1116:1383
-1117:1384
-1117:1385
-1117:1386
-1117:1387
-1117:1388
-1117:1415
-1117:1418
-1117:1421
-1117:1424
-1117:1427
-1117:1430
-1117:1431
-1117:1432
-1117:1433
-1117:1435
-1117:1436
-1122:1389
-1122:1391
-1122:1409
-1122:1410
-1122:1411
-1122:1412
-1122:1413
-1123:1392
-1123:1393
-1123:1394
-1123:1395
-1123:1396
-1123:1397
-1124:1401
-1124:1402
-1124:1403
-1124:1404
-1124:1405
-1126:1416
-1126:1417
-1127:1419
-1127:1420
-1128:1422
-1128:1423
-1129:1425
-1129:1426
-1130:1428
-1130:1429
-1132:1437
-1136:1445
-1136:1446
-1136:1447
-1136:1448
-1136:1479
-1136:1480
-1136:1481
-1136:1482
-1136:1483
-1136:1484
-1137:1450
-1137:1451
-1137:1452
-1137:1453
-1137:1454
-1137:1455
-1137:1456
-1137:1457
-1137:1460
-1137:1461
-1137:1462
-1137:1463
-1137:1464
-1137:1468
-1137:1469
-1137:1470
-1137:1471
-1137:1472
-1137:1473
-1137:1474
-1137:1475
-1137:1476
-1137:1477
-1137:1478
-1141:1299
-1141:1300
-1141:1301
-1141:1302
-1141:1333
-1141:1334
-1141:1335
-1141:1336
-1141:1337
-1141:1338
-1142:1304
-1142:1305
-1142:1306
-1142:1307
-1142:1308
-1142:1309
-1142:1310
-1142:1311
-1142:1314
-1142:1315
-1142:1316
-1142:1317
-1142:1318
-1142:1322
-1142:1323
-1142:1324
-1142:1325
-1142:1326
-1142:1327
-1142:1328
-1142:1329
-1142:1330
-1142:1331
-1142:1332
-1146:1486
-1146:1487
-1146:1488
-1146:1489
-1146:1493
-1146:1496
-1146:1502
-1146:1503
-1146:1504
-1146:1505
-1146:1507
-1146:1508
-1146:1509
-1146:1510
-1146:1511
-1146:1512
-1147:1491
-1147:1492
-1148:1494
-1148:1495
-1149:1497
-1149:1498
-1149:1500
-1152:1514
-1152:1515
-1152:1516
-1152:1517
-1152:1565
-1152:1566
-1152:1567
-1152:1568
-1152:1569
-1152:1570
-1152:1571
-1154:1519
-1155:1520
-1155:1521
-1155:1522
-1155:1523
-1155:1558
-1155:1559
-1155:1560
-1155:1561
-1155:1562
-1155:1563
-1155:1564
-1160:1524
-1160:1525
-1160:1526
-1160:1527
-1160:1528
-1160:1529
-1160:1530
-1160:1531
-1160:1532
-1160:1535
-1160:1536
-1160:1537
-1160:1538
-1160:1539
-1160:1540
-1160:1541
-1160:1542
-1160:1543
-1160:1546
-1160:1547
-1160:1548
-1160:1551
-1160:1552
-1160:1553
-1160:1554
-1160:1555
-1165:1573
-1165:1574
-1165:1575
-1165:1576
-1165:1618
-1165:1619
-1165:1620
-1165:1621
-1165:1622
-1165:1623
-1166:1578
-1169:1581
-1169:1582
-1169:1583
-1170:1586
-1170:1587
-1170:1589
-1170:1590
-1170:1591
-1170:1592
-1170:1594
-1170:1596
-1170:1599
-1170:1613
-1170:1615
-1171:1600
-1171:1601
-1171:1603
-1171:1604
-1171:1605
-1171:1606
-1171:1608
-1171:1610
-1177:1691
-1177:1692
-1177:1693
-1177:1694
-1177:1717
-1177:1718
-1177:1719
-1177:1720
-1177:1721
-1177:1722
-1178:1696
-1178:1697
-1178:1698
-1178:1699
-1178:1710
-1178:1711
-1178:1712
-1178:1713
-1178:1714
-1178:1715
-1178:1716
-1183:1700
-1183:1701
-1183:1702
-1184:1705
-1184:1706
-1184:1707
-1189:1625
-1189:1626
-1189:1627
-1189:1628
-1189:1629
-1189:1683
-1189:1684
-1189:1685
-1189:1686
-1189:1687
-1189:1688
-1189:1689
-1190:1631
-1193:1634
-1194:1635
-1194:1636
-1194:1637
-1194:1638
-1194:1673
-1194:1674
-1194:1675
-1194:1676
-1194:1677
-1194:1678
-1194:1679
-1199:1639
-1199:1640
-1199:1641
-1199:1642
-1199:1643
-1199:1644
-1199:1645
-1199:1646
-1199:1647
-1199:1650
-1199:1651
-1199:1652
-1199:1653
-1199:1654
-1199:1655
-1199:1656
-1199:1657
-1199:1658
-1199:1661
-1199:1662
-1199:1663
-1199:1666
-1199:1667
-1199:1668
-1199:1669
-1199:1670
-1201:1680
-1201:1681
-1205:1724
-1205:1725
-1205:1726
-1205:1727
-1205:1737
-1205:1738
-1205:1739
-1205:1740
-1205:1741
-1205:1742
-1206:1730
-1206:1731
-1206:1732
-1206:1734
-1214:576
-1214:580
-1214:581
-1214:582
-1214:583
-1214:584
-1214:585
-1214:587
-1214:588
-1214:589
-1214:590
-1214:591
-1214:592
-1214:593
-1214:602
-1214:603
-1214:604
-1214:605
-1214:606
-1214:607
-1215:577
-1215:594
-1216:578
-1216:596
-1217:579
-1217:597
-1218:598
-1219:599
-*E
diff --git a/src/org/antlr/tool/ANTLRParser.java b/src/org/antlr/tool/ANTLRParser.java
deleted file mode 100644
index bda3da7..0000000
--- a/src/org/antlr/tool/ANTLRParser.java
+++ /dev/null
@@ -1,4172 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRParser.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.tool;
-import java.util.*;
-import java.io.*;
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import antlr.*;
-
-import antlr.TokenBuffer;
-import antlr.TokenStreamException;
-import antlr.TokenStreamIOException;
-import antlr.ANTLRException;
-import antlr.LLkParser;
-import antlr.Token;
-import antlr.TokenStream;
-import antlr.RecognitionException;
-import antlr.NoViableAltException;
-import antlr.MismatchedTokenException;
-import antlr.SemanticException;
-import antlr.ParserSharedInputState;
-import antlr.collections.impl.BitSet;
-import antlr.collections.AST;
-import java.util.Hashtable;
-import antlr.ASTFactory;
-import antlr.ASTPair;
-import antlr.collections.impl.ASTArray;
-
-/** Read in an ANTLR grammar and build an AST.  Try not to do
- *  any actions, just build the tree.
- *
- *  The phases are:
- *
- *		antlr.g (this file)
- *		assign.types.g
- *		define.g
- *		buildnfa.g
- *		antlr.print.g (optional)
- *		codegen.g
- *
- *  Terence Parr
- *  University of San Francisco
- *  2005
- */
-public class ANTLRParser extends antlr.LLkParser       implements ANTLRTokenTypes
- {
-
-	Grammar grammar = null;
-	protected int gtype = 0;
-	protected String currentRuleName = null;
-	protected GrammarAST currentBlockAST = null;
-
-	/* this next stuff supports construction of the Tokens artificial rule.
-	   I hate having some partial functionality here, I like doing everything
-	   in future tree passes, but the Tokens rule is sensitive to filter mode.
-	   And if it adds syn preds, future tree passes will need to process the
-	   fragments defined in Tokens; a cyclic dependency.
-	   As of 1-17-06 then, Tokens is created for lexer grammars in the
-	   antlr grammar parser itself.
-
-	   This grammar is also sensitive to the backtrack grammar option that
-	   tells ANTLR to automatically backtrack when it can't compute a DFA.
-
-	   7-2-06 I moved all option processing to antlr.g from define.g as I
-	   need backtrack option etc... for blocks.  Got messy.
-	*/
-	protected List lexerRuleNames = new ArrayList();
-	public List getLexerRuleNames() { return lexerRuleNames; }
-
-	protected GrammarAST setToBlockWithSet(GrammarAST b) {
-		GrammarAST alt = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT,"ALT")).add(b).add((GrammarAST)astFactory.create(EOA,"<end-of-alt>")));
-		prefixWithSynPred(alt);
-		return (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK,"BLOCK")).add(alt).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));
-	}
-
-	/** Create a copy of the alt and make it into a BLOCK; all actions,
-	 *  labels, tree operators, rewrites are removed.
-	 */
-	protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {
-		//GrammarAST nalt = (GrammarAST)astFactory.dupTree(alt);
-		GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);
-		GrammarAST blk = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK,"BLOCK")).add(nalt).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));
-		return blk;
-	}
-
-	/** Rewrite alt to have a synpred as first element;
-	 *  (xxx)=>xxx
-	 *  but only if they didn't specify one manually.
-	 */
-	protected void prefixWithSynPred(GrammarAST alt) {
-		// if they want backtracking and it's not a lexer rule in combined grammar
-		String autoBacktrack = (String)currentBlockAST.getOption("backtrack");
-		if ( autoBacktrack==null ) {
-			autoBacktrack = (String)grammar.getOption("backtrack");
-		}
-		if ( autoBacktrack!=null&&autoBacktrack.equals("true") &&
-			 !(gtype==COMBINED_GRAMMAR &&
-			 Character.isUpperCase(currentRuleName.charAt(0))) &&
-			 alt.getFirstChild().getType()!=SYN_SEMPRED )
-		{
-			// duplicate alt and make a synpred block around that dup'd alt
-			GrammarAST synpredBlockAST = createBlockFromDupAlt(alt);
-
-			// Create a BACKTRACK_SEMPRED node as if user had typed this in
-			// Effectively we replace (xxx)=>xxx with {synpredxxx}? xxx
-			GrammarAST synpredAST = createSynSemPredFromBlock(synpredBlockAST,
-															  BACKTRACK_SEMPRED);
-
-			// insert BACKTRACK_SEMPRED as first element of alt
-			synpredAST.getLastSibling().setNextSibling(alt.getFirstChild());
-			alt.setFirstChild(synpredAST);
-		}
-	}
-
-	protected GrammarAST createSynSemPredFromBlock(GrammarAST synpredBlockAST,
-												   int synpredTokenType)
-	{
-		// add grammar fragment to a list so we can make fake rules for them
-		// later.
-		String predName = grammar.defineSyntacticPredicate(synpredBlockAST,currentRuleName);
-		// convert (alpha)=> into {synpredN}? where N is some pred count
-		// during code gen we convert to function call with templates
-		String synpredinvoke = predName;
-		GrammarAST p = (GrammarAST)astFactory.create(synpredTokenType,synpredinvoke);
-		p.setEnclosingRule(currentRuleName);
-		// track how many decisions have synpreds
-		grammar.blocksWithSynPreds.add(currentBlockAST);
-		return p;
-	}
-
-	public GrammarAST createSimpleRuleAST(String name,
-										  GrammarAST block,
-										  boolean fragment)
-   {
-   		GrammarAST modifier = null;
-   		if ( fragment ) {
-   			modifier = (GrammarAST)astFactory.create(FRAGMENT,"fragment");
-   		}
-   		GrammarAST EORAST = (GrammarAST)astFactory.create(EOR,"<end-of-rule>");
-   		GrammarAST EOBAST = block.getLastChild();
-		EORAST.setLine(EOBAST.getLine());
-		EORAST.setColumn(EOBAST.getColumn());
-		GrammarAST ruleAST =
-		   (GrammarAST)astFactory.make( (new ASTArray(8)).add((GrammarAST)astFactory.create(RULE,"rule")).add((GrammarAST)astFactory.create(ID,name)).add(modifier).add((GrammarAST)astFactory.create(ARG,"ARG")).add((GrammarAST)astFactory.create(RET,"RET")).add((GrammarAST)astFactory.create(SCOPE,"scope")).add(block).add(EORAST));
-		ruleAST.setLine(block.getLine());
-		ruleAST.setColumn(block.getColumn());
-		return ruleAST;
-	}
-
-    public void reportError(RecognitionException ex) {
-		Token token = null;
-		try {
-			token = LT(1);
-		}
-		catch (TokenStreamException tse) {
-			ErrorManager.internalError("can't get token???", tse);
-		}
-		ErrorManager.syntaxError(
-			ErrorManager.MSG_SYNTAX_ERROR,
-			grammar,
-			token,
-			"antlr: "+ex.toString(),
-			ex);
-    }
-
-    public void cleanup(GrammarAST root) {
-		if ( gtype==LEXER_GRAMMAR ) {
-			String filter = (String)grammar.getOption("filter");
-			GrammarAST tokensRuleAST =
-			    grammar.addArtificialMatchTokensRule(
-			    	root,
-			    	lexerRuleNames,
-			    	filter!=null&&filter.equals("true"));
-		}
-    }
-
-protected ANTLRParser(TokenBuffer tokenBuf, int k) {
-  super(tokenBuf,k);
-  tokenNames = _tokenNames;
-  buildTokenTypeASTClassMap();
-  astFactory = new ASTFactory(getTokenTypeToASTClassMap());
-}
-
-public ANTLRParser(TokenBuffer tokenBuf) {
-  this(tokenBuf,2);
-}
-
-protected ANTLRParser(TokenStream lexer, int k) {
-  super(lexer,k);
-  tokenNames = _tokenNames;
-  buildTokenTypeASTClassMap();
-  astFactory = new ASTFactory(getTokenTypeToASTClassMap());
-}
-
-public ANTLRParser(TokenStream lexer) {
-  this(lexer,2);
-}
-
-public ANTLRParser(ParserSharedInputState state) {
-  super(state,2);
-  tokenNames = _tokenNames;
-  buildTokenTypeASTClassMap();
-  astFactory = new ASTFactory(getTokenTypeToASTClassMap());
-}
-
-	public final void grammar(
-		Grammar g
-	) throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST grammar_AST = null;
-		Token  cmt = null;
-		GrammarAST cmt_AST = null;
-		GrammarAST gr_AST = null;
-		GrammarAST gid_AST = null;
-		GrammarAST ts_AST = null;
-		GrammarAST scopes_AST = null;
-		GrammarAST a_AST = null;
-		GrammarAST r_AST = null;
-		
-			this.grammar = g;
-			GrammarAST opt=null;
-			Token optionsStartToken = null;
-			Map opts;
-		
-		
-		try {      // for error handling
-			{
-			switch ( LA(1)) {
-			case ACTION:
-			{
-				GrammarAST tmp1_AST = null;
-				tmp1_AST = (GrammarAST)astFactory.create(LT(1));
-				match(ACTION);
-				break;
-			}
-			case PARSER:
-			case DOC_COMMENT:
-			case LITERAL_lexer:
-			case LITERAL_tree:
-			case LITERAL_grammar:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case DOC_COMMENT:
-			{
-				cmt = LT(1);
-				cmt_AST = (GrammarAST)astFactory.create(cmt);
-				match(DOC_COMMENT);
-				break;
-			}
-			case PARSER:
-			case LITERAL_lexer:
-			case LITERAL_tree:
-			case LITERAL_grammar:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			grammarType();
-			gr_AST = (GrammarAST)returnAST;
-			id();
-			gid_AST = (GrammarAST)returnAST;
-			GrammarAST tmp2_AST = null;
-			tmp2_AST = (GrammarAST)astFactory.create(LT(1));
-			match(SEMI);
-			{
-			switch ( LA(1)) {
-			case OPTIONS:
-			{
-				optionsStartToken=LT(1);
-				opts=optionsSpec();
-				grammar.setOptions(opts, optionsStartToken);
-				opt=(GrammarAST)returnAST;
-				break;
-			}
-			case TOKENS:
-			case SCOPE:
-			case FRAGMENT:
-			case DOC_COMMENT:
-			case AMPERSAND:
-			case TOKEN_REF:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			case RULE_REF:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case TOKENS:
-			{
-				tokensSpec();
-				ts_AST = (GrammarAST)returnAST;
-				break;
-			}
-			case SCOPE:
-			case FRAGMENT:
-			case DOC_COMMENT:
-			case AMPERSAND:
-			case TOKEN_REF:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			case RULE_REF:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			attrScopes();
-			scopes_AST = (GrammarAST)returnAST;
-			{
-			switch ( LA(1)) {
-			case AMPERSAND:
-			{
-				actions();
-				a_AST = (GrammarAST)returnAST;
-				break;
-			}
-			case FRAGMENT:
-			case DOC_COMMENT:
-			case TOKEN_REF:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			case RULE_REF:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			rules();
-			r_AST = (GrammarAST)returnAST;
-			GrammarAST tmp3_AST = null;
-			tmp3_AST = (GrammarAST)astFactory.create(LT(1));
-			match(Token.EOF_TYPE);
-			grammar_AST = (GrammarAST)currentAST.root;
-			
-			grammar_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(null).add((GrammarAST)astFactory.make( (new ASTArray(8)).add(gr_AST).add(gid_AST).add(cmt_AST).add(opt).add(ts_AST).add(scopes_AST).add(a_AST).add(r_AST))));
-			cleanup(grammar_AST);
-			
-			currentAST.root = grammar_AST;
-			currentAST.child = grammar_AST!=null &&grammar_AST.getFirstChild()!=null ?
-				grammar_AST.getFirstChild() : grammar_AST;
-			currentAST.advanceChildToEnd();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_0);
-		}
-		returnAST = grammar_AST;
-	}
-	
-	public final void grammarType() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST grammarType_AST = null;
-		Token  gr = null;
-		GrammarAST gr_AST = null;
-		
-		try {      // for error handling
-			{
-			switch ( LA(1)) {
-			case LITERAL_lexer:
-			{
-				match(LITERAL_lexer);
-				gtype=LEXER_GRAMMAR;
-				break;
-			}
-			case PARSER:
-			{
-				match(PARSER);
-				gtype=PARSER_GRAMMAR;
-				break;
-			}
-			case LITERAL_tree:
-			{
-				match(LITERAL_tree);
-				gtype=TREE_GRAMMAR;
-				break;
-			}
-			case LITERAL_grammar:
-			{
-				gtype=COMBINED_GRAMMAR;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			gr = LT(1);
-			gr_AST = (GrammarAST)astFactory.create(gr);
-			astFactory.addASTChild(currentAST, gr_AST);
-			match(LITERAL_grammar);
-			gr_AST.setType(gtype);
-			grammarType_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_1);
-		}
-		returnAST = grammarType_AST;
-	}
-	
-	public final void id() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST id_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case TOKEN_REF:
-			{
-				GrammarAST tmp7_AST = null;
-				tmp7_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp7_AST);
-				match(TOKEN_REF);
-				id_AST = (GrammarAST)currentAST.root;
-				id_AST.setType(ID);
-				id_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case RULE_REF:
-			{
-				GrammarAST tmp8_AST = null;
-				tmp8_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp8_AST);
-				match(RULE_REF);
-				id_AST = (GrammarAST)currentAST.root;
-				id_AST.setType(ID);
-				id_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_2);
-		}
-		returnAST = id_AST;
-	}
-	
-	public final Map  optionsSpec() throws RecognitionException, TokenStreamException {
-		Map opts=new HashMap();
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST optionsSpec_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp9_AST = null;
-			tmp9_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp9_AST);
-			match(OPTIONS);
-			{
-			int _cnt17=0;
-			_loop17:
-			do {
-				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
-					option(opts);
-					astFactory.addASTChild(currentAST, returnAST);
-					match(SEMI);
-				}
-				else {
-					if ( _cnt17>=1 ) { break _loop17; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt17++;
-			} while (true);
-			}
-			match(RCURLY);
-			optionsSpec_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_3);
-		}
-		returnAST = optionsSpec_AST;
-		return opts;
-	}
-	
-	public final void tokensSpec() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST tokensSpec_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp12_AST = null;
-			tmp12_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp12_AST);
-			match(TOKENS);
-			{
-			int _cnt22=0;
-			_loop22:
-			do {
-				if ((LA(1)==TOKEN_REF)) {
-					tokenSpec();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					if ( _cnt22>=1 ) { break _loop22; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt22++;
-			} while (true);
-			}
-			match(RCURLY);
-			tokensSpec_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_4);
-		}
-		returnAST = tokensSpec_AST;
-	}
-	
-	public final void attrScopes() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST attrScopes_AST = null;
-		
-		try {      // for error handling
-			{
-			_loop28:
-			do {
-				if ((LA(1)==SCOPE)) {
-					attrScope();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					break _loop28;
-				}
-				
-			} while (true);
-			}
-			attrScopes_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_5);
-		}
-		returnAST = attrScopes_AST;
-	}
-	
-	public final void actions() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST actions_AST = null;
-		
-		try {      // for error handling
-			{
-			int _cnt11=0;
-			_loop11:
-			do {
-				if ((LA(1)==AMPERSAND)) {
-					action();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					if ( _cnt11>=1 ) { break _loop11; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt11++;
-			} while (true);
-			}
-			actions_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_6);
-		}
-		returnAST = actions_AST;
-	}
-	
-	public final void rules() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rules_AST = null;
-		
-		try {      // for error handling
-			{
-			int _cnt32=0;
-			_loop32:
-			do {
-				if ((_tokenSet_6.member(LA(1)))) {
-					rule();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					if ( _cnt32>=1 ) { break _loop32; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt32++;
-			} while (true);
-			}
-			rules_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_0);
-		}
-		returnAST = rules_AST;
-	}
-	
-/** Match stuff like @parser::members {int i;} */
-	public final void action() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST action_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp14_AST = null;
-			tmp14_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp14_AST);
-			match(AMPERSAND);
-			{
-			if ((_tokenSet_7.member(LA(1))) && (LA(2)==COLON)) {
-				actionScopeName();
-				astFactory.addASTChild(currentAST, returnAST);
-				match(COLON);
-				match(COLON);
-			}
-			else if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==ACTION)) {
-			}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			
-			}
-			id();
-			astFactory.addASTChild(currentAST, returnAST);
-			GrammarAST tmp17_AST = null;
-			tmp17_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp17_AST);
-			match(ACTION);
-			action_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_5);
-		}
-		returnAST = action_AST;
-	}
-	
-/** Sometimes the scope names will collide with keywords; allow them as
- *  ids for action scopes.
- */
-	public final void actionScopeName() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST actionScopeName_AST = null;
-		Token  l = null;
-		GrammarAST l_AST = null;
-		Token  p = null;
-		GrammarAST p_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case TOKEN_REF:
-			case RULE_REF:
-			{
-				id();
-				astFactory.addASTChild(currentAST, returnAST);
-				actionScopeName_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case LITERAL_lexer:
-			{
-				l = LT(1);
-				l_AST = (GrammarAST)astFactory.create(l);
-				astFactory.addASTChild(currentAST, l_AST);
-				match(LITERAL_lexer);
-				l_AST.setType(ID);
-				actionScopeName_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case PARSER:
-			{
-				p = LT(1);
-				p_AST = (GrammarAST)astFactory.create(p);
-				astFactory.addASTChild(currentAST, p_AST);
-				match(PARSER);
-				p_AST.setType(ID);
-				actionScopeName_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_8);
-		}
-		returnAST = actionScopeName_AST;
-	}
-	
-	public final void option(
-		Map opts
-	) throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST option_AST = null;
-		GrammarAST o_AST = null;
-		
-		Object value=null;
-		
-		
-		try {      // for error handling
-			id();
-			o_AST = (GrammarAST)returnAST;
-			astFactory.addASTChild(currentAST, returnAST);
-			GrammarAST tmp18_AST = null;
-			tmp18_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp18_AST);
-			match(ASSIGN);
-			value=optionValue();
-			astFactory.addASTChild(currentAST, returnAST);
-			
-				opts.put(o_AST.getText(), value);
-				
-			option_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_9);
-		}
-		returnAST = option_AST;
-	}
-	
-	public final Object  optionValue() throws RecognitionException, TokenStreamException {
-		Object value=null;
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST optionValue_AST = null;
-		GrammarAST x_AST = null;
-		Token  s = null;
-		GrammarAST s_AST = null;
-		Token  c = null;
-		GrammarAST c_AST = null;
-		Token  i = null;
-		GrammarAST i_AST = null;
-		Token  ss = null;
-		GrammarAST ss_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case TOKEN_REF:
-			case RULE_REF:
-			{
-				id();
-				x_AST = (GrammarAST)returnAST;
-				astFactory.addASTChild(currentAST, returnAST);
-				value = x_AST.getText();
-				optionValue_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = LT(1);
-				s_AST = (GrammarAST)astFactory.create(s);
-				astFactory.addASTChild(currentAST, s_AST);
-				match(STRING_LITERAL);
-				String vs = s_AST.getText();
-				value=vs.substring(1,vs.length()-1);
-				optionValue_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				c = LT(1);
-				c_AST = (GrammarAST)astFactory.create(c);
-				astFactory.addASTChild(currentAST, c_AST);
-				match(CHAR_LITERAL);
-				String vs = c_AST.getText();
-				value=vs.substring(1,vs.length()-1);
-				optionValue_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case INT:
-			{
-				i = LT(1);
-				i_AST = (GrammarAST)astFactory.create(i);
-				astFactory.addASTChild(currentAST, i_AST);
-				match(INT);
-				value = new Integer(i_AST.getText());
-				optionValue_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case STAR:
-			{
-				ss = LT(1);
-				ss_AST = (GrammarAST)astFactory.create(ss);
-				astFactory.addASTChild(currentAST, ss_AST);
-				match(STAR);
-				ss_AST.setType(STRING_LITERAL); value = "*";
-				optionValue_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_9);
-		}
-		returnAST = optionValue_AST;
-		return value;
-	}
-	
-	public final void tokenSpec() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST tokenSpec_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp19_AST = null;
-			tmp19_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp19_AST);
-			match(TOKEN_REF);
-			{
-			switch ( LA(1)) {
-			case ASSIGN:
-			{
-				GrammarAST tmp20_AST = null;
-				tmp20_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.makeASTRoot(currentAST, tmp20_AST);
-				match(ASSIGN);
-				{
-				switch ( LA(1)) {
-				case STRING_LITERAL:
-				{
-					GrammarAST tmp21_AST = null;
-					tmp21_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.addASTChild(currentAST, tmp21_AST);
-					match(STRING_LITERAL);
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					GrammarAST tmp22_AST = null;
-					tmp22_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.addASTChild(currentAST, tmp22_AST);
-					match(CHAR_LITERAL);
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				break;
-			}
-			case SEMI:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			match(SEMI);
-			tokenSpec_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_10);
-		}
-		returnAST = tokenSpec_AST;
-	}
-	
-	public final void attrScope() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST attrScope_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp24_AST = null;
-			tmp24_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp24_AST);
-			match(SCOPE);
-			id();
-			astFactory.addASTChild(currentAST, returnAST);
-			GrammarAST tmp25_AST = null;
-			tmp25_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp25_AST);
-			match(ACTION);
-			attrScope_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_4);
-		}
-		returnAST = attrScope_AST;
-	}
-	
-	public final void rule() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rule_AST = null;
-		Token  d = null;
-		GrammarAST d_AST = null;
-		Token  p1 = null;
-		GrammarAST p1_AST = null;
-		Token  p2 = null;
-		GrammarAST p2_AST = null;
-		Token  p3 = null;
-		GrammarAST p3_AST = null;
-		Token  p4 = null;
-		GrammarAST p4_AST = null;
-		GrammarAST ruleName_AST = null;
-		Token  aa = null;
-		GrammarAST aa_AST = null;
-		Token  rt = null;
-		GrammarAST rt_AST = null;
-		GrammarAST scopes_AST = null;
-		GrammarAST a_AST = null;
-		Token  colon = null;
-		GrammarAST colon_AST = null;
-		GrammarAST b_AST = null;
-		Token  semi = null;
-		GrammarAST semi_AST = null;
-		GrammarAST ex_AST = null;
-		
-		GrammarAST modifier=null, blk=null, blkRoot=null, eob=null;
-		int start = ((TokenWithIndex)LT(1)).getIndex();
-		int startLine = LT(1).getLine();
-		GrammarAST opt = null;
-		Map opts = null;
-		
-		
-		try {      // for error handling
-			{
-			switch ( LA(1)) {
-			case DOC_COMMENT:
-			{
-				d = LT(1);
-				d_AST = (GrammarAST)astFactory.create(d);
-				match(DOC_COMMENT);
-				break;
-			}
-			case FRAGMENT:
-			case TOKEN_REF:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			case RULE_REF:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case LITERAL_protected:
-			{
-				p1 = LT(1);
-				p1_AST = (GrammarAST)astFactory.create(p1);
-				match(LITERAL_protected);
-				modifier=p1_AST;
-				break;
-			}
-			case LITERAL_public:
-			{
-				p2 = LT(1);
-				p2_AST = (GrammarAST)astFactory.create(p2);
-				match(LITERAL_public);
-				modifier=p2_AST;
-				break;
-			}
-			case LITERAL_private:
-			{
-				p3 = LT(1);
-				p3_AST = (GrammarAST)astFactory.create(p3);
-				match(LITERAL_private);
-				modifier=p3_AST;
-				break;
-			}
-			case FRAGMENT:
-			{
-				p4 = LT(1);
-				p4_AST = (GrammarAST)astFactory.create(p4);
-				match(FRAGMENT);
-				modifier=p4_AST;
-				break;
-			}
-			case TOKEN_REF:
-			case RULE_REF:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			id();
-			ruleName_AST = (GrammarAST)returnAST;
-			currentRuleName=ruleName_AST.getText();
-			if ( gtype==LEXER_GRAMMAR && p4_AST==null ) {
-			lexerRuleNames.add(currentRuleName);
-				 }
-				
-			{
-			switch ( LA(1)) {
-			case BANG:
-			{
-				GrammarAST tmp26_AST = null;
-				tmp26_AST = (GrammarAST)astFactory.create(LT(1));
-				match(BANG);
-				break;
-			}
-			case OPTIONS:
-			case SCOPE:
-			case AMPERSAND:
-			case COLON:
-			case ARG_ACTION:
-			case LITERAL_returns:
-			case LITERAL_throws:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case ARG_ACTION:
-			{
-				aa = LT(1);
-				aa_AST = (GrammarAST)astFactory.create(aa);
-				match(ARG_ACTION);
-				break;
-			}
-			case OPTIONS:
-			case SCOPE:
-			case AMPERSAND:
-			case COLON:
-			case LITERAL_returns:
-			case LITERAL_throws:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case LITERAL_returns:
-			{
-				match(LITERAL_returns);
-				rt = LT(1);
-				rt_AST = (GrammarAST)astFactory.create(rt);
-				match(ARG_ACTION);
-				break;
-			}
-			case OPTIONS:
-			case SCOPE:
-			case AMPERSAND:
-			case COLON:
-			case LITERAL_throws:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case LITERAL_throws:
-			{
-				throwsSpec();
-				break;
-			}
-			case OPTIONS:
-			case SCOPE:
-			case AMPERSAND:
-			case COLON:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			{
-			switch ( LA(1)) {
-			case OPTIONS:
-			{
-				opts=optionsSpec();
-				opt=(GrammarAST)returnAST;
-				break;
-			}
-			case SCOPE:
-			case AMPERSAND:
-			case COLON:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			ruleScopeSpec();
-			scopes_AST = (GrammarAST)returnAST;
-			{
-			switch ( LA(1)) {
-			case AMPERSAND:
-			{
-				ruleActions();
-				a_AST = (GrammarAST)returnAST;
-				break;
-			}
-			case COLON:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			colon = LT(1);
-			colon_AST = (GrammarAST)astFactory.create(colon);
-			match(COLON);
-			
-				blkRoot = (GrammarAST)astFactory.create(BLOCK,"BLOCK");
-				blkRoot.options = opts;
-				blkRoot.setLine(colon.getLine());
-				blkRoot.setColumn(colon.getColumn());
-				eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
-			
-			altList(opts);
-			b_AST = (GrammarAST)returnAST;
-			blk = b_AST;
-			semi = LT(1);
-			semi_AST = (GrammarAST)astFactory.create(semi);
-			match(SEMI);
-			{
-			switch ( LA(1)) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup();
-				ex_AST = (GrammarAST)returnAST;
-				break;
-			}
-			case EOF:
-			case FRAGMENT:
-			case DOC_COMMENT:
-			case TOKEN_REF:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			case RULE_REF:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			rule_AST = (GrammarAST)currentAST.root;
-			
-			int stop = ((TokenWithIndex)LT(1)).getIndex()-1; // point at the semi or exception thingie
-				eob.setLine(semi.getLine());
-				eob.setColumn(semi.getColumn());
-			GrammarAST eor = (GrammarAST)astFactory.create(EOR,"<end-of-rule>");
-				eor.setEnclosingRule(ruleName_AST.getText());
-				eor.setLine(semi.getLine());
-				eor.setColumn(semi.getColumn());
-				GrammarAST root = (GrammarAST)astFactory.create(RULE,"rule");
-				root.ruleStartTokenIndex = start;
-				root.ruleStopTokenIndex = stop;
-				root.setLine(startLine);
-				root.options = opts;
-			rule_AST = (GrammarAST)astFactory.make( (new ASTArray(11)).add(root).add(ruleName_AST).add(modifier).add((GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(ARG,"ARG")).add(aa_AST))).add((GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(RET,"RET")).add(rt_AST))).add(opt).add(scopes_AST).add(a_AST).add(blk).add(ex_AST).add(eor));
-				currentRuleName=null;
-			
-			currentAST.root = rule_AST;
-			currentAST.child = rule_AST!=null &&rule_AST.getFirstChild()!=null ?
-				rule_AST.getFirstChild() : rule_AST;
-			currentAST.advanceChildToEnd();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_11);
-		}
-		returnAST = rule_AST;
-	}
-	
-	public final void throwsSpec() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST throwsSpec_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp28_AST = null;
-			tmp28_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp28_AST);
-			match(LITERAL_throws);
-			id();
-			astFactory.addASTChild(currentAST, returnAST);
-			{
-			_loop49:
-			do {
-				if ((LA(1)==COMMA)) {
-					GrammarAST tmp29_AST = null;
-					tmp29_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.addASTChild(currentAST, tmp29_AST);
-					match(COMMA);
-					id();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					break _loop49;
-				}
-				
-			} while (true);
-			}
-			throwsSpec_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_12);
-		}
-		returnAST = throwsSpec_AST;
-	}
-	
-	public final void ruleScopeSpec() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST ruleScopeSpec_AST = null;
-		Token  a = null;
-		GrammarAST a_AST = null;
-		GrammarAST ids_AST = null;
-		
-		int line = LT(1).getLine();
-		int column = LT(1).getColumn();
-		
-		
-		try {      // for error handling
-			{
-			if ((LA(1)==SCOPE) && (LA(2)==ACTION)) {
-				match(SCOPE);
-				a = LT(1);
-				a_AST = (GrammarAST)astFactory.create(a);
-				match(ACTION);
-			}
-			else if ((LA(1)==SCOPE||LA(1)==AMPERSAND||LA(1)==COLON) && (_tokenSet_13.member(LA(2)))) {
-			}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			
-			}
-			{
-			_loop53:
-			do {
-				if ((LA(1)==SCOPE)) {
-					match(SCOPE);
-					idList();
-					ids_AST = (GrammarAST)returnAST;
-					match(SEMI);
-				}
-				else {
-					break _loop53;
-				}
-				
-			} while (true);
-			}
-			ruleScopeSpec_AST = (GrammarAST)currentAST.root;
-			
-					GrammarAST scopeRoot = (GrammarAST)(GrammarAST)astFactory.create(SCOPE,"scope");
-					scopeRoot.setLine(line);
-					scopeRoot.setColumn(column);
-					ruleScopeSpec_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(scopeRoot).add(a_AST).add(ids_AST));
-					
-			currentAST.root = ruleScopeSpec_AST;
-			currentAST.child = ruleScopeSpec_AST!=null &&ruleScopeSpec_AST.getFirstChild()!=null ?
-				ruleScopeSpec_AST.getFirstChild() : ruleScopeSpec_AST;
-			currentAST.advanceChildToEnd();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_14);
-		}
-		returnAST = ruleScopeSpec_AST;
-	}
-	
-	public final void ruleActions() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST ruleActions_AST = null;
-		
-		try {      // for error handling
-			{
-			int _cnt45=0;
-			_loop45:
-			do {
-				if ((LA(1)==AMPERSAND)) {
-					ruleAction();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					if ( _cnt45>=1 ) { break _loop45; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt45++;
-			} while (true);
-			}
-			ruleActions_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_8);
-		}
-		returnAST = ruleActions_AST;
-	}
-	
-	public final void altList(
-		Map opts
-	) throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST altList_AST = null;
-		GrammarAST a1_AST = null;
-		GrammarAST a2_AST = null;
-		
-			GrammarAST blkRoot = (GrammarAST)astFactory.create(BLOCK,"BLOCK");
-			blkRoot.options = opts;
-			blkRoot.setLine(LT(0).getLine()); // set to : or (
-			blkRoot.setColumn(LT(0).getColumn());
-			GrammarAST save = currentBlockAST;
-			currentBlockAST = blkRoot;
-		
-		
-		try {      // for error handling
-			alternative();
-			a1_AST = (GrammarAST)returnAST;
-			astFactory.addASTChild(currentAST, returnAST);
-			rewrite();
-			astFactory.addASTChild(currentAST, returnAST);
-			if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a1_AST);
-			{
-			_loop62:
-			do {
-				if ((LA(1)==OR)) {
-					match(OR);
-					alternative();
-					a2_AST = (GrammarAST)returnAST;
-					astFactory.addASTChild(currentAST, returnAST);
-					rewrite();
-					astFactory.addASTChild(currentAST, returnAST);
-					if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a2_AST);
-				}
-				else {
-					break _loop62;
-				}
-				
-			} while (true);
-			}
-			altList_AST = (GrammarAST)currentAST.root;
-			
-			altList_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(blkRoot).add(altList_AST).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));
-			currentBlockAST = save;
-			
-			currentAST.root = altList_AST;
-			currentAST.child = altList_AST!=null &&altList_AST.getFirstChild()!=null ?
-				altList_AST.getFirstChild() : altList_AST;
-			currentAST.advanceChildToEnd();
-			altList_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_9);
-		}
-		returnAST = altList_AST;
-	}
-	
-	public final void exceptionGroup() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST exceptionGroup_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case LITERAL_catch:
-			{
-				{
-				int _cnt68=0;
-				_loop68:
-				do {
-					if ((LA(1)==LITERAL_catch)) {
-						exceptionHandler();
-						astFactory.addASTChild(currentAST, returnAST);
-					}
-					else {
-						if ( _cnt68>=1 ) { break _loop68; } else {throw new NoViableAltException(LT(1), getFilename());}
-					}
-					
-					_cnt68++;
-				} while (true);
-				}
-				{
-				switch ( LA(1)) {
-				case LITERAL_finally:
-				{
-					finallyClause();
-					astFactory.addASTChild(currentAST, returnAST);
-					break;
-				}
-				case EOF:
-				case FRAGMENT:
-				case DOC_COMMENT:
-				case TOKEN_REF:
-				case LITERAL_protected:
-				case LITERAL_public:
-				case LITERAL_private:
-				case RULE_REF:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				exceptionGroup_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case LITERAL_finally:
-			{
-				finallyClause();
-				astFactory.addASTChild(currentAST, returnAST);
-				exceptionGroup_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_11);
-		}
-		returnAST = exceptionGroup_AST;
-	}
-	
-/** Match stuff like @init {int i;} */
-	public final void ruleAction() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST ruleAction_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp34_AST = null;
-			tmp34_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp34_AST);
-			match(AMPERSAND);
-			id();
-			astFactory.addASTChild(currentAST, returnAST);
-			GrammarAST tmp35_AST = null;
-			tmp35_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp35_AST);
-			match(ACTION);
-			ruleAction_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_14);
-		}
-		returnAST = ruleAction_AST;
-	}
-	
-	public final void idList() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST idList_AST = null;
-		
-		try {      // for error handling
-			{
-			int _cnt103=0;
-			_loop103:
-			do {
-				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
-					id();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					if ( _cnt103>=1 ) { break _loop103; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt103++;
-			} while (true);
-			}
-			idList_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_9);
-		}
-		returnAST = idList_AST;
-	}
-	
-/** Build #(BLOCK ( #(ALT ...) EOB )+ ) */
-	public final void block() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST block_AST = null;
-		Token  lp = null;
-		GrammarAST lp_AST = null;
-		GrammarAST a1_AST = null;
-		GrammarAST a2_AST = null;
-		Token  rp = null;
-		GrammarAST rp_AST = null;
-		
-		GrammarAST save = currentBlockAST;
-		Map opts=null;
-		
-		
-		try {      // for error handling
-			lp = LT(1);
-			lp_AST = (GrammarAST)astFactory.create(lp);
-			astFactory.makeASTRoot(currentAST, lp_AST);
-			match(LPAREN);
-			lp_AST.setType(BLOCK); lp_AST.setText("BLOCK");
-			{
-			if ((LA(1)==OPTIONS||LA(1)==AMPERSAND||LA(1)==COLON)) {
-				{
-				switch ( LA(1)) {
-				case OPTIONS:
-				{
-					opts=optionsSpec();
-					astFactory.addASTChild(currentAST, returnAST);
-					block_AST = (GrammarAST)currentAST.root;
-					block_AST.setOptions(grammar,opts);
-					break;
-				}
-				case AMPERSAND:
-				case COLON:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				{
-				switch ( LA(1)) {
-				case AMPERSAND:
-				{
-					ruleActions();
-					astFactory.addASTChild(currentAST, returnAST);
-					break;
-				}
-				case COLON:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				match(COLON);
-			}
-			else if ((LA(1)==ACTION) && (LA(2)==COLON)) {
-				GrammarAST tmp37_AST = null;
-				tmp37_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp37_AST);
-				match(ACTION);
-				match(COLON);
-			}
-			else if ((_tokenSet_15.member(LA(1))) && (_tokenSet_16.member(LA(2)))) {
-			}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			
-			}
-			currentBlockAST = lp_AST;
-			alternative();
-			a1_AST = (GrammarAST)returnAST;
-			astFactory.addASTChild(currentAST, returnAST);
-			rewrite();
-			astFactory.addASTChild(currentAST, returnAST);
-			if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a1_AST);
-			{
-			_loop59:
-			do {
-				if ((LA(1)==OR)) {
-					match(OR);
-					alternative();
-					a2_AST = (GrammarAST)returnAST;
-					astFactory.addASTChild(currentAST, returnAST);
-					rewrite();
-					astFactory.addASTChild(currentAST, returnAST);
-					if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(a2_AST);
-				}
-				else {
-					break _loop59;
-				}
-				
-			} while (true);
-			}
-			rp = LT(1);
-			rp_AST = (GrammarAST)astFactory.create(rp);
-			match(RPAREN);
-			block_AST = (GrammarAST)currentAST.root;
-			
-					currentBlockAST = save;
-			GrammarAST eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
-			eob.setLine(rp.getLine());
-			eob.setColumn(rp.getColumn());
-			block_AST.addChild(eob);
-			
-			block_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_17);
-		}
-		returnAST = block_AST;
-	}
-	
-	public final void alternative() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST alternative_AST = null;
-		GrammarAST el_AST = null;
-		
-		GrammarAST eoa = (GrammarAST)astFactory.create(EOA,"<end-of-alt>");
-		GrammarAST altRoot = (GrammarAST)astFactory.create(ALT,"ALT");
-		altRoot.setLine(LT(1).getLine());
-		altRoot.setColumn(LT(1).getColumn());
-		
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case ACTION:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case LPAREN:
-			case SEMPRED:
-			case RULE_REF:
-			case NOT:
-			case TREE_BEGIN:
-			case WILDCARD:
-			{
-				{
-				int _cnt65=0;
-				_loop65:
-				do {
-					if ((_tokenSet_18.member(LA(1)))) {
-						element();
-						el_AST = (GrammarAST)returnAST;
-						astFactory.addASTChild(currentAST, returnAST);
-					}
-					else {
-						if ( _cnt65>=1 ) { break _loop65; } else {throw new NoViableAltException(LT(1), getFilename());}
-					}
-					
-					_cnt65++;
-				} while (true);
-				}
-				alternative_AST = (GrammarAST)currentAST.root;
-				
-				if ( alternative_AST==null ) {
-				alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add((GrammarAST)astFactory.create(EPSILON,"epsilon")).add(eoa));
-				}
-				else {
-					// we have a real list of stuff
-					alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add(alternative_AST).add(eoa));
-				}
-				
-				currentAST.root = alternative_AST;
-				currentAST.child = alternative_AST!=null &&alternative_AST.getFirstChild()!=null ?
-					alternative_AST.getFirstChild() : alternative_AST;
-				currentAST.advanceChildToEnd();
-				alternative_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case SEMI:
-			case OR:
-			case RPAREN:
-			case REWRITE:
-			{
-				alternative_AST = (GrammarAST)currentAST.root;
-				
-					GrammarAST eps = (GrammarAST)astFactory.create(EPSILON,"epsilon");
-						eps.setLine(LT(0).getLine()); // get line/col of '|' or ':' (prev token)
-						eps.setColumn(LT(0).getColumn());
-					alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add(eps).add(eoa));
-					
-				currentAST.root = alternative_AST;
-				currentAST.child = alternative_AST!=null &&alternative_AST.getFirstChild()!=null ?
-					alternative_AST.getFirstChild() : alternative_AST;
-				currentAST.advanceChildToEnd();
-				alternative_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_19);
-		}
-		returnAST = alternative_AST;
-	}
-	
-	public final void rewrite() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_AST = null;
-		Token  rew = null;
-		GrammarAST rew_AST = null;
-		Token  pred = null;
-		GrammarAST pred_AST = null;
-		GrammarAST alt_AST = null;
-		Token  rew2 = null;
-		GrammarAST rew2_AST = null;
-		GrammarAST alt2_AST = null;
-		
-		GrammarAST root = new GrammarAST();
-		
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case REWRITE:
-			{
-				{
-				_loop108:
-				do {
-					if ((LA(1)==REWRITE) && (LA(2)==SEMPRED)) {
-						rew = LT(1);
-						rew_AST = (GrammarAST)astFactory.create(rew);
-						match(REWRITE);
-						pred = LT(1);
-						pred_AST = (GrammarAST)astFactory.create(pred);
-						match(SEMPRED);
-						rewrite_alternative();
-						alt_AST = (GrammarAST)returnAST;
-						root.addChild( (GrammarAST)astFactory.make( (new ASTArray(3)).add(rew_AST).add(pred_AST).add(alt_AST)) );
-						
-						pred_AST.setEnclosingRule(currentRuleName);
-						rew_AST.setEnclosingRule(currentRuleName);
-						
-					}
-					else {
-						break _loop108;
-					}
-					
-				} while (true);
-				}
-				rew2 = LT(1);
-				rew2_AST = (GrammarAST)astFactory.create(rew2);
-				match(REWRITE);
-				rewrite_alternative();
-				alt2_AST = (GrammarAST)returnAST;
-				rewrite_AST = (GrammarAST)currentAST.root;
-				
-				root.addChild( (GrammarAST)astFactory.make( (new ASTArray(2)).add(rew2_AST).add(alt2_AST)) );
-				rewrite_AST = (GrammarAST)root.getFirstChild();
-				
-				currentAST.root = rewrite_AST;
-				currentAST.child = rewrite_AST!=null &&rewrite_AST.getFirstChild()!=null ?
-					rewrite_AST.getFirstChild() : rewrite_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case SEMI:
-			case OR:
-			case RPAREN:
-			{
-				rewrite_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_20);
-		}
-		returnAST = rewrite_AST;
-	}
-	
-	public final void element() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST element_AST = null;
-		
-		try {      // for error handling
-			elementNoOptionSpec();
-			astFactory.addASTChild(currentAST, returnAST);
-			element_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_21);
-		}
-		returnAST = element_AST;
-	}
-	
-	public final void exceptionHandler() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST exceptionHandler_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp40_AST = null;
-			tmp40_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp40_AST);
-			match(LITERAL_catch);
-			GrammarAST tmp41_AST = null;
-			tmp41_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp41_AST);
-			match(ARG_ACTION);
-			GrammarAST tmp42_AST = null;
-			tmp42_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp42_AST);
-			match(ACTION);
-			exceptionHandler_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_22);
-		}
-		returnAST = exceptionHandler_AST;
-	}
-	
-	public final void finallyClause() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST finallyClause_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp43_AST = null;
-			tmp43_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp43_AST);
-			match(LITERAL_finally);
-			GrammarAST tmp44_AST = null;
-			tmp44_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp44_AST);
-			match(ACTION);
-			finallyClause_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_11);
-		}
-		returnAST = finallyClause_AST;
-	}
-	
-	public final void elementNoOptionSpec() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST elementNoOptionSpec_AST = null;
-		Token  p = null;
-		GrammarAST p_AST = null;
-		GrammarAST t3_AST = null;
-		
-		IntSet elements=null;
-		GrammarAST sub, sub2;
-		
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case LPAREN:
-			{
-				ebnf();
-				astFactory.addASTChild(currentAST, returnAST);
-				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp45_AST = null;
-				tmp45_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp45_AST);
-				match(ACTION);
-				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case SEMPRED:
-			{
-				p = LT(1);
-				p_AST = (GrammarAST)astFactory.create(p);
-				astFactory.addASTChild(currentAST, p_AST);
-				match(SEMPRED);
-				{
-				switch ( LA(1)) {
-				case IMPLIES:
-				{
-					match(IMPLIES);
-					p_AST.setType(GATED_SEMPRED);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				
-						p_AST.setEnclosingRule(currentRuleName);
-						grammar.blocksWithSemPreds.add(currentBlockAST);
-						
-				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				tree();
-				t3_AST = (GrammarAST)returnAST;
-				astFactory.addASTChild(currentAST, returnAST);
-				elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==ASSIGN||LA(2)==PLUS_ASSIGN)) {
-					id();
-					astFactory.addASTChild(currentAST, returnAST);
-					{
-					switch ( LA(1)) {
-					case ASSIGN:
-					{
-						GrammarAST tmp47_AST = null;
-						tmp47_AST = (GrammarAST)astFactory.create(LT(1));
-						astFactory.makeASTRoot(currentAST, tmp47_AST);
-						match(ASSIGN);
-						break;
-					}
-					case PLUS_ASSIGN:
-					{
-						GrammarAST tmp48_AST = null;
-						tmp48_AST = (GrammarAST)astFactory.create(LT(1));
-						astFactory.makeASTRoot(currentAST, tmp48_AST);
-						match(PLUS_ASSIGN);
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(LT(1), getFilename());
-					}
-					}
-					}
-					{
-					switch ( LA(1)) {
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case TOKEN_REF:
-					case RULE_REF:
-					case NOT:
-					case WILDCARD:
-					{
-						atom();
-						astFactory.addASTChild(currentAST, returnAST);
-						break;
-					}
-					case LPAREN:
-					{
-						block();
-						astFactory.addASTChild(currentAST, returnAST);
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(LT(1), getFilename());
-					}
-					}
-					}
-					{
-					switch ( LA(1)) {
-					case STAR:
-					case QUESTION:
-					case PLUS:
-					{
-						sub=ebnfSuffix((GrammarAST)currentAST.root,false);
-						elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-						elementNoOptionSpec_AST=sub;
-						currentAST.root = elementNoOptionSpec_AST;
-						currentAST.child = elementNoOptionSpec_AST!=null &&elementNoOptionSpec_AST.getFirstChild()!=null ?
-							elementNoOptionSpec_AST.getFirstChild() : elementNoOptionSpec_AST;
-						currentAST.advanceChildToEnd();
-						break;
-					}
-					case ACTION:
-					case SEMI:
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case TOKEN_REF:
-					case LPAREN:
-					case OR:
-					case RPAREN:
-					case SEMPRED:
-					case RULE_REF:
-					case NOT:
-					case TREE_BEGIN:
-					case WILDCARD:
-					case REWRITE:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(LT(1), getFilename());
-					}
-					}
-					}
-					elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-				}
-				else if ((_tokenSet_23.member(LA(1))) && (_tokenSet_24.member(LA(2)))) {
-					atom();
-					astFactory.addASTChild(currentAST, returnAST);
-					{
-					switch ( LA(1)) {
-					case STAR:
-					case QUESTION:
-					case PLUS:
-					{
-						sub2=ebnfSuffix((GrammarAST)currentAST.root,false);
-						elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-						elementNoOptionSpec_AST=sub2;
-						currentAST.root = elementNoOptionSpec_AST;
-						currentAST.child = elementNoOptionSpec_AST!=null &&elementNoOptionSpec_AST.getFirstChild()!=null ?
-							elementNoOptionSpec_AST.getFirstChild() : elementNoOptionSpec_AST;
-						currentAST.advanceChildToEnd();
-						break;
-					}
-					case ACTION:
-					case SEMI:
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case TOKEN_REF:
-					case LPAREN:
-					case OR:
-					case RPAREN:
-					case SEMPRED:
-					case RULE_REF:
-					case NOT:
-					case TREE_BEGIN:
-					case WILDCARD:
-					case REWRITE:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(LT(1), getFilename());
-					}
-					}
-					}
-					elementNoOptionSpec_AST = (GrammarAST)currentAST.root;
-				}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_21);
-		}
-		returnAST = elementNoOptionSpec_AST;
-	}
-	
-	public final void atom() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST atom_AST = null;
-		Token  rr = null;
-		GrammarAST rr_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case NOT:
-			{
-				notSet();
-				astFactory.addASTChild(currentAST, returnAST);
-				{
-				switch ( LA(1)) {
-				case ROOT:
-				{
-					GrammarAST tmp49_AST = null;
-					tmp49_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp49_AST);
-					match(ROOT);
-					break;
-				}
-				case BANG:
-				{
-					GrammarAST tmp50_AST = null;
-					tmp50_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp50_AST);
-					match(BANG);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case RULE_REF:
-			{
-				rr = LT(1);
-				rr_AST = (GrammarAST)astFactory.create(rr);
-				astFactory.makeASTRoot(currentAST, rr_AST);
-				match(RULE_REF);
-				{
-				switch ( LA(1)) {
-				case ARG_ACTION:
-				{
-					GrammarAST tmp51_AST = null;
-					tmp51_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.addASTChild(currentAST, tmp51_AST);
-					match(ARG_ACTION);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case BANG:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case ROOT:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				{
-				switch ( LA(1)) {
-				case ROOT:
-				{
-					GrammarAST tmp52_AST = null;
-					tmp52_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp52_AST);
-					match(ROOT);
-					break;
-				}
-				case BANG:
-				{
-					GrammarAST tmp53_AST = null;
-					tmp53_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp53_AST);
-					match(BANG);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-				if ((LA(1)==CHAR_LITERAL) && (LA(2)==RANGE)) {
-					range();
-					astFactory.addASTChild(currentAST, returnAST);
-					{
-					switch ( LA(1)) {
-					case ROOT:
-					{
-						GrammarAST tmp54_AST = null;
-						tmp54_AST = (GrammarAST)astFactory.create(LT(1));
-						astFactory.makeASTRoot(currentAST, tmp54_AST);
-						match(ROOT);
-						break;
-					}
-					case BANG:
-					{
-						GrammarAST tmp55_AST = null;
-						tmp55_AST = (GrammarAST)astFactory.create(LT(1));
-						astFactory.makeASTRoot(currentAST, tmp55_AST);
-						match(BANG);
-						break;
-					}
-					case ACTION:
-					case SEMI:
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case STAR:
-					case TOKEN_REF:
-					case LPAREN:
-					case OR:
-					case RPAREN:
-					case SEMPRED:
-					case RULE_REF:
-					case NOT:
-					case TREE_BEGIN:
-					case QUESTION:
-					case PLUS:
-					case WILDCARD:
-					case REWRITE:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(LT(1), getFilename());
-					}
-					}
-					}
-					atom_AST = (GrammarAST)currentAST.root;
-				}
-				else if ((_tokenSet_25.member(LA(1))) && (_tokenSet_26.member(LA(2)))) {
-					terminal();
-					astFactory.addASTChild(currentAST, returnAST);
-					atom_AST = (GrammarAST)currentAST.root;
-				}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_27);
-		}
-		returnAST = atom_AST;
-	}
-	
-	public final GrammarAST  ebnfSuffix(
-		GrammarAST elemAST, boolean inRewrite
-	) throws RecognitionException, TokenStreamException {
-		GrammarAST subrule=null;
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST ebnfSuffix_AST = null;
-		
-		GrammarAST ebnfRoot=null;
-		
-		
-		try {      // for error handling
-			{
-			switch ( LA(1)) {
-			case QUESTION:
-			{
-				GrammarAST tmp56_AST = null;
-				tmp56_AST = (GrammarAST)astFactory.create(LT(1));
-				match(QUESTION);
-				ebnfRoot = (GrammarAST)astFactory.create(OPTIONAL,"?");
-				break;
-			}
-			case STAR:
-			{
-				GrammarAST tmp57_AST = null;
-				tmp57_AST = (GrammarAST)astFactory.create(LT(1));
-				match(STAR);
-				ebnfRoot = (GrammarAST)astFactory.create(CLOSURE,"*");
-				break;
-			}
-			case PLUS:
-			{
-				GrammarAST tmp58_AST = null;
-				tmp58_AST = (GrammarAST)astFactory.create(LT(1));
-				match(PLUS);
-				ebnfRoot = (GrammarAST)astFactory.create(POSITIVE_CLOSURE,"+");
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			
-					GrammarAST save = currentBlockAST;
-				ebnfRoot.setLine(elemAST.getLine());
-				ebnfRoot.setColumn(elemAST.getColumn());
-				GrammarAST blkRoot = (GrammarAST)astFactory.create(BLOCK,"BLOCK");
-				currentBlockAST = blkRoot;
-				GrammarAST eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
-					eob.setLine(elemAST.getLine());
-					eob.setColumn(elemAST.getColumn());
-					GrammarAST alt = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT,"ALT")).add(elemAST).add((GrammarAST)astFactory.create(EOA,"<end-of-alt>")));
-				if ( !inRewrite ) {
-					prefixWithSynPred(alt);
-				}
-					subrule =
-					     (GrammarAST)astFactory.make( (new ASTArray(2)).add(ebnfRoot).add((GrammarAST)astFactory.make( (new ASTArray(3)).add(blkRoot).add(alt).add(eob))));
-					currentBlockAST = save;
-					
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_28);
-		}
-		returnAST = ebnfSuffix_AST;
-		return subrule;
-	}
-	
-/** matches ENBF blocks (and sets via block rule) */
-	public final void ebnf() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST ebnf_AST = null;
-		GrammarAST b_AST = null;
-		
-		int line = LT(1).getLine();
-		int col = LT(1).getColumn();
-		
-		
-		try {      // for error handling
-			block();
-			b_AST = (GrammarAST)returnAST;
-			{
-			switch ( LA(1)) {
-			case QUESTION:
-			{
-				GrammarAST tmp59_AST = null;
-				tmp59_AST = (GrammarAST)astFactory.create(LT(1));
-				match(QUESTION);
-				ebnf_AST = (GrammarAST)currentAST.root;
-				ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(OPTIONAL,"?")).add(b_AST));
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case STAR:
-			{
-				GrammarAST tmp60_AST = null;
-				tmp60_AST = (GrammarAST)astFactory.create(LT(1));
-				match(STAR);
-				ebnf_AST = (GrammarAST)currentAST.root;
-				ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(CLOSURE,"*")).add(b_AST));
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case PLUS:
-			{
-				GrammarAST tmp61_AST = null;
-				tmp61_AST = (GrammarAST)astFactory.create(LT(1));
-				match(PLUS);
-				ebnf_AST = (GrammarAST)currentAST.root;
-				ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(POSITIVE_CLOSURE,"+")).add(b_AST));
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case IMPLIES:
-			{
-				match(IMPLIES);
-				ebnf_AST = (GrammarAST)currentAST.root;
-				
-							if ( gtype==COMBINED_GRAMMAR &&
-							     Character.isUpperCase(currentRuleName.charAt(0)) )
-						    {
-				// ignore for lexer rules in combined
-						    	ebnf_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(SYNPRED,"=>")).add(b_AST)); 
-						    }
-						    else {
-						    	// create manually specified (...)=> predicate;
-				// convert to sempred
-						    	ebnf_AST = createSynSemPredFromBlock(b_AST, SYN_SEMPRED);
-							}
-							
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case ROOT:
-			{
-				GrammarAST tmp63_AST = null;
-				tmp63_AST = (GrammarAST)astFactory.create(LT(1));
-				match(ROOT);
-				ebnf_AST = (GrammarAST)currentAST.root;
-				ebnf_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(tmp63_AST).add(b_AST));
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case BANG:
-			{
-				GrammarAST tmp64_AST = null;
-				tmp64_AST = (GrammarAST)astFactory.create(LT(1));
-				match(BANG);
-				ebnf_AST = (GrammarAST)currentAST.root;
-				ebnf_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(tmp64_AST).add(b_AST));
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case ACTION:
-			case SEMI:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case LPAREN:
-			case OR:
-			case RPAREN:
-			case SEMPRED:
-			case RULE_REF:
-			case NOT:
-			case TREE_BEGIN:
-			case WILDCARD:
-			case REWRITE:
-			{
-				ebnf_AST = (GrammarAST)currentAST.root;
-				ebnf_AST = b_AST;
-				currentAST.root = ebnf_AST;
-				currentAST.child = ebnf_AST!=null &&ebnf_AST.getFirstChild()!=null ?
-					ebnf_AST.getFirstChild() : ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			ebnf_AST = (GrammarAST)currentAST.root;
-			ebnf_AST.setLine(line); ebnf_AST.setColumn(col);
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_21);
-		}
-		returnAST = ebnf_AST;
-	}
-	
-	public final void tree() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST tree_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp65_AST = null;
-			tmp65_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp65_AST);
-			match(TREE_BEGIN);
-			element();
-			astFactory.addASTChild(currentAST, returnAST);
-			{
-			int _cnt88=0;
-			_loop88:
-			do {
-				if ((_tokenSet_18.member(LA(1)))) {
-					element();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					if ( _cnt88>=1 ) { break _loop88; } else {throw new NoViableAltException(LT(1), getFilename());}
-				}
-				
-				_cnt88++;
-			} while (true);
-			}
-			match(RPAREN);
-			tree_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_21);
-		}
-		returnAST = tree_AST;
-	}
-	
-	public final void range() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST range_AST = null;
-		Token  c1 = null;
-		GrammarAST c1_AST = null;
-		Token  c2 = null;
-		GrammarAST c2_AST = null;
-		
-		GrammarAST subrule=null, root=null;
-		
-		
-		try {      // for error handling
-			c1 = LT(1);
-			c1_AST = (GrammarAST)astFactory.create(c1);
-			match(CHAR_LITERAL);
-			GrammarAST tmp67_AST = null;
-			tmp67_AST = (GrammarAST)astFactory.create(LT(1));
-			match(RANGE);
-			c2 = LT(1);
-			c2_AST = (GrammarAST)astFactory.create(c2);
-			match(CHAR_LITERAL);
-			range_AST = (GrammarAST)currentAST.root;
-			
-					GrammarAST r = (GrammarAST)astFactory.create(CHAR_RANGE,"..");
-					r.setLine(c1.getLine());
-					r.setColumn(c1.getColumn());
-					range_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(r).add(c1_AST).add(c2_AST));
-					root = range_AST;
-					
-			currentAST.root = range_AST;
-			currentAST.child = range_AST!=null &&range_AST.getFirstChild()!=null ?
-				range_AST.getFirstChild() : range_AST;
-			currentAST.advanceChildToEnd();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_29);
-		}
-		returnAST = range_AST;
-	}
-	
-	public final void terminal() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST terminal_AST = null;
-		Token  cl = null;
-		GrammarAST cl_AST = null;
-		Token  tr = null;
-		GrammarAST tr_AST = null;
-		Token  sl = null;
-		GrammarAST sl_AST = null;
-		Token  wi = null;
-		GrammarAST wi_AST = null;
-		
-		GrammarAST ebnfRoot=null, subrule=null;
-		
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case CHAR_LITERAL:
-			{
-				cl = LT(1);
-				cl_AST = (GrammarAST)astFactory.create(cl);
-				astFactory.makeASTRoot(currentAST, cl_AST);
-				match(CHAR_LITERAL);
-				{
-				switch ( LA(1)) {
-				case ROOT:
-				{
-					GrammarAST tmp68_AST = null;
-					tmp68_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp68_AST);
-					match(ROOT);
-					break;
-				}
-				case BANG:
-				{
-					GrammarAST tmp69_AST = null;
-					tmp69_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp69_AST);
-					match(BANG);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				terminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case TOKEN_REF:
-			{
-				tr = LT(1);
-				tr_AST = (GrammarAST)astFactory.create(tr);
-				astFactory.makeASTRoot(currentAST, tr_AST);
-				match(TOKEN_REF);
-				{
-				switch ( LA(1)) {
-				case ARG_ACTION:
-				{
-					GrammarAST tmp70_AST = null;
-					tmp70_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.addASTChild(currentAST, tmp70_AST);
-					match(ARG_ACTION);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case BANG:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case ROOT:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				{
-				switch ( LA(1)) {
-				case ROOT:
-				{
-					GrammarAST tmp71_AST = null;
-					tmp71_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp71_AST);
-					match(ROOT);
-					break;
-				}
-				case BANG:
-				{
-					GrammarAST tmp72_AST = null;
-					tmp72_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp72_AST);
-					match(BANG);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				terminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				sl = LT(1);
-				sl_AST = (GrammarAST)astFactory.create(sl);
-				astFactory.addASTChild(currentAST, sl_AST);
-				match(STRING_LITERAL);
-				{
-				switch ( LA(1)) {
-				case ROOT:
-				{
-					GrammarAST tmp73_AST = null;
-					tmp73_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp73_AST);
-					match(ROOT);
-					break;
-				}
-				case BANG:
-				{
-					GrammarAST tmp74_AST = null;
-					tmp74_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp74_AST);
-					match(BANG);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				terminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case WILDCARD:
-			{
-				wi = LT(1);
-				wi_AST = (GrammarAST)astFactory.create(wi);
-				astFactory.addASTChild(currentAST, wi_AST);
-				match(WILDCARD);
-				{
-				switch ( LA(1)) {
-				case ROOT:
-				{
-					GrammarAST tmp75_AST = null;
-					tmp75_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp75_AST);
-					match(ROOT);
-					break;
-				}
-				case BANG:
-				{
-					GrammarAST tmp76_AST = null;
-					tmp76_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.makeASTRoot(currentAST, tmp76_AST);
-					match(BANG);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case SEMPRED:
-				case RULE_REF:
-				case NOT:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case WILDCARD:
-				case REWRITE:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				terminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_27);
-		}
-		returnAST = terminal_AST;
-	}
-	
-	public final void notSet() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST notSet_AST = null;
-		Token  n = null;
-		GrammarAST n_AST = null;
-		
-		int line = LT(1).getLine();
-		int col = LT(1).getColumn();
-		GrammarAST subrule=null;
-		
-		
-		try {      // for error handling
-			n = LT(1);
-			n_AST = (GrammarAST)astFactory.create(n);
-			astFactory.makeASTRoot(currentAST, n_AST);
-			match(NOT);
-			{
-			switch ( LA(1)) {
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			{
-				notTerminal();
-				astFactory.addASTChild(currentAST, returnAST);
-				break;
-			}
-			case LPAREN:
-			{
-				block();
-				astFactory.addASTChild(currentAST, returnAST);
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			notSet_AST = (GrammarAST)currentAST.root;
-			notSet_AST.setLine(line); notSet_AST.setColumn(col);
-			notSet_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_29);
-		}
-		returnAST = notSet_AST;
-	}
-	
-	public final void notTerminal() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST notTerminal_AST = null;
-		Token  cl = null;
-		GrammarAST cl_AST = null;
-		Token  tr = null;
-		GrammarAST tr_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case CHAR_LITERAL:
-			{
-				cl = LT(1);
-				cl_AST = (GrammarAST)astFactory.create(cl);
-				astFactory.addASTChild(currentAST, cl_AST);
-				match(CHAR_LITERAL);
-				notTerminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case TOKEN_REF:
-			{
-				tr = LT(1);
-				tr_AST = (GrammarAST)astFactory.create(tr);
-				astFactory.addASTChild(currentAST, tr_AST);
-				match(TOKEN_REF);
-				notTerminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				GrammarAST tmp77_AST = null;
-				tmp77_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp77_AST);
-				match(STRING_LITERAL);
-				notTerminal_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_29);
-		}
-		returnAST = notTerminal_AST;
-	}
-	
-/** Match anything that looks like an ID and return tree as token type ID */
-	public final void idToken() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST idToken_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case TOKEN_REF:
-			{
-				GrammarAST tmp78_AST = null;
-				tmp78_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp78_AST);
-				match(TOKEN_REF);
-				idToken_AST = (GrammarAST)currentAST.root;
-				idToken_AST.setType(ID);
-				idToken_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case RULE_REF:
-			{
-				GrammarAST tmp79_AST = null;
-				tmp79_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp79_AST);
-				match(RULE_REF);
-				idToken_AST = (GrammarAST)currentAST.root;
-				idToken_AST.setType(ID);
-				idToken_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_0);
-		}
-		returnAST = idToken_AST;
-	}
-	
-	public final void rewrite_alternative() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_alternative_AST = null;
-		
-		GrammarAST eoa = (GrammarAST)astFactory.create(EOA,"<end-of-alt>");
-		GrammarAST altRoot = (GrammarAST)astFactory.create(ALT,"ALT");
-		altRoot.setLine(LT(1).getLine());
-		altRoot.setColumn(LT(1).getColumn());
-		
-		
-		try {      // for error handling
-			if (((_tokenSet_30.member(LA(1))) && (_tokenSet_31.member(LA(2))))&&(grammar.buildTemplate())) {
-				rewrite_template();
-				astFactory.addASTChild(currentAST, returnAST);
-				rewrite_alternative_AST = (GrammarAST)currentAST.root;
-			}
-			else if (((_tokenSet_32.member(LA(1))) && (_tokenSet_33.member(LA(2))))&&(grammar.buildAST())) {
-				{
-				int _cnt112=0;
-				_loop112:
-				do {
-					if ((_tokenSet_32.member(LA(1)))) {
-						rewrite_element();
-						astFactory.addASTChild(currentAST, returnAST);
-					}
-					else {
-						if ( _cnt112>=1 ) { break _loop112; } else {throw new NoViableAltException(LT(1), getFilename());}
-					}
-					
-					_cnt112++;
-				} while (true);
-				}
-				rewrite_alternative_AST = (GrammarAST)currentAST.root;
-				
-				if ( rewrite_alternative_AST==null ) {
-				rewrite_alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add((GrammarAST)astFactory.create(EPSILON,"epsilon")).add(eoa));
-				}
-				else {
-				rewrite_alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add(rewrite_alternative_AST).add(eoa));
-				}
-				
-				currentAST.root = rewrite_alternative_AST;
-				currentAST.child = rewrite_alternative_AST!=null &&rewrite_alternative_AST.getFirstChild()!=null ?
-					rewrite_alternative_AST.getFirstChild() : rewrite_alternative_AST;
-				currentAST.advanceChildToEnd();
-				rewrite_alternative_AST = (GrammarAST)currentAST.root;
-			}
-			else if ((_tokenSet_19.member(LA(1)))) {
-				rewrite_alternative_AST = (GrammarAST)currentAST.root;
-				rewrite_alternative_AST = (GrammarAST)astFactory.make( (new ASTArray(3)).add(altRoot).add((GrammarAST)astFactory.create(EPSILON,"epsilon")).add(eoa));
-				currentAST.root = rewrite_alternative_AST;
-				currentAST.child = rewrite_alternative_AST!=null &&rewrite_alternative_AST.getFirstChild()!=null ?
-					rewrite_alternative_AST.getFirstChild() : rewrite_alternative_AST;
-				currentAST.advanceChildToEnd();
-				rewrite_alternative_AST = (GrammarAST)currentAST.root;
-			}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_19);
-		}
-		returnAST = rewrite_alternative_AST;
-	}
-	
-	public final void rewrite_block() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_block_AST = null;
-		Token  lp = null;
-		GrammarAST lp_AST = null;
-		
-		try {      // for error handling
-			lp = LT(1);
-			lp_AST = (GrammarAST)astFactory.create(lp);
-			astFactory.makeASTRoot(currentAST, lp_AST);
-			match(LPAREN);
-			lp_AST.setType(BLOCK); lp_AST.setText("BLOCK");
-			rewrite_alternative();
-			astFactory.addASTChild(currentAST, returnAST);
-			match(RPAREN);
-			rewrite_block_AST = (GrammarAST)currentAST.root;
-			
-			GrammarAST eob = (GrammarAST)astFactory.create(EOB,"<end-of-block>");
-			eob.setLine(lp.getLine());
-			eob.setColumn(lp.getColumn());
-			rewrite_block_AST.addChild(eob);
-			
-			rewrite_block_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_34);
-		}
-		returnAST = rewrite_block_AST;
-	}
-	
-/** Build a tree for a template rewrite:
-      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
-    where ARGLIST is always there even if no args exist.
-    ID can be "template" keyword.  If first child is ACTION then it's
-    an indirect template ref
-
-    -> foo(a={...}, b={...})
-    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
-    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
-	-> {st-expr} // st-expr evaluates to ST
- */
-	public final void rewrite_template() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_template_AST = null;
-		Token st=null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case LPAREN:
-			{
-				rewrite_indirect_template_head();
-				astFactory.addASTChild(currentAST, returnAST);
-				rewrite_template_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp81_AST = null;
-				tmp81_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp81_AST);
-				match(ACTION);
-				rewrite_template_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-				if (((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==LPAREN))&&(LT(1).getText().equals("template"))) {
-					rewrite_template_head();
-					astFactory.addASTChild(currentAST, returnAST);
-					st=LT(1);
-					{
-					switch ( LA(1)) {
-					case DOUBLE_QUOTE_STRING_LITERAL:
-					{
-						match(DOUBLE_QUOTE_STRING_LITERAL);
-						break;
-					}
-					case DOUBLE_ANGLE_STRING_LITERAL:
-					{
-						match(DOUBLE_ANGLE_STRING_LITERAL);
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(LT(1), getFilename());
-					}
-					}
-					}
-					rewrite_template_AST = (GrammarAST)currentAST.root;
-					rewrite_template_AST.addChild((GrammarAST)astFactory.create(st));
-					rewrite_template_AST = (GrammarAST)currentAST.root;
-				}
-				else if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==LPAREN)) {
-					rewrite_template_head();
-					astFactory.addASTChild(currentAST, returnAST);
-					rewrite_template_AST = (GrammarAST)currentAST.root;
-				}
-			else {
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_19);
-		}
-		returnAST = rewrite_template_AST;
-	}
-	
-	public final void rewrite_element() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_element_AST = null;
-		GrammarAST t_AST = null;
-		GrammarAST tr_AST = null;
-		
-		GrammarAST subrule=null;
-		
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case ACTION:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case DOLLAR:
-			{
-				rewrite_atom();
-				t_AST = (GrammarAST)returnAST;
-				astFactory.addASTChild(currentAST, returnAST);
-				{
-				switch ( LA(1)) {
-				case STAR:
-				case QUESTION:
-				case PLUS:
-				{
-					subrule=ebnfSuffix(t_AST,true);
-					astFactory.addASTChild(currentAST, returnAST);
-					rewrite_element_AST = (GrammarAST)currentAST.root;
-					rewrite_element_AST=subrule;
-					currentAST.root = rewrite_element_AST;
-					currentAST.child = rewrite_element_AST!=null &&rewrite_element_AST.getFirstChild()!=null ?
-						rewrite_element_AST.getFirstChild() : rewrite_element_AST;
-					currentAST.advanceChildToEnd();
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case RULE_REF:
-				case TREE_BEGIN:
-				case REWRITE:
-				case DOLLAR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				rewrite_element_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case LPAREN:
-			{
-				rewrite_ebnf();
-				astFactory.addASTChild(currentAST, returnAST);
-				rewrite_element_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				rewrite_tree();
-				tr_AST = (GrammarAST)returnAST;
-				astFactory.addASTChild(currentAST, returnAST);
-				{
-				switch ( LA(1)) {
-				case STAR:
-				case QUESTION:
-				case PLUS:
-				{
-					subrule=ebnfSuffix(tr_AST,true);
-					astFactory.addASTChild(currentAST, returnAST);
-					rewrite_element_AST = (GrammarAST)currentAST.root;
-					rewrite_element_AST=subrule;
-					currentAST.root = rewrite_element_AST;
-					currentAST.child = rewrite_element_AST!=null &&rewrite_element_AST.getFirstChild()!=null ?
-						rewrite_element_AST.getFirstChild() : rewrite_element_AST;
-					currentAST.advanceChildToEnd();
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case RULE_REF:
-				case TREE_BEGIN:
-				case REWRITE:
-				case DOLLAR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				rewrite_element_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_35);
-		}
-		returnAST = rewrite_element_AST;
-	}
-	
-	public final void rewrite_atom() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_atom_AST = null;
-		Token  cl = null;
-		GrammarAST cl_AST = null;
-		Token  tr = null;
-		GrammarAST tr_AST = null;
-		Token  rr = null;
-		GrammarAST rr_AST = null;
-		Token  sl = null;
-		GrammarAST sl_AST = null;
-		Token  d = null;
-		GrammarAST d_AST = null;
-		GrammarAST i_AST = null;
-		
-		GrammarAST subrule=null;
-		
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case CHAR_LITERAL:
-			{
-				cl = LT(1);
-				cl_AST = (GrammarAST)astFactory.create(cl);
-				astFactory.addASTChild(currentAST, cl_AST);
-				match(CHAR_LITERAL);
-				rewrite_atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case TOKEN_REF:
-			{
-				tr = LT(1);
-				tr_AST = (GrammarAST)astFactory.create(tr);
-				astFactory.makeASTRoot(currentAST, tr_AST);
-				match(TOKEN_REF);
-				{
-				switch ( LA(1)) {
-				case ARG_ACTION:
-				{
-					GrammarAST tmp84_AST = null;
-					tmp84_AST = (GrammarAST)astFactory.create(LT(1));
-					astFactory.addASTChild(currentAST, tmp84_AST);
-					match(ARG_ACTION);
-					break;
-				}
-				case ACTION:
-				case SEMI:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case STAR:
-				case TOKEN_REF:
-				case LPAREN:
-				case OR:
-				case RPAREN:
-				case RULE_REF:
-				case TREE_BEGIN:
-				case QUESTION:
-				case PLUS:
-				case REWRITE:
-				case DOLLAR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(LT(1), getFilename());
-				}
-				}
-				}
-				rewrite_atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case RULE_REF:
-			{
-				rr = LT(1);
-				rr_AST = (GrammarAST)astFactory.create(rr);
-				astFactory.addASTChild(currentAST, rr_AST);
-				match(RULE_REF);
-				rewrite_atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				sl = LT(1);
-				sl_AST = (GrammarAST)astFactory.create(sl);
-				astFactory.addASTChild(currentAST, sl_AST);
-				match(STRING_LITERAL);
-				rewrite_atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case DOLLAR:
-			{
-				d = LT(1);
-				d_AST = (GrammarAST)astFactory.create(d);
-				match(DOLLAR);
-				id();
-				i_AST = (GrammarAST)returnAST;
-				rewrite_atom_AST = (GrammarAST)currentAST.root;
-				
-						rewrite_atom_AST = (GrammarAST)astFactory.create(LABEL,i_AST.getText());
-						rewrite_atom_AST.setLine(d_AST.getLine());
-						rewrite_atom_AST.setColumn(d_AST.getColumn());
-				rewrite_atom_AST.setEnclosingRule(currentRuleName);
-						
-				currentAST.root = rewrite_atom_AST;
-				currentAST.child = rewrite_atom_AST!=null &&rewrite_atom_AST.getFirstChild()!=null ?
-					rewrite_atom_AST.getFirstChild() : rewrite_atom_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp85_AST = null;
-				tmp85_AST = (GrammarAST)astFactory.create(LT(1));
-				astFactory.addASTChild(currentAST, tmp85_AST);
-				match(ACTION);
-				rewrite_atom_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_36);
-		}
-		returnAST = rewrite_atom_AST;
-	}
-	
-	public final void rewrite_ebnf() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_ebnf_AST = null;
-		GrammarAST b_AST = null;
-		
-		int line = LT(1).getLine();
-		int col = LT(1).getColumn();
-		
-		
-		try {      // for error handling
-			rewrite_block();
-			b_AST = (GrammarAST)returnAST;
-			{
-			switch ( LA(1)) {
-			case QUESTION:
-			{
-				GrammarAST tmp86_AST = null;
-				tmp86_AST = (GrammarAST)astFactory.create(LT(1));
-				match(QUESTION);
-				rewrite_ebnf_AST = (GrammarAST)currentAST.root;
-				rewrite_ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(OPTIONAL,"?")).add(b_AST));
-				currentAST.root = rewrite_ebnf_AST;
-				currentAST.child = rewrite_ebnf_AST!=null &&rewrite_ebnf_AST.getFirstChild()!=null ?
-					rewrite_ebnf_AST.getFirstChild() : rewrite_ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case STAR:
-			{
-				GrammarAST tmp87_AST = null;
-				tmp87_AST = (GrammarAST)astFactory.create(LT(1));
-				match(STAR);
-				rewrite_ebnf_AST = (GrammarAST)currentAST.root;
-				rewrite_ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(CLOSURE,"*")).add(b_AST));
-				currentAST.root = rewrite_ebnf_AST;
-				currentAST.child = rewrite_ebnf_AST!=null &&rewrite_ebnf_AST.getFirstChild()!=null ?
-					rewrite_ebnf_AST.getFirstChild() : rewrite_ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			case PLUS:
-			{
-				GrammarAST tmp88_AST = null;
-				tmp88_AST = (GrammarAST)astFactory.create(LT(1));
-				match(PLUS);
-				rewrite_ebnf_AST = (GrammarAST)currentAST.root;
-				rewrite_ebnf_AST=(GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(POSITIVE_CLOSURE,"+")).add(b_AST));
-				currentAST.root = rewrite_ebnf_AST;
-				currentAST.child = rewrite_ebnf_AST!=null &&rewrite_ebnf_AST.getFirstChild()!=null ?
-					rewrite_ebnf_AST.getFirstChild() : rewrite_ebnf_AST;
-				currentAST.advanceChildToEnd();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-			}
-			rewrite_ebnf_AST = (GrammarAST)currentAST.root;
-			rewrite_ebnf_AST.setLine(line); rewrite_ebnf_AST.setColumn(col);
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_35);
-		}
-		returnAST = rewrite_ebnf_AST;
-	}
-	
-	public final void rewrite_tree() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_tree_AST = null;
-		
-		try {      // for error handling
-			GrammarAST tmp89_AST = null;
-			tmp89_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.makeASTRoot(currentAST, tmp89_AST);
-			match(TREE_BEGIN);
-			rewrite_atom();
-			astFactory.addASTChild(currentAST, returnAST);
-			{
-			_loop122:
-			do {
-				if ((_tokenSet_32.member(LA(1)))) {
-					rewrite_element();
-					astFactory.addASTChild(currentAST, returnAST);
-				}
-				else {
-					break _loop122;
-				}
-				
-			} while (true);
-			}
-			match(RPAREN);
-			rewrite_tree_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_36);
-		}
-		returnAST = rewrite_tree_AST;
-	}
-	
-/** -> foo(a={...}, ...) */
-	public final void rewrite_template_head() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_template_head_AST = null;
-		Token  lp = null;
-		GrammarAST lp_AST = null;
-		
-		try {      // for error handling
-			id();
-			astFactory.addASTChild(currentAST, returnAST);
-			lp = LT(1);
-			lp_AST = (GrammarAST)astFactory.create(lp);
-			astFactory.makeASTRoot(currentAST, lp_AST);
-			match(LPAREN);
-			lp_AST.setType(TEMPLATE); lp_AST.setText("TEMPLATE");
-			rewrite_template_args();
-			astFactory.addASTChild(currentAST, returnAST);
-			match(RPAREN);
-			rewrite_template_head_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_37);
-		}
-		returnAST = rewrite_template_head_AST;
-	}
-	
-/** -> ({expr})(a={...}, ...) */
-	public final void rewrite_indirect_template_head() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_indirect_template_head_AST = null;
-		Token  lp = null;
-		GrammarAST lp_AST = null;
-		
-		try {      // for error handling
-			lp = LT(1);
-			lp_AST = (GrammarAST)astFactory.create(lp);
-			astFactory.makeASTRoot(currentAST, lp_AST);
-			match(LPAREN);
-			lp_AST.setType(TEMPLATE); lp_AST.setText("TEMPLATE");
-			GrammarAST tmp92_AST = null;
-			tmp92_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp92_AST);
-			match(ACTION);
-			match(RPAREN);
-			match(LPAREN);
-			rewrite_template_args();
-			astFactory.addASTChild(currentAST, returnAST);
-			match(RPAREN);
-			rewrite_indirect_template_head_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_19);
-		}
-		returnAST = rewrite_indirect_template_head_AST;
-	}
-	
-	public final void rewrite_template_args() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_template_args_AST = null;
-		
-		try {      // for error handling
-			switch ( LA(1)) {
-			case TOKEN_REF:
-			case RULE_REF:
-			{
-				rewrite_template_arg();
-				astFactory.addASTChild(currentAST, returnAST);
-				{
-				_loop129:
-				do {
-					if ((LA(1)==COMMA)) {
-						match(COMMA);
-						rewrite_template_arg();
-						astFactory.addASTChild(currentAST, returnAST);
-					}
-					else {
-						break _loop129;
-					}
-					
-				} while (true);
-				}
-				rewrite_template_args_AST = (GrammarAST)currentAST.root;
-				rewrite_template_args_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add((GrammarAST)astFactory.create(ARGLIST,"ARGLIST")).add(rewrite_template_args_AST));
-				currentAST.root = rewrite_template_args_AST;
-				currentAST.child = rewrite_template_args_AST!=null &&rewrite_template_args_AST.getFirstChild()!=null ?
-					rewrite_template_args_AST.getFirstChild() : rewrite_template_args_AST;
-				currentAST.advanceChildToEnd();
-				rewrite_template_args_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			case RPAREN:
-			{
-				rewrite_template_args_AST = (GrammarAST)currentAST.root;
-				rewrite_template_args_AST = (GrammarAST)astFactory.create(ARGLIST,"ARGLIST");
-				currentAST.root = rewrite_template_args_AST;
-				currentAST.child = rewrite_template_args_AST!=null &&rewrite_template_args_AST.getFirstChild()!=null ?
-					rewrite_template_args_AST.getFirstChild() : rewrite_template_args_AST;
-				currentAST.advanceChildToEnd();
-				rewrite_template_args_AST = (GrammarAST)currentAST.root;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(LT(1), getFilename());
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_38);
-		}
-		returnAST = rewrite_template_args_AST;
-	}
-	
-	public final void rewrite_template_arg() throws RecognitionException, TokenStreamException {
-		
-		returnAST = null;
-		ASTPair currentAST = new ASTPair();
-		GrammarAST rewrite_template_arg_AST = null;
-		Token  a = null;
-		GrammarAST a_AST = null;
-		
-		try {      // for error handling
-			id();
-			astFactory.addASTChild(currentAST, returnAST);
-			a = LT(1);
-			a_AST = (GrammarAST)astFactory.create(a);
-			astFactory.makeASTRoot(currentAST, a_AST);
-			match(ASSIGN);
-			a_AST.setType(ARG); a_AST.setText("ARG");
-			GrammarAST tmp97_AST = null;
-			tmp97_AST = (GrammarAST)astFactory.create(LT(1));
-			astFactory.addASTChild(currentAST, tmp97_AST);
-			match(ACTION);
-			rewrite_template_arg_AST = (GrammarAST)currentAST.root;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			recover(ex,_tokenSet_39);
-		}
-		returnAST = rewrite_template_arg_AST;
-	}
-	
-	
-	public static final String[] _tokenNames = {
-		"<0>",
-		"EOF",
-		"<2>",
-		"NULL_TREE_LOOKAHEAD",
-		"\"options\"",
-		"\"tokens\"",
-		"\"parser\"",
-		"LEXER",
-		"RULE",
-		"BLOCK",
-		"OPTIONAL",
-		"CLOSURE",
-		"POSITIVE_CLOSURE",
-		"SYNPRED",
-		"RANGE",
-		"CHAR_RANGE",
-		"EPSILON",
-		"ALT",
-		"EOR",
-		"EOB",
-		"EOA",
-		"ID",
-		"ARG",
-		"ARGLIST",
-		"RET",
-		"LEXER_GRAMMAR",
-		"PARSER_GRAMMAR",
-		"TREE_GRAMMAR",
-		"COMBINED_GRAMMAR",
-		"INITACTION",
-		"LABEL",
-		"TEMPLATE",
-		"\"scope\"",
-		"GATED_SEMPRED",
-		"SYN_SEMPRED",
-		"BACKTRACK_SEMPRED",
-		"\"fragment\"",
-		"ACTION",
-		"DOC_COMMENT",
-		"SEMI",
-		"\"lexer\"",
-		"\"tree\"",
-		"\"grammar\"",
-		"AMPERSAND",
-		"COLON",
-		"RCURLY",
-		"ASSIGN",
-		"STRING_LITERAL",
-		"CHAR_LITERAL",
-		"INT",
-		"STAR",
-		"TOKEN_REF",
-		"\"protected\"",
-		"\"public\"",
-		"\"private\"",
-		"BANG",
-		"ARG_ACTION",
-		"\"returns\"",
-		"\"throws\"",
-		"COMMA",
-		"LPAREN",
-		"OR",
-		"RPAREN",
-		"\"catch\"",
-		"\"finally\"",
-		"PLUS_ASSIGN",
-		"SEMPRED",
-		"IMPLIES",
-		"ROOT",
-		"RULE_REF",
-		"NOT",
-		"TREE_BEGIN",
-		"QUESTION",
-		"PLUS",
-		"WILDCARD",
-		"REWRITE",
-		"DOLLAR",
-		"DOUBLE_QUOTE_STRING_LITERAL",
-		"DOUBLE_ANGLE_STRING_LITERAL",
-		"WS",
-		"COMMENT",
-		"SL_COMMENT",
-		"ML_COMMENT",
-		"OPEN_ELEMENT_OPTION",
-		"CLOSE_ELEMENT_OPTION",
-		"ESC",
-		"DIGIT",
-		"XDIGIT",
-		"NESTED_ARG_ACTION",
-		"NESTED_ACTION",
-		"ACTION_CHAR_LITERAL",
-		"ACTION_STRING_LITERAL",
-		"ACTION_ESC",
-		"WS_LOOP",
-		"INTERNAL_RULE_REF",
-		"WS_OPT",
-		"SRC"
-	};
-	
-	protected void buildTokenTypeASTClassMap() {
-		tokenTypeToASTClassMap=null;
-	};
-	
-	private static final long[] mk_tokenSet_0() {
-		long[] data = { 2L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
-	private static final long[] mk_tokenSet_1() {
-		long[] data = { 2251799813685248L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
-	private static final long[] mk_tokenSet_2() {
-		long[] data = { 9191240600534384656L, 7074L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
-	private static final long[] mk_tokenSet_3() {
-		long[] data = { 33803733376696352L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
-	private static final long[] mk_tokenSet_4() {
-		long[] data = { 33786141190651904L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
-	private static final long[] mk_tokenSet_5() {
-		long[] data = { 33786136895684608L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
-	private static final long[] mk_tokenSet_6() {
-		long[] data = { 33777340802662400L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
-	private static final long[] mk_tokenSet_7() {
-		long[] data = { 2252899325313088L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
-	private static final long[] mk_tokenSet_8() {
-		long[] data = { 17592186044416L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
-	private static final long[] mk_tokenSet_9() {
-		long[] data = { 549755813888L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
-	private static final long[] mk_tokenSet_10() {
-		long[] data = { 2286984185774080L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
-	private static final long[] mk_tokenSet_11() {
-		long[] data = { 33777340802662402L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11());
-	private static final long[] mk_tokenSet_12() {
-		long[] data = { 26392574033936L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12());
-	private static final long[] mk_tokenSet_13() {
-		long[] data = { 3461439213294059520L, 3300L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13());
-	private static final long[] mk_tokenSet_14() {
-		long[] data = { 26388279066624L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14());
-	private static final long[] mk_tokenSet_15() {
-		long[] data = { 8073124681965633536L, 3300L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15());
-	private static final long[] mk_tokenSet_16() {
-		long[] data = { 8182434279708442640L, 8190L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16());
-	private static final long[] mk_tokenSet_17() {
-		long[] data = { 8110279928647254016L, 4092L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17());
-	private static final long[] mk_tokenSet_18() {
-		long[] data = { 1155595654324551680L, 1252L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18());
-	private static final long[] mk_tokenSet_19() {
-		long[] data = { 6917529577396895744L, 2048L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19());
-	private static final long[] mk_tokenSet_20() {
-		long[] data = { 6917529577396895744L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20());
-	private static final long[] mk_tokenSet_21() {
-		long[] data = { 8073125231721447424L, 3300L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21());
-	private static final long[] mk_tokenSet_22() {
-		long[] data = { -9189594696052113406L, 33L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22());
-	private static final long[] mk_tokenSet_23() {
-		long[] data = { 2674012278751232L, 1120L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23());
-	private static final long[] mk_tokenSet_24() {
-		long[] data = { 8182337522685198336L, 4084L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24());
-	private static final long[] mk_tokenSet_25() {
-		long[] data = { 2674012278751232L, 1024L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25());
-	private static final long[] mk_tokenSet_26() {
-		long[] data = { 8182337522685181952L, 4084L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_26 = new BitSet(mk_tokenSet_26());
-	private static final long[] mk_tokenSet_27() {
-		long[] data = { 8074251131628290048L, 4068L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_27 = new BitSet(mk_tokenSet_27());
-	private static final long[] mk_tokenSet_28() {
-		long[] data = { 8073125231721447424L, 7396L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_28 = new BitSet(mk_tokenSet_28());
-	private static final long[] mk_tokenSet_29() {
-		long[] data = { 8110279928647254016L, 4084L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_29 = new BitSet(mk_tokenSet_29());
-	private static final long[] mk_tokenSet_30() {
-		long[] data = { 1155173441859485696L, 32L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_30 = new BitSet(mk_tokenSet_30());
-	private static final long[] mk_tokenSet_31() {
-		long[] data = { 8070451219442696192L, 2048L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_31 = new BitSet(mk_tokenSet_31());
-	private static final long[] mk_tokenSet_32() {
-		long[] data = { 1155595654324551680L, 4256L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_32 = new BitSet(mk_tokenSet_32());
-	private static final long[] mk_tokenSet_33() {
-		long[] data = { 8146308725666217984L, 7072L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_33 = new BitSet(mk_tokenSet_33());
-	private static final long[] mk_tokenSet_34() {
-		long[] data = { 1125899906842624L, 768L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_34 = new BitSet(mk_tokenSet_34());
-	private static final long[] mk_tokenSet_35() {
-		long[] data = { 8073125231721447424L, 6304L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_35 = new BitSet(mk_tokenSet_35());
-	private static final long[] mk_tokenSet_36() {
-		long[] data = { 8074251131628290048L, 7072L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_36 = new BitSet(mk_tokenSet_36());
-	private static final long[] mk_tokenSet_37() {
-		long[] data = { 6917529577396895744L, 26624L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_37 = new BitSet(mk_tokenSet_37());
-	private static final long[] mk_tokenSet_38() {
-		long[] data = { 4611686018427387904L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_38 = new BitSet(mk_tokenSet_38());
-	private static final long[] mk_tokenSet_39() {
-		long[] data = { 5188146770730811392L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_39 = new BitSet(mk_tokenSet_39());
-	
-	}
diff --git a/src/org/antlr/tool/ANTLRParser.smap b/src/org/antlr/tool/ANTLRParser.smap
deleted file mode 100644
index ce01718..0000000
--- a/src/org/antlr/tool/ANTLRParser.smap
+++ /dev/null
@@ -1,2758 +0,0 @@
-SMAP
-ANTLRParser.java
-G
-*S G
-*F
-+ 0 antlr.g
-antlr.g
-*L
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-1:32
-1:33
-1:34
-1:35
-99:75
-100:76
-101:77
-102:78
-104:80
-105:81
-106:82
-107:83
-108:84
-109:85
-110:86
-112:88
-113:89
-115:91
-116:92
-117:93
-118:94
-119:95
-121:97
-122:98
-123:99
-124:100
-125:101
-127:103
-128:104
-129:105
-130:106
-131:107
-132:108
-133:109
-134:110
-135:111
-137:113
-138:114
-139:115
-140:116
-141:117
-142:118
-143:119
-144:120
-145:121
-146:122
-147:123
-148:124
-149:125
-150:126
-151:127
-152:128
-153:129
-155:131
-156:132
-157:133
-158:134
-160:136
-161:137
-162:138
-163:139
-164:140
-166:142
-167:143
-168:144
-169:145
-170:146
-171:147
-172:148
-173:149
-174:150
-175:151
-176:152
-177:153
-178:154
-179:155
-180:156
-182:158
-183:159
-184:160
-185:161
-186:162
-187:163
-188:164
-189:165
-190:166
-191:167
-192:168
-193:169
-194:170
-195:171
-196:172
-197:173
-198:174
-199:175
-201:177
-202:178
-203:179
-204:180
-205:181
-206:182
-207:183
-208:184
-209:185
-210:186
-211:187
-212:188
-213:189
-214:190
-215:191
-217:193
-218:194
-219:195
-220:196
-221:197
-222:198
-223:199
-224:200
-225:201
-226:202
-237:233
-237:234
-237:235
-237:237
-237:238
-237:239
-237:255
-237:403
-237:404
-237:405
-237:406
-237:407
-237:408
-237:409
-238:249
-239:250
-240:251
-241:252
-245:257
-245:258
-245:259
-245:260
-245:261
-245:262
-245:273
-245:274
-245:275
-245:276
-245:277
-246:240
-246:241
-246:280
-246:281
-246:282
-246:283
-246:284
-246:285
-246:295
-246:296
-246:297
-246:298
-246:299
-247:242
-247:243
-247:301
-247:302
-247:303
-247:304
-247:305
-247:306
-247:307
-248:309
-248:310
-248:311
-248:312
-248:331
-248:332
-248:333
-248:334
-248:335
-249:313
-249:314
-250:315
-252:244
-252:338
-252:339
-252:340
-252:341
-252:342
-252:357
-252:358
-252:359
-252:360
-252:361
-253:245
-253:363
-253:364
-254:246
-254:366
-254:367
-254:368
-254:369
-254:370
-254:383
-254:384
-254:385
-254:386
-254:387
-255:247
-255:389
-255:390
-256:391
-256:392
-256:393
-257:394
-257:399
-257:401
-257:402
-258:396
-259:397
-263:411
-263:413
-263:414
-263:415
-263:419
-263:457
-263:458
-263:459
-263:460
-263:461
-263:462
-263:463
-264:421
-264:422
-264:423
-264:424
-264:425
-264:445
-264:446
-264:447
-264:448
-264:449
-265:428
-265:429
-265:430
-265:431
-266:434
-266:435
-266:436
-266:437
-267:440
-267:441
-267:442
-269:416
-269:417
-269:451
-269:452
-269:453
-269:454
-269:455
-271:456
-272:612
-272:614
-272:615
-272:616
-272:618
-272:635
-272:636
-272:637
-272:638
-272:639
-272:640
-272:641
-273:620
-273:621
-273:622
-273:623
-273:624
-273:625
-273:626
-273:627
-273:628
-273:629
-273:631
-273:632
-273:633
-273:634
-277:674
-277:675
-277:677
-277:678
-277:679
-277:681
-277:707
-277:708
-277:709
-277:710
-277:711
-277:712
-277:713
-278:682
-278:683
-278:684
-278:685
-278:687
-278:688
-278:689
-278:690
-278:691
-278:692
-278:694
-278:695
-278:696
-278:697
-278:700
-278:701
-278:702
-278:703
-278:704
-278:705
-280:706
-284:715
-284:718
-284:720
-284:721
-284:722
-284:728
-284:729
-284:758
-284:759
-284:760
-284:761
-284:762
-284:763
-284:764
-284:765
-284:766
-284:767
-284:768
-284:769
-285:716
-285:730
-285:731
-285:732
-285:733
-285:734
-285:735
-286:717
-286:723
-286:724
-286:738
-286:739
-286:740
-286:741
-286:742
-286:743
-286:744
-287:725
-287:726
-287:748
-287:749
-287:750
-287:751
-287:752
-287:753
-287:754
-288:745
-289:755
-305:508
-305:509
-305:511
-305:512
-305:513
-305:515
-305:538
-305:539
-305:540
-305:541
-305:542
-305:543
-305:544
-305:545
-306:516
-306:517
-306:518
-306:519
-306:521
-306:522
-306:523
-306:524
-306:525
-306:526
-306:527
-306:528
-306:529
-306:530
-306:531
-306:533
-306:534
-306:535
-306:536
-308:537
-309:771
-309:772
-309:773
-309:775
-309:776
-309:777
-309:783
-309:797
-309:798
-309:799
-309:800
-309:801
-309:802
-309:803
-310:780
-313:778
-313:784
-313:785
-313:786
-313:787
-313:788
-313:789
-313:790
-313:791
-313:792
-315:794
-318:796
-334:805
-334:806
-334:808
-334:809
-334:810
-334:821
-334:822
-334:875
-334:876
-334:877
-334:878
-334:879
-334:880
-334:881
-334:882
-334:883
-334:884
-334:885
-334:886
-334:887
-335:811
-335:823
-335:824
-335:825
-335:826
-335:827
-335:828
-335:829
-336:812
-336:813
-336:833
-336:834
-336:835
-336:836
-336:837
-336:838
-336:839
-337:830
-337:840
-338:814
-338:815
-338:844
-338:845
-338:846
-338:847
-338:848
-338:849
-338:850
-339:841
-339:851
-340:816
-340:817
-340:855
-340:856
-340:857
-340:858
-340:859
-340:860
-340:861
-341:818
-341:819
-341:852
-341:865
-341:866
-341:867
-341:868
-341:869
-341:870
-341:871
-342:862
-343:872
-355:547
-355:549
-355:550
-355:551
-355:553
-355:575
-355:576
-355:577
-355:578
-355:579
-355:580
-355:581
-356:554
-356:555
-356:556
-356:557
-357:559
-357:560
-357:561
-357:562
-357:563
-357:564
-357:565
-357:566
-357:567
-357:568
-357:570
-357:571
-357:572
-358:573
-360:574
-361:889
-361:891
-361:892
-361:893
-361:895
-361:946
-361:947
-361:948
-361:949
-361:950
-361:951
-361:952
-362:896
-362:897
-362:898
-362:899
-362:901
-362:902
-362:903
-362:904
-362:905
-362:906
-362:907
-362:909
-362:910
-362:911
-362:912
-362:913
-362:914
-362:915
-362:918
-362:919
-362:920
-362:921
-362:922
-362:923
-362:926
-362:927
-362:928
-362:929
-362:930
-362:938
-362:939
-362:940
-362:941
-362:942
-362:944
-364:945
-365:583
-365:585
-365:586
-365:587
-365:589
-365:604
-365:605
-365:606
-365:607
-365:608
-365:609
-365:610
-366:590
-366:591
-366:592
-366:593
-366:594
-366:595
-366:596
-366:597
-366:598
-366:599
-366:601
-366:602
-366:603
-369:954
-369:956
-369:957
-369:958
-369:960
-369:972
-369:973
-369:974
-369:975
-369:976
-369:977
-369:978
-370:961
-370:962
-370:963
-370:964
-370:965
-370:966
-370:967
-370:968
-370:969
-370:970
-372:971
-373:643
-373:645
-373:646
-373:647
-373:649
-373:666
-373:667
-373:668
-373:669
-373:670
-373:671
-373:672
-374:651
-374:652
-374:653
-374:657
-374:658
-374:659
-374:660
-374:662
-374:663
-374:664
-374:665
-381:654
-381:655
-381:656
-385:980
-385:982
-385:983
-385:984
-385:1016
-385:1289
-385:1290
-385:1291
-385:1292
-385:1293
-385:1294
-385:1295
-386:1009
-387:1010
-388:1011
-389:1012
-390:1013
-394:985
-394:986
-394:1018
-394:1019
-394:1020
-394:1021
-394:1022
-394:1023
-394:1035
-394:1036
-394:1037
-394:1038
-394:1039
-396:987
-396:988
-396:1042
-396:1043
-396:1044
-396:1045
-396:1046
-396:1047
-396:1048
-396:1080
-396:1081
-396:1082
-396:1083
-396:1084
-397:989
-397:990
-397:1051
-397:1052
-397:1053
-397:1054
-397:1055
-397:1056
-398:991
-398:992
-398:1059
-398:1060
-398:1061
-398:1062
-398:1063
-398:1064
-399:993
-399:994
-399:1067
-399:1068
-399:1069
-399:1070
-399:1071
-399:1072
-401:995
-401:1086
-401:1087
-402:1088
-403:1089
-404:1090
-405:1091
-407:1094
-407:1095
-407:1096
-407:1097
-407:1098
-407:1099
-407:1112
-407:1113
-407:1114
-407:1115
-407:1116
-408:996
-408:997
-408:1119
-408:1120
-408:1121
-408:1122
-408:1123
-408:1124
-408:1136
-408:1137
-408:1138
-408:1139
-408:1140
-409:998
-409:999
-409:1143
-409:1144
-409:1145
-409:1146
-409:1147
-409:1148
-409:1149
-409:1160
-409:1161
-409:1162
-409:1163
-409:1164
-410:1167
-410:1168
-410:1169
-410:1170
-410:1180
-410:1181
-410:1182
-410:1183
-410:1184
-411:1187
-411:1188
-411:1189
-411:1190
-411:1191
-411:1200
-411:1201
-411:1202
-411:1203
-411:1204
-412:1000
-412:1206
-412:1207
-413:1001
-413:1209
-413:1210
-413:1211
-413:1212
-413:1213
-413:1220
-413:1221
-413:1222
-413:1223
-413:1224
-414:1002
-414:1003
-414:1226
-414:1227
-414:1228
-416:1230
-417:1231
-418:1232
-419:1233
-420:1234
-431:1004
-431:1236
-431:1237
-431:1238
-432:1005
-432:1006
-432:1239
-432:1240
-432:1241
-433:1007
-433:1243
-433:1244
-433:1245
-433:1246
-433:1247
-433:1248
-433:1262
-433:1263
-433:1264
-433:1265
-433:1266
-434:1268
-434:1285
-434:1287
-434:1288
-435:1270
-436:1271
-437:1272
-438:1273
-439:1274
-440:1275
-441:1276
-442:1277
-443:1278
-444:1279
-445:1280
-446:1281
-447:1282
-448:1283
-454:1398
-454:1400
-454:1401
-454:1402
-454:1404
-454:1421
-454:1422
-454:1423
-454:1424
-454:1425
-454:1426
-454:1427
-455:1406
-455:1407
-455:1408
-455:1409
-455:1410
-455:1411
-455:1412
-455:1413
-455:1414
-455:1415
-455:1417
-455:1418
-455:1419
-455:1420
-459:1563
-459:1564
-459:1566
-459:1567
-459:1568
-459:1570
-459:1582
-459:1583
-459:1584
-459:1585
-459:1586
-459:1587
-459:1588
-460:1571
-460:1572
-460:1573
-460:1574
-460:1575
-460:1576
-460:1577
-460:1578
-460:1579
-460:1580
-462:1581
-463:1297
-463:1299
-463:1300
-463:1301
-463:1303
-463:1328
-463:1329
-463:1330
-463:1331
-463:1332
-463:1333
-463:1334
-464:1304
-464:1305
-464:1306
-464:1307
-464:1308
-464:1309
-464:1310
-464:1311
-464:1312
-464:1313
-464:1314
-464:1315
-464:1316
-464:1317
-464:1318
-464:1319
-464:1320
-464:1321
-464:1322
-464:1323
-464:1325
-464:1326
-464:1327
-468:1336
-468:1338
-468:1339
-468:1340
-468:1349
-468:1390
-468:1391
-468:1392
-468:1393
-468:1394
-468:1395
-468:1396
-469:1345
-470:1346
-473:1341
-473:1342
-473:1351
-473:1352
-473:1353
-473:1354
-473:1355
-473:1356
-473:1358
-473:1359
-473:1360
-473:1361
-474:1343
-474:1364
-474:1365
-474:1366
-474:1367
-474:1368
-474:1369
-474:1370
-474:1371
-474:1372
-474:1373
-474:1374
-474:1375
-474:1377
-474:1378
-475:1379
-475:1386
-475:1388
-475:1389
-476:1381
-477:1382
-478:1383
-479:1384
-484:1621
-484:1622
-484:1624
-484:1625
-484:1626
-484:1638
-484:1738
-484:1739
-484:1740
-484:1741
-484:1742
-484:1743
-484:1744
-485:1634
-486:1635
-494:1627
-494:1628
-494:1639
-494:1640
-494:1641
-494:1642
-494:1643
-495:1686
-495:1693
-495:1695
-495:1696
-495:1697
-495:1698
-503:1645
-503:1647
-503:1648
-503:1649
-503:1650
-503:1651
-503:1652
-503:1653
-503:1661
-503:1662
-503:1663
-503:1664
-503:1665
-504:1668
-504:1669
-504:1670
-504:1671
-504:1672
-504:1679
-504:1680
-504:1681
-504:1682
-504:1683
-505:1685
-506:1687
-506:1688
-506:1689
-506:1690
-506:1691
-506:1692
-509:1701
-511:1629
-511:1702
-511:1703
-511:1704
-511:1705
-511:1706
-512:1707
-513:1630
-513:1708
-513:1709
-513:1710
-513:1711
-513:1712
-513:1713
-513:1714
-513:1715
-513:1716
-513:1717
-513:1719
-513:1720
-513:1721
-513:1722
-513:1724
-513:1725
-514:1718
-517:1631
-517:1632
-517:1726
-517:1727
-517:1728
-518:1729
-519:1731
-520:1732
-521:1733
-522:1734
-523:1735
-526:1737
-527:1429
-527:1430
-527:1431
-527:1433
-527:1434
-527:1435
-527:1447
-527:1482
-527:1483
-527:1484
-527:1485
-527:1486
-527:1487
-527:1488
-528:1439
-529:1440
-530:1441
-531:1442
-532:1443
-533:1444
-536:1436
-536:1448
-536:1449
-536:1450
-536:1451
-536:1452
-537:1453
-538:1437
-538:1454
-538:1455
-538:1456
-538:1457
-538:1458
-538:1459
-538:1460
-538:1461
-538:1462
-538:1463
-538:1465
-538:1466
-538:1467
-538:1468
-538:1470
-538:1471
-539:1464
-540:1472
-540:1477
-540:1479
-540:1480
-540:1481
-541:1474
-542:1475
-546:1746
-546:1748
-546:1749
-546:1750
-546:1759
-546:1760
-546:1824
-546:1825
-546:1826
-546:1827
-546:1828
-546:1829
-546:1830
-546:1831
-546:1832
-546:1833
-546:1834
-546:1835
-547:1753
-548:1754
-549:1755
-550:1756
-553:1751
-553:1761
-553:1762
-553:1763
-553:1764
-553:1765
-553:1766
-553:1767
-553:1768
-553:1769
-553:1770
-553:1771
-553:1773
-553:1774
-553:1775
-553:1776
-553:1777
-553:1778
-553:1779
-553:1780
-553:1781
-553:1782
-553:1783
-553:1785
-553:1786
-553:1787
-554:1788
-554:1798
-554:1800
-554:1801
-554:1802
-555:1790
-556:1791
-557:1792
-558:1793
-559:1794
-560:1795
-561:1796
-563:1805
-563:1806
-563:1807
-563:1808
-563:1809
-563:1810
-563:1817
-563:1819
-563:1820
-563:1821
-564:1812
-565:1813
-566:1814
-567:1815
-571:1490
-571:1492
-571:1493
-571:1494
-571:1496
-571:1497
-571:1550
-571:1551
-571:1552
-571:1553
-571:1554
-571:1555
-571:1556
-571:1557
-571:1558
-571:1559
-571:1560
-571:1561
-572:1498
-572:1499
-572:1501
-572:1502
-572:1503
-572:1504
-572:1505
-572:1506
-572:1507
-572:1508
-572:1509
-572:1510
-572:1512
-572:1513
-572:1514
-572:1516
-572:1517
-572:1518
-572:1519
-572:1520
-572:1534
-572:1535
-572:1536
-572:1537
-572:1538
-573:1540
-573:1543
-573:1544
-573:1545
-573:1546
-573:1547
-576:1936
-576:1938
-576:1939
-576:1940
-576:1942
-576:1956
-576:1957
-576:1958
-576:1959
-576:1960
-576:1961
-576:1962
-577:1943
-577:1944
-577:1945
-577:1946
-577:1947
-577:1948
-577:1949
-577:1950
-577:1951
-577:1952
-577:1953
-577:1954
-579:1955
-580:1964
-580:1966
-580:1967
-580:1968
-580:1970
-580:1980
-580:1981
-580:1982
-580:1983
-580:1984
-580:1985
-580:1986
-581:1971
-581:1972
-581:1973
-581:1974
-581:1975
-581:1976
-581:1977
-581:1978
-583:1979
-584:1918
-584:1920
-584:1921
-584:1922
-584:1924
-584:1928
-584:1929
-584:1930
-584:1931
-584:1932
-584:1933
-584:1934
-585:1925
-585:1926
-585:1927
-588:1988
-588:1990
-588:1991
-588:1992
-588:2001
-588:2002
-588:2071
-588:2163
-588:2206
-588:2207
-588:2208
-588:2209
-588:2210
-588:2211
-588:2212
-588:2213
-588:2214
-588:2215
-588:2216
-588:2217
-589:1997
-590:1998
-593:2072
-593:2073
-593:2074
-593:2076
-593:2077
-593:2078
-593:2079
-593:2080
-593:2081
-593:2082
-593:2085
-593:2086
-593:2087
-593:2088
-593:2089
-593:2090
-593:2093
-593:2094
-593:2095
-593:2096
-593:2097
-593:2100
-593:2101
-593:2102
-593:2103
-593:2104
-593:2105
-593:2106
-593:2107
-593:2108
-593:2109
-593:2112
-593:2113
-593:2114
-593:2115
-593:2118
-593:2119
-593:2120
-593:2121
-593:2122
-594:2125
-594:2126
-594:2127
-594:2128
-594:2129
-594:2130
-594:2131
-594:2132
-594:2133
-594:2135
-594:2136
-594:2156
-594:2157
-594:2158
-594:2159
-594:2160
-595:2162
-595:2164
-595:2165
-595:2166
-596:2168
-596:2169
-596:2170
-596:2171
-596:2172
-596:2173
-596:2174
-596:2175
-596:2176
-596:2178
-596:2179
-596:2199
-596:2200
-596:2201
-596:2202
-596:2203
-597:2003
-597:2004
-597:2005
-597:2006
-597:2007
-597:2205
-598:2010
-598:2011
-598:2012
-598:2013
-598:2014
-598:2015
-599:1993
-599:1994
-599:2019
-599:2020
-599:2021
-599:2022
-599:2023
-599:2024
-599:2026
-599:2027
-599:2028
-599:2029
-599:2030
-599:2050
-599:2051
-599:2052
-599:2053
-599:2054
-600:2016
-601:2057
-602:2058
-604:1995
-604:2063
-604:2064
-604:2065
-604:2066
-604:2067
-604:2068
-605:2060
-607:2219
-607:2221
-607:2222
-607:2223
-607:2227
-607:2228
-607:2371
-607:2372
-607:2373
-607:2374
-607:2376
-607:2377
-607:2378
-607:2379
-607:2380
-607:2381
-607:2382
-607:2385
-607:2386
-607:2387
-607:2388
-607:2389
-607:2390
-607:2413
-607:2414
-607:2415
-607:2416
-607:2417
-607:2420
-607:2425
-607:2426
-607:2427
-607:2428
-607:2429
-607:2430
-607:2431
-607:2432
-607:2433
-607:2434
-607:2435
-607:2436
-608:2419
-608:2421
-608:2422
-608:2423
-608:2424
-609:2229
-609:2230
-609:2231
-609:2232
-609:2234
-609:2235
-609:2236
-609:2237
-609:2238
-609:2239
-609:2240
-609:2243
-609:2244
-609:2245
-609:2246
-609:2247
-609:2248
-609:2271
-609:2272
-609:2273
-609:2274
-609:2275
-610:2224
-610:2225
-610:2277
-610:2280
-610:2281
-610:2282
-610:2283
-610:2284
-610:2285
-611:2287
-611:2288
-611:2289
-611:2290
-611:2291
-611:2292
-611:2293
-611:2318
-611:2319
-611:2320
-611:2321
-611:2322
-612:2325
-612:2326
-612:2327
-612:2328
-612:2329
-612:2330
-612:2331
-612:2334
-612:2335
-612:2336
-612:2337
-612:2338
-612:2339
-612:2362
-612:2363
-612:2364
-612:2365
-612:2366
-613:2368
-615:3016
-615:3018
-615:3019
-615:3020
-615:3029
-615:3059
-615:3060
-615:3061
-615:3062
-615:3063
-615:3064
-615:3065
-616:3024
-617:3025
-618:3026
-621:3021
-621:3022
-621:3030
-621:3031
-621:3032
-621:3033
-622:3035
-622:3036
-622:3037
-622:3038
-622:3039
-622:3040
-622:3041
-622:3050
-622:3051
-622:3052
-622:3053
-622:3054
-623:3044
-623:3045
-623:3046
-623:3047
-625:3056
-625:3057
-627:3058
-628:2653
-628:2655
-628:2656
-628:2657
-628:2659
-628:2683
-628:2684
-628:2685
-628:2686
-628:2687
-628:2688
-628:2689
-629:2660
-629:2661
-629:2662
-629:2663
-630:2664
-630:2665
-630:2667
-630:2668
-630:2669
-630:2670
-630:2671
-630:2672
-630:2673
-630:2674
-630:2675
-630:2676
-630:2678
-630:2679
-630:2680
-631:2681
-633:2682
-635:2509
-635:2510
-635:2512
-635:2513
-635:2514
-635:2521
-635:2645
-635:2646
-635:2647
-635:2648
-635:2649
-635:2650
-635:2651
-636:2517
-637:2518
-640:2515
-640:2522
-640:2523
-641:2525
-641:2526
-641:2527
-641:2528
-641:2529
-641:2530
-641:2531
-641:2532
-641:2533
-641:2535
-641:2536
-641:2637
-641:2638
-641:2639
-641:2640
-641:2641
-642:2539
-642:2540
-642:2541
-642:2542
-642:2543
-642:2544
-642:2545
-642:2546
-642:2548
-642:2549
-643:2552
-643:2553
-643:2554
-643:2555
-643:2556
-643:2557
-643:2558
-643:2559
-643:2561
-643:2562
-644:2565
-644:2566
-644:2567
-645:2568
-645:2582
-645:2584
-645:2585
-646:2570
-647:2571
-648:2572
-649:2573
-650:2574
-651:2575
-652:2576
-653:2577
-654:2578
-655:2579
-656:2580
-658:2588
-658:2589
-658:2590
-658:2591
-658:2592
-658:2593
-658:2594
-658:2595
-658:2597
-658:2598
-659:2601
-659:2602
-659:2603
-659:2604
-659:2605
-659:2606
-659:2607
-659:2608
-659:2610
-659:2611
-660:2614
-660:2615
-660:2616
-660:2617
-660:2618
-660:2619
-660:2620
-660:2621
-660:2622
-660:2623
-660:2624
-660:2625
-660:2626
-660:2627
-660:2628
-660:2629
-660:2630
-660:2631
-660:2633
-660:2634
-662:2643
-662:2644
-665:2691
-665:2693
-665:2694
-665:2695
-665:2704
-665:2726
-665:2727
-665:2728
-665:2729
-665:2730
-665:2731
-665:2732
-666:2701
-669:2696
-669:2697
-669:2698
-669:2699
-669:2705
-669:2706
-669:2707
-669:2708
-669:2709
-669:2710
-669:2711
-669:2712
-669:2713
-670:2714
-670:2722
-670:2724
-670:2725
-671:2716
-672:2717
-673:2718
-674:2719
-675:2720
-680:2734
-680:2736
-680:2737
-680:2738
-680:2751
-680:2752
-680:3003
-680:3004
-680:3005
-680:3006
-680:3007
-680:3008
-680:3009
-680:3010
-680:3011
-680:3012
-680:3013
-680:3014
-681:2748
-684:2739
-684:2740
-684:2753
-684:2754
-684:2755
-684:2756
-684:2757
-684:2758
-684:2760
-684:2761
-684:2762
-684:2763
-684:2764
-684:2765
-684:2766
-684:2769
-684:2770
-684:2771
-684:2772
-684:2773
-684:2774
-684:2797
-684:2798
-684:2799
-684:2800
-684:2801
-685:2803
-686:2741
-686:2742
-686:2806
-686:2807
-686:2808
-686:2809
-686:2810
-686:2811
-687:2813
-687:2814
-687:2815
-687:2816
-687:2817
-687:2818
-687:2819
-687:2844
-687:2845
-687:2846
-687:2847
-687:2848
-688:2851
-688:2852
-688:2853
-688:2854
-688:2855
-688:2856
-688:2857
-688:2860
-688:2861
-688:2862
-688:2863
-688:2864
-688:2865
-688:2888
-688:2889
-688:2890
-688:2891
-688:2892
-689:2894
-690:2743
-690:2744
-690:2897
-690:2898
-690:2899
-690:2900
-690:2901
-690:2902
-690:2904
-690:2905
-690:2906
-690:2907
-690:2908
-690:2909
-690:2910
-690:2913
-690:2914
-690:2915
-690:2916
-690:2917
-690:2918
-690:2941
-690:2942
-690:2943
-690:2944
-690:2945
-691:2947
-692:2745
-692:2746
-692:2950
-692:2951
-692:2952
-692:2953
-692:2954
-692:2955
-692:2957
-692:2958
-692:2959
-692:2960
-692:2961
-692:2962
-692:2963
-692:2966
-692:2967
-692:2968
-692:2969
-692:2970
-692:2971
-692:2994
-692:2995
-692:2996
-692:2997
-692:2998
-693:3000
-695:2438
-695:2439
-695:2440
-695:2441
-695:2443
-695:2444
-695:2445
-695:2450
-695:2500
-695:2501
-695:2502
-695:2503
-695:2504
-695:2505
-695:2506
-695:2507
-696:2447
-699:2452
-699:2453
-699:2454
-699:2455
-699:2456
-699:2457
-699:2458
-699:2477
-699:2478
-699:2479
-699:2480
-699:2481
-700:2461
-700:2462
-700:2463
-700:2464
-700:2465
-700:2466
-701:2469
-701:2470
-701:2471
-701:2472
-701:2473
-701:2474
-704:2484
-705:2485
-706:2486
-707:2487
-708:2488
-709:2489
-710:2490
-711:2491
-712:2492
-713:2493
-714:2494
-715:2495
-716:2496
-717:2497
-718:2498
-724:3067
-724:3069
-724:3070
-724:3071
-724:3077
-724:3078
-724:3106
-724:3107
-724:3108
-724:3109
-724:3110
-724:3111
-724:3112
-724:3113
-724:3114
-724:3115
-724:3116
-724:3117
-725:3072
-725:3073
-725:3079
-725:3080
-725:3081
-725:3082
-725:3083
-725:3084
-726:3074
-726:3075
-726:3088
-726:3089
-726:3090
-726:3091
-726:3092
-726:3093
-727:3085
-727:3097
-727:3098
-727:3099
-727:3100
-727:3101
-727:3102
-728:3094
-729:3103
-730:1590
-730:1592
-730:1593
-730:1594
-730:1596
-730:1613
-730:1614
-730:1615
-730:1616
-730:1617
-730:1618
-730:1619
-731:1598
-731:1599
-731:1600
-731:1601
-731:1602
-731:1603
-731:1604
-731:1605
-731:1606
-731:1607
-731:1609
-731:1610
-731:1611
-731:1612
-734:465
-734:467
-734:468
-734:469
-734:471
-734:472
-734:473
-734:474
-734:475
-734:476
-734:477
-734:478
-734:479
-734:480
-734:495
-734:496
-734:497
-734:498
-734:499
-734:500
-734:501
-734:502
-734:503
-734:504
-734:505
-734:506
-735:484
-735:485
-735:486
-735:487
-735:488
-735:489
-735:490
-735:491
-736:481
-737:492
-739:3119
-739:3120
-739:3122
-739:3123
-739:3124
-739:3126
-739:3127
-739:3150
-739:3151
-739:3152
-739:3153
-739:3154
-739:3155
-739:3156
-739:3157
-739:3158
-739:3159
-739:3160
-739:3161
-740:3128
-740:3129
-740:3130
-740:3131
-740:3132
-740:3133
-740:3134
-740:3135
-741:3139
-741:3140
-741:3141
-741:3142
-741:3143
-741:3144
-741:3145
-741:3146
-742:3136
-743:3147
-746:1837
-746:1839
-746:1840
-746:1841
-746:1854
-746:1855
-746:1905
-746:1906
-746:1907
-746:1908
-746:1909
-746:1910
-746:1911
-746:1912
-746:1913
-746:1914
-746:1915
-746:1916
-747:1851
-751:1856
-751:1857
-751:1858
-751:1859
-751:1860
-751:1875
-751:1876
-751:1877
-751:1878
-751:1880
-751:1881
-752:1842
-752:1843
-752:1844
-752:1845
-752:1846
-752:1861
-752:1862
-752:1863
-752:1864
-752:1865
-752:1866
-752:1867
-752:1868
-752:1869
-753:1870
-755:1872
-756:1873
-759:1847
-759:1848
-759:1849
-759:1882
-759:1883
-759:1884
-759:1885
-759:1886
-760:1887
-760:1892
-760:1894
-760:1895
-761:1889
-762:1890
-766:1902
-767:3233
-767:3235
-767:3236
-767:3237
-767:3241
-767:3258
-767:3259
-767:3260
-767:3261
-767:3262
-767:3263
-767:3264
-768:3238
-768:3239
-768:3242
-768:3243
-768:3244
-768:3245
-768:3246
-769:3247
-769:3248
-770:3249
-771:3250
-772:3252
-773:3253
-774:3254
-775:3255
-778:3257
-779:3163
-779:3165
-779:3166
-779:3167
-779:3175
-779:3180
-779:3211
-779:3220
-779:3221
-779:3222
-779:3223
-779:3225
-779:3226
-779:3227
-779:3228
-779:3229
-779:3230
-779:3231
-780:3169
-781:3170
-782:3171
-783:3172
-786:3176
-786:3177
-786:3178
-786:3179
-788:3181
-788:3183
-788:3184
-788:3185
-788:3186
-788:3187
-788:3188
-788:3189
-788:3190
-788:3191
-788:3192
-788:3194
-788:3195
-788:3196
-789:3197
-789:3206
-789:3208
-789:3209
-789:3210
-790:3199
-791:3200
-792:3201
-793:3202
-794:3203
-795:3204
-798:3212
-798:3213
-798:3214
-798:3215
-798:3217
-798:3218
-798:3219
-801:3346
-801:3348
-801:3349
-801:3350
-801:3357
-801:3358
-801:3461
-801:3462
-801:3463
-801:3464
-801:3465
-801:3466
-801:3467
-801:3468
-801:3469
-801:3470
-801:3471
-801:3472
-802:3354
-805:3351
-805:3359
-805:3360
-805:3361
-805:3362
-805:3363
-805:3364
-805:3365
-805:3366
-805:3367
-805:3368
-806:3370
-806:3371
-806:3372
-806:3373
-806:3374
-806:3375
-806:3376
-806:3377
-806:3378
-806:3379
-806:3381
-806:3382
-806:3400
-806:3401
-806:3402
-806:3403
-806:3404
-807:3406
-807:3409
-807:3410
-807:3411
-807:3412
-807:3413
-808:3352
-808:3416
-808:3417
-808:3418
-808:3419
-808:3420
-809:3422
-809:3423
-809:3424
-809:3425
-809:3426
-809:3427
-809:3428
-809:3429
-809:3430
-809:3431
-809:3433
-809:3434
-809:3452
-809:3453
-809:3454
-809:3455
-809:3456
-810:3458
-812:3474
-812:3476
-812:3477
-812:3478
-812:3494
-812:3495
-812:3595
-812:3596
-812:3597
-812:3598
-812:3599
-812:3600
-812:3601
-812:3602
-812:3603
-812:3604
-812:3605
-812:3606
-813:3491
-816:3479
-816:3480
-816:3496
-816:3497
-816:3498
-816:3499
-816:3500
-816:3501
-817:3481
-817:3482
-817:3505
-817:3506
-817:3507
-817:3508
-817:3509
-817:3510
-817:3512
-817:3513
-817:3514
-817:3515
-817:3516
-817:3517
-817:3518
-817:3539
-817:3540
-817:3541
-817:3542
-817:3543
-818:3483
-818:3484
-818:3502
-818:3545
-818:3548
-818:3549
-818:3550
-818:3551
-818:3552
-818:3553
-819:3485
-819:3486
-819:3557
-819:3558
-819:3559
-819:3560
-819:3561
-819:3562
-820:3487
-820:3488
-820:3489
-820:3554
-820:3566
-820:3567
-820:3568
-820:3569
-820:3570
-820:3571
-820:3572
-821:3563
-821:3573
-821:3580
-821:3582
-821:3583
-822:3575
-823:3576
-824:3577
-825:3578
-827:3586
-827:3587
-827:3588
-827:3589
-827:3590
-827:3591
-829:3592
-830:3608
-830:3610
-830:3611
-830:3612
-830:3619
-830:3671
-830:3672
-830:3673
-830:3674
-830:3675
-830:3676
-830:3677
-831:3615
-832:3616
-835:3613
-835:3620
-835:3621
-836:3623
-836:3624
-836:3625
-836:3626
-836:3627
-836:3628
-836:3629
-836:3630
-836:3631
-836:3633
-836:3634
-836:3663
-836:3664
-836:3665
-836:3666
-836:3667
-837:3637
-837:3638
-837:3639
-837:3640
-837:3641
-837:3642
-837:3643
-837:3644
-837:3646
-837:3647
-838:3650
-838:3651
-838:3652
-838:3653
-838:3654
-838:3655
-838:3656
-838:3657
-838:3659
-838:3660
-840:3669
-840:3670
-843:3679
-843:3681
-843:3682
-843:3683
-843:3685
-843:3707
-843:3708
-843:3709
-843:3710
-843:3711
-843:3712
-843:3713
-844:3686
-844:3687
-844:3688
-844:3689
-845:3690
-845:3691
-845:3692
-845:3693
-845:3694
-845:3695
-845:3696
-845:3697
-845:3698
-845:3699
-845:3700
-845:3701
-845:3703
-845:3704
-846:3705
-848:3706
-860:3266
-860:3277
-860:3279
-860:3280
-860:3281
-860:3282
-860:3284
-860:3285
-860:3302
-860:3328
-860:3333
-860:3334
-860:3335
-860:3336
-860:3337
-860:3338
-860:3339
-860:3340
-860:3341
-860:3342
-860:3343
-860:3344
-861:3267
-862:3268
-863:3269
-864:3270
-864:3303
-864:3304
-864:3305
-864:3306
-865:3308
-865:3309
-865:3310
-865:3311
-865:3314
-865:3315
-865:3316
-865:3319
-865:3320
-865:3321
-865:3322
-865:3323
-866:3272
-866:3325
-866:3326
-867:3273
-868:3274
-868:3327
-869:3275
-869:3329
-869:3330
-869:3331
-869:3332
-870:3276
-872:3286
-872:3287
-872:3288
-872:3289
-872:3290
-875:3293
-875:3294
-875:3295
-875:3296
-875:3297
-875:3298
-877:3299
-879:3715
-879:3716
-879:3718
-879:3719
-879:3720
-879:3724
-879:3736
-879:3737
-879:3738
-879:3739
-879:3740
-879:3741
-879:3742
-880:3721
-880:3722
-880:3725
-880:3726
-880:3727
-880:3728
-880:3729
-880:3730
-880:3731
-881:3732
-881:3733
-882:3734
-884:3735
-886:3744
-886:3745
-886:3747
-886:3748
-886:3749
-886:3753
-886:3769
-886:3770
-886:3771
-886:3772
-886:3773
-886:3774
-886:3775
-887:3750
-887:3751
-887:3754
-887:3755
-887:3756
-887:3757
-887:3758
-888:3759
-888:3760
-888:3761
-888:3762
-889:3763
-890:3764
-890:3765
-890:3766
-890:3767
-892:3768
-893:3777
-893:3779
-893:3780
-893:3781
-893:3783
-893:3784
-893:3824
-893:3825
-893:3826
-893:3827
-893:3828
-893:3829
-893:3830
-893:3831
-893:3832
-893:3833
-893:3834
-893:3835
-894:3785
-894:3786
-894:3787
-894:3788
-894:3789
-894:3790
-894:3791
-894:3792
-894:3793
-894:3794
-894:3795
-894:3796
-894:3797
-894:3798
-894:3799
-894:3800
-894:3802
-894:3803
-895:3804
-895:3805
-895:3806
-895:3808
-895:3809
-895:3810
-896:3813
-896:3814
-896:3815
-896:3816
-896:3817
-896:3819
-896:3820
-896:3821
-899:3837
-899:3839
-899:3840
-899:3841
-899:3845
-899:3858
-899:3859
-899:3860
-899:3861
-899:3862
-899:3863
-899:3864
-900:3842
-900:3843
-900:3846
-900:3847
-900:3848
-900:3849
-900:3850
-900:3851
-900:3852
-900:3853
-900:3854
-900:3855
-900:3856
-902:3857
-*E
diff --git a/src/org/antlr/tool/ANTLRTokenTypes.java b/src/org/antlr/tool/ANTLRTokenTypes.java
deleted file mode 100644
index 692c15a..0000000
--- a/src/org/antlr/tool/ANTLRTokenTypes.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRLexer.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.tool;
-import java.util.*;
-import java.io.*;
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import antlr.*;
-
-public interface ANTLRTokenTypes {
-	int EOF = 1;
-	int NULL_TREE_LOOKAHEAD = 3;
-	int OPTIONS = 4;
-	int TOKENS = 5;
-	int PARSER = 6;
-	int LEXER = 7;
-	int RULE = 8;
-	int BLOCK = 9;
-	int OPTIONAL = 10;
-	int CLOSURE = 11;
-	int POSITIVE_CLOSURE = 12;
-	int SYNPRED = 13;
-	int RANGE = 14;
-	int CHAR_RANGE = 15;
-	int EPSILON = 16;
-	int ALT = 17;
-	int EOR = 18;
-	int EOB = 19;
-	int EOA = 20;
-	int ID = 21;
-	int ARG = 22;
-	int ARGLIST = 23;
-	int RET = 24;
-	int LEXER_GRAMMAR = 25;
-	int PARSER_GRAMMAR = 26;
-	int TREE_GRAMMAR = 27;
-	int COMBINED_GRAMMAR = 28;
-	int INITACTION = 29;
-	int LABEL = 30;
-	int TEMPLATE = 31;
-	int SCOPE = 32;
-	int GATED_SEMPRED = 33;
-	int SYN_SEMPRED = 34;
-	int BACKTRACK_SEMPRED = 35;
-	int FRAGMENT = 36;
-	int ACTION = 37;
-	int DOC_COMMENT = 38;
-	int SEMI = 39;
-	int LITERAL_lexer = 40;
-	int LITERAL_tree = 41;
-	int LITERAL_grammar = 42;
-	int AMPERSAND = 43;
-	int COLON = 44;
-	int RCURLY = 45;
-	int ASSIGN = 46;
-	int STRING_LITERAL = 47;
-	int CHAR_LITERAL = 48;
-	int INT = 49;
-	int STAR = 50;
-	int TOKEN_REF = 51;
-	int LITERAL_protected = 52;
-	int LITERAL_public = 53;
-	int LITERAL_private = 54;
-	int BANG = 55;
-	int ARG_ACTION = 56;
-	int LITERAL_returns = 57;
-	int LITERAL_throws = 58;
-	int COMMA = 59;
-	int LPAREN = 60;
-	int OR = 61;
-	int RPAREN = 62;
-	int LITERAL_catch = 63;
-	int LITERAL_finally = 64;
-	int PLUS_ASSIGN = 65;
-	int SEMPRED = 66;
-	int IMPLIES = 67;
-	int ROOT = 68;
-	int RULE_REF = 69;
-	int NOT = 70;
-	int TREE_BEGIN = 71;
-	int QUESTION = 72;
-	int PLUS = 73;
-	int WILDCARD = 74;
-	int REWRITE = 75;
-	int DOLLAR = 76;
-	int DOUBLE_QUOTE_STRING_LITERAL = 77;
-	int DOUBLE_ANGLE_STRING_LITERAL = 78;
-	int WS = 79;
-	int COMMENT = 80;
-	int SL_COMMENT = 81;
-	int ML_COMMENT = 82;
-	int OPEN_ELEMENT_OPTION = 83;
-	int CLOSE_ELEMENT_OPTION = 84;
-	int ESC = 85;
-	int DIGIT = 86;
-	int XDIGIT = 87;
-	int NESTED_ARG_ACTION = 88;
-	int NESTED_ACTION = 89;
-	int ACTION_CHAR_LITERAL = 90;
-	int ACTION_STRING_LITERAL = 91;
-	int ACTION_ESC = 92;
-	int WS_LOOP = 93;
-	int INTERNAL_RULE_REF = 94;
-	int WS_OPT = 95;
-	int SRC = 96;
-}
diff --git a/src/org/antlr/tool/ANTLRTokenTypes.txt b/src/org/antlr/tool/ANTLRTokenTypes.txt
deleted file mode 100644
index 27eaa78..0000000
--- a/src/org/antlr/tool/ANTLRTokenTypes.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): antlr.g -> ANTLRTokenTypes.txt$
-ANTLR    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
diff --git a/src/org/antlr/tool/ANTLRTreePrinter.java b/src/org/antlr/tool/ANTLRTreePrinter.java
deleted file mode 100644
index 4b64428..0000000
--- a/src/org/antlr/tool/ANTLRTreePrinter.java
+++ /dev/null
@@ -1,2295 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "antlr.print.g" -> "ANTLRTreePrinter.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.tool;
-	import java.util.*;
-
-import antlr.TreeParser;
-import antlr.Token;
-import antlr.collections.AST;
-import antlr.RecognitionException;
-import antlr.ANTLRException;
-import antlr.NoViableAltException;
-import antlr.MismatchedTokenException;
-import antlr.SemanticException;
-import antlr.collections.impl.BitSet;
-import antlr.ASTPair;
-import antlr.collections.impl.ASTArray;
-
-
-/** Print out a grammar (no pretty printing).
- *
- *  Terence Parr
- *  University of San Francisco
- *  August 19, 2003
- */
-public class ANTLRTreePrinter extends antlr.TreeParser       implements ANTLRTreePrinterTokenTypes
- {
-
-	protected Grammar grammar;
-	protected boolean showActions;
-    protected StringBuffer buf = new StringBuffer(300);
-
-    public void out(String s) {
-        buf.append(s);
-    }
-
-    public void reportError(RecognitionException ex) {
-		Token token = null;
-		if ( ex instanceof MismatchedTokenException ) {
-			token = ((MismatchedTokenException)ex).token;
-		}
-		else if ( ex instanceof NoViableAltException ) {
-			token = ((NoViableAltException)ex).token;
-		}
-        ErrorManager.syntaxError(
-            ErrorManager.MSG_SYNTAX_ERROR,
-            grammar,
-            token,
-            "antlr.print: "+ex.toString(),
-            ex);
-    }
-
-	/** Normalize a grammar print out by removing all double spaces
-	 *  and trailing/beginning stuff.  FOr example, convert
-	 *
-	 *  ( A  |  B  |  C )*
-	 *
-	 *  to
-	 *
-	 *  ( A | B | C )*
-	 */
-	public static String normalize(String g) {
-	    StringTokenizer st = new StringTokenizer(g, " ", false);
-		StringBuffer buf = new StringBuffer();
-		while ( st.hasMoreTokens() ) {
-			String w = st.nextToken();
-			buf.append(w);
-			buf.append(" ");
-		}
-		return buf.toString().trim();
-	}
-public ANTLRTreePrinter() {
-	tokenNames = _tokenNames;
-}
-
-/** Call this to figure out how to print */
-	public final String  toString(AST _t,
-		Grammar g, boolean showActions
-	) throws RecognitionException {
-		String s=null;
-		
-		GrammarAST toString_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		grammar = g;
-		this.showActions = showActions;
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LEXER_GRAMMAR:
-			case PARSER_GRAMMAR:
-			case TREE_GRAMMAR:
-			case COMBINED_GRAMMAR:
-			{
-				grammar(_t);
-				_t = _retTree;
-				break;
-			}
-			case RULE:
-			{
-				rule(_t);
-				_t = _retTree;
-				break;
-			}
-			case ALT:
-			{
-				alternative(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			case SYNPRED:
-			case RANGE:
-			case CHAR_RANGE:
-			case EPSILON:
-			case LABEL:
-			case GATED_SEMPRED:
-			case SYN_SEMPRED:
-			case BACKTRACK_SEMPRED:
-			case ACTION:
-			case ASSIGN:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case BANG:
-			case PLUS_ASSIGN:
-			case SEMPRED:
-			case ROOT:
-			case RULE_REF:
-			case NOT:
-			case TREE_BEGIN:
-			case WILDCARD:
-			{
-				element(_t);
-				_t = _retTree;
-				break;
-			}
-			case REWRITE:
-			{
-				single_rewrite(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				GrammarAST tmp1_AST_in = (GrammarAST)_t;
-				match(_t,EOR);
-				_t = _t.getNextSibling();
-				s="EOR";
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			return normalize(buf.toString());
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return s;
-	}
-	
-	public final void grammar(AST _t) throws RecognitionException {
-		
-		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LEXER_GRAMMAR:
-			{
-				AST __t5 = _t;
-				GrammarAST tmp2_AST_in = (GrammarAST)_t;
-				match(_t,LEXER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t,"lexer " );
-				_t = _retTree;
-				_t = __t5;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PARSER_GRAMMAR:
-			{
-				AST __t6 = _t;
-				GrammarAST tmp3_AST_in = (GrammarAST)_t;
-				match(_t,PARSER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t,"parser ");
-				_t = _retTree;
-				_t = __t6;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TREE_GRAMMAR:
-			{
-				AST __t7 = _t;
-				GrammarAST tmp4_AST_in = (GrammarAST)_t;
-				match(_t,TREE_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t,"tree ");
-				_t = _retTree;
-				_t = __t7;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case COMBINED_GRAMMAR:
-			{
-				AST __t8 = _t;
-				GrammarAST tmp5_AST_in = (GrammarAST)_t;
-				match(_t,COMBINED_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t,"");
-				_t = _retTree;
-				_t = __t8;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rule(AST _t) throws RecognitionException {
-		
-		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST arg = null;
-		GrammarAST ret = null;
-		GrammarAST b = null;
-		
-		try {      // for error handling
-			AST __t42 = _t;
-			GrammarAST tmp6_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			out(id.getText());
-			AST __t44 = _t;
-			GrammarAST tmp7_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				arg = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				out("["+arg.getText()+"]");
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t44;
-			_t = _t.getNextSibling();
-			AST __t46 = _t;
-			GrammarAST tmp8_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				ret = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				out(" returns ["+ret.getText()+"]");
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t46;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop51:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					ruleAction(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop51;
-				}
-				
-			} while (true);
-			}
-			out(" : ");
-			b = _t==ASTNULL ? null : (GrammarAST)_t;
-			block(_t,false);
-			_t = _retTree;
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp9_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			out(";\n");
-			_t = __t42;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void alternative(AST _t) throws RecognitionException {
-		
-		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t74 = _t;
-			GrammarAST tmp10_AST_in = (GrammarAST)_t;
-			match(_t,ALT);
-			_t = _t.getFirstChild();
-			{
-			int _cnt76=0;
-			_loop76:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==LABEL||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.get [...]
-					element(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt76>=1 ) { break _loop76; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt76++;
-			} while (true);
-			}
-			GrammarAST tmp11_AST_in = (GrammarAST)_t;
-			match(_t,EOA);
-			_t = _t.getNextSibling();
-			_t = __t74;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void element(AST _t) throws RecognitionException {
-		
-		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST id2 = null;
-		GrammarAST a = null;
-		GrammarAST pred = null;
-		GrammarAST spred = null;
-		GrammarAST gpred = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				AST __t101 = _t;
-				GrammarAST tmp12_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t101;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				AST __t102 = _t;
-				GrammarAST tmp13_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t102;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LABEL:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case WILDCARD:
-			{
-				atom(_t);
-				_t = _retTree;
-				break;
-			}
-			case NOT:
-			{
-				AST __t103 = _t;
-				GrammarAST tmp14_AST_in = (GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				out("~");
-				element(_t);
-				_t = _retTree;
-				_t = __t103;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RANGE:
-			{
-				AST __t104 = _t;
-				GrammarAST tmp15_AST_in = (GrammarAST)_t;
-				match(_t,RANGE);
-				_t = _t.getFirstChild();
-				atom(_t);
-				_t = _retTree;
-				out("..");
-				atom(_t);
-				_t = _retTree;
-				_t = __t104;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t105 = _t;
-				GrammarAST tmp16_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				atom(_t);
-				_t = _retTree;
-				out("..");
-				atom(_t);
-				_t = _retTree;
-				_t = __t105;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t106 = _t;
-				GrammarAST tmp17_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				id = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				out(id.getText()+"=");
-				element(_t);
-				_t = _retTree;
-				_t = __t106;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PLUS_ASSIGN:
-			{
-				AST __t107 = _t;
-				GrammarAST tmp18_AST_in = (GrammarAST)_t;
-				match(_t,PLUS_ASSIGN);
-				_t = _t.getFirstChild();
-				id2 = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				out(id2.getText()+"+=");
-				element(_t);
-				_t = _retTree;
-				_t = __t107;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			{
-				ebnf(_t);
-				_t = _retTree;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				tree(_t);
-				_t = _retTree;
-				break;
-			}
-			case SYNPRED:
-			{
-				AST __t108 = _t;
-				GrammarAST tmp19_AST_in = (GrammarAST)_t;
-				match(_t,SYNPRED);
-				_t = _t.getFirstChild();
-				block(_t,true);
-				_t = _retTree;
-				_t = __t108;
-				_t = _t.getNextSibling();
-				out("=>");
-				break;
-			}
-			case ACTION:
-			{
-				a = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( showActions ) {out("{"); out(a.getText()); out("}");}
-				break;
-			}
-			case SEMPRED:
-			{
-				pred = (GrammarAST)_t;
-				match(_t,SEMPRED);
-				_t = _t.getNextSibling();
-				
-					if ( showActions ) {out("{"); out(pred.getText()); out("}?");}
-					else {out("{...}?");}
-					
-				break;
-			}
-			case SYN_SEMPRED:
-			{
-				spred = (GrammarAST)_t;
-				match(_t,SYN_SEMPRED);
-				_t = _t.getNextSibling();
-				
-					  String name = spred.getText();
-					  GrammarAST predAST=grammar.getSyntacticPredicate(name);
-					  block(predAST, true);
-					  out("=>");
-					
-				break;
-			}
-			case BACKTRACK_SEMPRED:
-			{
-				GrammarAST tmp20_AST_in = (GrammarAST)_t;
-				match(_t,BACKTRACK_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case GATED_SEMPRED:
-			{
-				gpred = (GrammarAST)_t;
-				match(_t,GATED_SEMPRED);
-				_t = _t.getNextSibling();
-				
-					if ( showActions ) {out("{"); out(gpred.getText()); out("}? =>");}
-					else {out("{...}? =>");}
-					
-				break;
-			}
-			case EPSILON:
-			{
-				GrammarAST tmp21_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void single_rewrite(AST _t) throws RecognitionException {
-		
-		GrammarAST single_rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t86 = _t;
-			GrammarAST tmp22_AST_in = (GrammarAST)_t;
-			match(_t,REWRITE);
-			_t = _t.getFirstChild();
-			out(" ->");
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SEMPRED:
-			{
-				GrammarAST tmp23_AST_in = (GrammarAST)_t;
-				match(_t,SEMPRED);
-				_t = _t.getNextSibling();
-				out(" {"+tmp23_AST_in.getText()+"}?");
-				break;
-			}
-			case ALT:
-			case TEMPLATE:
-			case ACTION:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ALT:
-			{
-				alternative(_t);
-				_t = _retTree;
-				break;
-			}
-			case TEMPLATE:
-			{
-				rewrite_template(_t);
-				_t = _retTree;
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp24_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				out(" {"+tmp24_AST_in.getText()+"}");
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t86;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void grammarSpec(AST _t,
-		String gtype
-	) throws RecognitionException {
-		
-		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST cmt = null;
-		
-		try {      // for error handling
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			out(gtype+"grammar "+id.getText());
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case DOC_COMMENT:
-			{
-				cmt = (GrammarAST)_t;
-				match(_t,DOC_COMMENT);
-				_t = _t.getNextSibling();
-				out(cmt.getText()+"\n");
-				break;
-			}
-			case OPTIONS:
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			out(";\n");
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKENS:
-			{
-				tokensSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop16:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==SCOPE)) {
-					attrScope(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop16;
-				}
-				
-			} while (true);
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case AMPERSAND:
-			{
-				actions(_t);
-				_t = _retTree;
-				break;
-			}
-			case RULE:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			rules(_t);
-			_t = _retTree;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void attrScope(AST _t) throws RecognitionException {
-		
-		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t10 = _t;
-			GrammarAST tmp25_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			GrammarAST tmp26_AST_in = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			GrammarAST tmp27_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t10;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void optionsSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST optionsSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t25 = _t;
-			GrammarAST tmp28_AST_in = (GrammarAST)_t;
-			match(_t,OPTIONS);
-			_t = _t.getFirstChild();
-			out(" options {");
-			{
-			int _cnt27=0;
-			_loop27:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ASSIGN)) {
-					option(_t);
-					_t = _retTree;
-					out("; ");
-				}
-				else {
-					if ( _cnt27>=1 ) { break _loop27; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt27++;
-			} while (true);
-			}
-			out("} ");
-			_t = __t25;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tokensSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST tokensSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t32 = _t;
-			GrammarAST tmp29_AST_in = (GrammarAST)_t;
-			match(_t,TOKENS);
-			_t = _t.getFirstChild();
-			{
-			int _cnt34=0;
-			_loop34:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ASSIGN||_t.getType()==TOKEN_REF)) {
-					tokenSpec(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt34>=1 ) { break _loop34; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt34++;
-			} while (true);
-			}
-			_t = __t32;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void actions(AST _t) throws RecognitionException {
-		
-		GrammarAST actions_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			int _cnt20=0;
-			_loop20:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					action(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt20>=1 ) { break _loop20; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt20++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rules(AST _t) throws RecognitionException {
-		
-		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			int _cnt40=0;
-			_loop40:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==RULE)) {
-					rule(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt40>=1 ) { break _loop40; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt40++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void action(AST _t) throws RecognitionException {
-		
-		GrammarAST action_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id1 = null;
-		GrammarAST id2 = null;
-		GrammarAST a1 = null;
-		GrammarAST a2 = null;
-		
-		String scope=null, name=null;
-		String action=null;
-		
-		
-		try {      // for error handling
-			AST __t22 = _t;
-			GrammarAST tmp30_AST_in = (GrammarAST)_t;
-			match(_t,AMPERSAND);
-			_t = _t.getFirstChild();
-			id1 = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ID:
-			{
-				id2 = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				a1 = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				scope=id1.getText(); name=a1.getText(); action=a1.getText();
-				break;
-			}
-			case ACTION:
-			{
-				a2 = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				scope=null; name=id1.getText(); action=a2.getText();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t22;
-			_t = _t.getNextSibling();
-			
-					 if ( showActions ) {
-					 	out("@"+(scope!=null?scope+"::":"")+name+action);
-					 }
-					
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void option(AST _t) throws RecognitionException {
-		
-		GrammarAST option_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		
-		try {      // for error handling
-			AST __t29 = _t;
-			GrammarAST tmp31_AST_in = (GrammarAST)_t;
-			match(_t,ASSIGN);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			out(id.getText()+"=");
-			optionValue(_t);
-			_t = _retTree;
-			_t = __t29;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void optionValue(AST _t) throws RecognitionException {
-		
-		GrammarAST optionValue_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST s = null;
-		GrammarAST c = null;
-		GrammarAST i = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ID:
-			{
-				id = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				out(id.getText());
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				out(s.getText());
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				out(c.getText());
-				break;
-			}
-			case INT:
-			{
-				i = (GrammarAST)_t;
-				match(_t,INT);
-				_t = _t.getNextSibling();
-				out(i.getText());
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tokenSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST tokenSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKEN_REF:
-			{
-				GrammarAST tmp32_AST_in = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t36 = _t;
-				GrammarAST tmp33_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				GrammarAST tmp34_AST_in = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case STRING_LITERAL:
-				{
-					GrammarAST tmp35_AST_in = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					GrammarAST tmp36_AST_in = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t36;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void modifier(AST _t) throws RecognitionException {
-		
-		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		out(modifier_AST_in.getText()); out(" ");
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_protected:
-			{
-				GrammarAST tmp37_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_protected);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_public:
-			{
-				GrammarAST tmp38_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_public);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_private:
-			{
-				GrammarAST tmp39_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_private);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case FRAGMENT:
-			{
-				GrammarAST tmp40_AST_in = (GrammarAST)_t;
-				match(_t,FRAGMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ruleScopeSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t57 = _t;
-			GrammarAST tmp41_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ACTION:
-			{
-				GrammarAST tmp42_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			case ID:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop60:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ID)) {
-					GrammarAST tmp43_AST_in = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop60;
-				}
-				
-			} while (true);
-			}
-			_t = __t57;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ruleAction(AST _t) throws RecognitionException {
-		
-		GrammarAST ruleAction_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST a = null;
-		
-		try {      // for error handling
-			AST __t54 = _t;
-			GrammarAST tmp44_AST_in = (GrammarAST)_t;
-			match(_t,AMPERSAND);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			a = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t54;
-			_t = _t.getNextSibling();
-			if ( showActions ) out("@"+id.getText()+"{"+a.getText()+"}");
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void block(AST _t,
-		boolean forceParens
-	) throws RecognitionException {
-		
-		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		int numAlts = countAltsForBlock(block_AST_in);
-		
-		
-		try {      // for error handling
-			AST __t62 = _t;
-			GrammarAST tmp45_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			if ( forceParens||numAlts>1 ) out(" (");
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				out(" : ");
-				break;
-			}
-			case ALT:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			alternative(_t);
-			_t = _retTree;
-			rewrite(_t);
-			_t = _retTree;
-			{
-			_loop65:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					out(" | ");
-					alternative(_t);
-					_t = _retTree;
-					rewrite(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop65;
-				}
-				
-			} while (true);
-			}
-			GrammarAST tmp46_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			if ( forceParens||numAlts>1 ) out(")");
-			_t = __t62;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void exceptionGroup(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			{
-				{
-				int _cnt79=0;
-				_loop79:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==LITERAL_catch)) {
-						exceptionHandler(_t);
-						_t = _retTree;
-					}
-					else {
-						if ( _cnt79>=1 ) { break _loop79; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt79++;
-				} while (true);
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case LITERAL_finally:
-				{
-					finallyClause(_t);
-					_t = _retTree;
-					break;
-				}
-				case EOR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				break;
-			}
-			case LITERAL_finally:
-			{
-				finallyClause(_t);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			_loop99:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==REWRITE)) {
-					single_rewrite(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop99;
-				}
-				
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final int  countAltsForBlock(AST _t) throws RecognitionException {
-		int n=0;
-		
-		GrammarAST countAltsForBlock_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t67 = _t;
-			GrammarAST tmp47_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp48_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ALT:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			int _cnt72=0;
-			_loop72:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					GrammarAST tmp49_AST_in = (GrammarAST)_t;
-					match(_t,ALT);
-					_t = _t.getNextSibling();
-					{
-					_loop71:
-					do {
-						if (_t==null) _t=ASTNULL;
-						if ((_t.getType()==REWRITE)) {
-							GrammarAST tmp50_AST_in = (GrammarAST)_t;
-							match(_t,REWRITE);
-							_t = _t.getNextSibling();
-						}
-						else {
-							break _loop71;
-						}
-						
-					} while (true);
-					}
-					n++;
-				}
-				else {
-					if ( _cnt72>=1 ) { break _loop72; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt72++;
-			} while (true);
-			}
-			GrammarAST tmp51_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t67;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return n;
-	}
-	
-	public final void exceptionHandler(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t82 = _t;
-			GrammarAST tmp52_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_catch);
-			_t = _t.getFirstChild();
-			GrammarAST tmp53_AST_in = (GrammarAST)_t;
-			match(_t,ARG_ACTION);
-			_t = _t.getNextSibling();
-			GrammarAST tmp54_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t82;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void finallyClause(AST _t) throws RecognitionException {
-		
-		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t84 = _t;
-			GrammarAST tmp55_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_finally);
-			_t = _t.getFirstChild();
-			GrammarAST tmp56_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t84;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_template(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_template_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST ind = null;
-		GrammarAST arg = null;
-		GrammarAST a = null;
-		
-		try {      // for error handling
-			AST __t90 = _t;
-			GrammarAST tmp57_AST_in = (GrammarAST)_t;
-			match(_t,TEMPLATE);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ID:
-			{
-				id = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				out(" "+id.getText());
-				break;
-			}
-			case ACTION:
-			{
-				ind = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				out(" ({"+ind.getText()+"})");
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			AST __t92 = _t;
-			GrammarAST tmp58_AST_in = (GrammarAST)_t;
-			match(_t,ARGLIST);
-			_t = _t.getFirstChild();
-			out("(");
-			{
-			_loop95:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ARG)) {
-					AST __t94 = _t;
-					GrammarAST tmp59_AST_in = (GrammarAST)_t;
-					match(_t,ARG);
-					_t = _t.getFirstChild();
-					arg = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-					out(arg.getText()+"=");
-					a = (GrammarAST)_t;
-					match(_t,ACTION);
-					_t = _t.getNextSibling();
-					out(a.getText());
-					_t = __t94;
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop95;
-				}
-				
-			} while (true);
-			}
-			out(")");
-			_t = __t92;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case DOUBLE_QUOTE_STRING_LITERAL:
-			{
-				GrammarAST tmp60_AST_in = (GrammarAST)_t;
-				match(_t,DOUBLE_QUOTE_STRING_LITERAL);
-				_t = _t.getNextSibling();
-				out(" "+tmp60_AST_in.getText());
-				break;
-			}
-			case DOUBLE_ANGLE_STRING_LITERAL:
-			{
-				GrammarAST tmp61_AST_in = (GrammarAST)_t;
-				match(_t,DOUBLE_ANGLE_STRING_LITERAL);
-				_t = _t.getNextSibling();
-				out(" "+tmp61_AST_in.getText());
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t90;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void atom(AST _t) throws RecognitionException {
-		
-		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST rarg = null;
-		GrammarAST targ = null;
-		out(" ");
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case WILDCARD:
-			{
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case RULE_REF:
-				{
-					AST __t119 = _t;
-					GrammarAST tmp62_AST_in = (GrammarAST)_t;
-					match(_t,RULE_REF);
-					_t = _t.getFirstChild();
-					out(atom_AST_in.toString());
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case ARG_ACTION:
-					{
-						rarg = (GrammarAST)_t;
-						match(_t,ARG_ACTION);
-						_t = _t.getNextSibling();
-						out("["+rarg.toString()+"]");
-						break;
-					}
-					case 3:
-					case BANG:
-					case ROOT:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t119;
-					_t = _t.getNextSibling();
-					break;
-				}
-				case TOKEN_REF:
-				{
-					AST __t122 = _t;
-					GrammarAST tmp63_AST_in = (GrammarAST)_t;
-					match(_t,TOKEN_REF);
-					_t = _t.getFirstChild();
-					out(atom_AST_in.toString());
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case ARG_ACTION:
-					{
-						targ = (GrammarAST)_t;
-						match(_t,ARG_ACTION);
-						_t = _t.getNextSibling();
-						out("["+targ.toString()+"]");
-						break;
-					}
-					case 3:
-					case BANG:
-					case ROOT:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t122;
-					_t = _t.getNextSibling();
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					AST __t125 = _t;
-					GrammarAST tmp64_AST_in = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getFirstChild();
-					out(atom_AST_in.toString());
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t125;
-					_t = _t.getNextSibling();
-					break;
-				}
-				case STRING_LITERAL:
-				{
-					AST __t127 = _t;
-					GrammarAST tmp65_AST_in = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getFirstChild();
-					out(atom_AST_in.toString());
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t127;
-					_t = _t.getNextSibling();
-					break;
-				}
-				case WILDCARD:
-				{
-					AST __t129 = _t;
-					GrammarAST tmp66_AST_in = (GrammarAST)_t;
-					match(_t,WILDCARD);
-					_t = _t.getFirstChild();
-					out(atom_AST_in.toString());
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t129;
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				out(" ");
-				break;
-			}
-			case LABEL:
-			{
-				GrammarAST tmp67_AST_in = (GrammarAST)_t;
-				match(_t,LABEL);
-				_t = _t.getNextSibling();
-				out(" $"+tmp67_AST_in.getText());
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ebnf(AST _t) throws RecognitionException {
-		
-		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case BLOCK:
-			{
-				block(_t,true);
-				_t = _retTree;
-				out(" ");
-				break;
-			}
-			case OPTIONAL:
-			{
-				AST __t110 = _t;
-				GrammarAST tmp68_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				block(_t,true);
-				_t = _retTree;
-				_t = __t110;
-				_t = _t.getNextSibling();
-				out("? ");
-				break;
-			}
-			case CLOSURE:
-			{
-				AST __t111 = _t;
-				GrammarAST tmp69_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				block(_t,true);
-				_t = _retTree;
-				_t = __t111;
-				_t = _t.getNextSibling();
-				out("* ");
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				AST __t112 = _t;
-				GrammarAST tmp70_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				block(_t,true);
-				_t = _retTree;
-				_t = __t112;
-				_t = _t.getNextSibling();
-				out("+ ");
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tree(AST _t) throws RecognitionException {
-		
-		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t114 = _t;
-			GrammarAST tmp71_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			out(" ^(");
-			element(_t);
-			_t = _retTree;
-			{
-			_loop116:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==LABEL||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.get [...]
-					element(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop116;
-				}
-				
-			} while (true);
-			}
-			out(") ");
-			_t = __t114;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ast_suffix(AST _t) throws RecognitionException {
-		
-		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				GrammarAST tmp72_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getNextSibling();
-				out("^");
-				break;
-			}
-			case BANG:
-			{
-				GrammarAST tmp73_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getNextSibling();
-				out("!");
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	
-	public static final String[] _tokenNames = {
-		"<0>",
-		"EOF",
-		"<2>",
-		"NULL_TREE_LOOKAHEAD",
-		"\"options\"",
-		"\"tokens\"",
-		"\"parser\"",
-		"LEXER",
-		"RULE",
-		"BLOCK",
-		"OPTIONAL",
-		"CLOSURE",
-		"POSITIVE_CLOSURE",
-		"SYNPRED",
-		"RANGE",
-		"CHAR_RANGE",
-		"EPSILON",
-		"ALT",
-		"EOR",
-		"EOB",
-		"EOA",
-		"ID",
-		"ARG",
-		"ARGLIST",
-		"RET",
-		"LEXER_GRAMMAR",
-		"PARSER_GRAMMAR",
-		"TREE_GRAMMAR",
-		"COMBINED_GRAMMAR",
-		"INITACTION",
-		"LABEL",
-		"TEMPLATE",
-		"\"scope\"",
-		"GATED_SEMPRED",
-		"SYN_SEMPRED",
-		"BACKTRACK_SEMPRED",
-		"\"fragment\"",
-		"ACTION",
-		"DOC_COMMENT",
-		"SEMI",
-		"\"lexer\"",
-		"\"tree\"",
-		"\"grammar\"",
-		"AMPERSAND",
-		"COLON",
-		"RCURLY",
-		"ASSIGN",
-		"STRING_LITERAL",
-		"CHAR_LITERAL",
-		"INT",
-		"STAR",
-		"TOKEN_REF",
-		"\"protected\"",
-		"\"public\"",
-		"\"private\"",
-		"BANG",
-		"ARG_ACTION",
-		"\"returns\"",
-		"\"throws\"",
-		"COMMA",
-		"LPAREN",
-		"OR",
-		"RPAREN",
-		"\"catch\"",
-		"\"finally\"",
-		"PLUS_ASSIGN",
-		"SEMPRED",
-		"IMPLIES",
-		"ROOT",
-		"RULE_REF",
-		"NOT",
-		"TREE_BEGIN",
-		"QUESTION",
-		"PLUS",
-		"WILDCARD",
-		"REWRITE",
-		"DOLLAR",
-		"DOUBLE_QUOTE_STRING_LITERAL",
-		"DOUBLE_ANGLE_STRING_LITERAL",
-		"WS",
-		"COMMENT",
-		"SL_COMMENT",
-		"ML_COMMENT",
-		"OPEN_ELEMENT_OPTION",
-		"CLOSE_ELEMENT_OPTION",
-		"ESC",
-		"DIGIT",
-		"XDIGIT",
-		"NESTED_ARG_ACTION",
-		"NESTED_ACTION",
-		"ACTION_CHAR_LITERAL",
-		"ACTION_STRING_LITERAL",
-		"ACTION_ESC",
-		"WS_LOOP",
-		"INTERNAL_RULE_REF",
-		"WS_OPT",
-		"SRC"
-	};
-	
-	}
-	
diff --git a/src/org/antlr/tool/ANTLRTreePrinter.smap b/src/org/antlr/tool/ANTLRTreePrinter.smap
deleted file mode 100644
index 1cc3b71..0000000
--- a/src/org/antlr/tool/ANTLRTreePrinter.smap
+++ /dev/null
@@ -1,1670 +0,0 @@
-SMAP
-ANTLRTreePrinter.java
-G
-*S G
-*F
-+ 0 antlr.print.g
-antlr.print.g
-*L
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-48:55
-49:56
-50:57
-52:59
-53:60
-54:61
-56:63
-57:64
-58:65
-59:66
-60:67
-61:68
-62:69
-63:70
-64:71
-65:72
-66:73
-67:74
-68:75
-69:76
-70:77
-72:79
-73:80
-74:81
-75:82
-76:83
-77:84
-78:85
-79:86
-80:87
-81:88
-82:89
-83:90
-84:91
-85:92
-86:93
-87:94
-88:95
-89:96
-90:97
-94:102
-94:103
-94:104
-94:105
-94:106
-94:114
-94:190
-94:191
-94:192
-94:193
-94:194
-94:195
-94:196
-94:197
-95:110
-96:111
-99:116
-99:117
-99:118
-99:119
-99:120
-99:121
-99:122
-99:123
-99:124
-99:183
-99:184
-99:185
-99:186
-99:187
-100:127
-100:128
-100:129
-100:130
-101:133
-101:134
-101:135
-101:136
-102:139
-102:140
-102:141
-102:142
-102:143
-102:144
-102:145
-102:146
-102:147
-102:148
-102:149
-102:150
-102:151
-102:152
-102:153
-102:154
-102:155
-102:156
-102:157
-102:158
-102:159
-102:160
-102:161
-102:162
-102:163
-102:164
-102:165
-102:166
-103:169
-103:170
-103:171
-103:172
-104:175
-104:176
-104:177
-104:178
-104:179
-104:180
-106:189
-111:199
-111:203
-111:261
-111:262
-111:263
-111:264
-111:265
-111:266
-111:267
-112:205
-112:206
-112:207
-112:208
-112:209
-112:210
-112:211
-112:212
-112:213
-112:214
-112:215
-112:216
-112:255
-112:256
-112:257
-112:258
-112:259
-113:219
-113:220
-113:221
-113:222
-113:223
-113:224
-113:225
-113:226
-113:227
-113:228
-114:231
-114:232
-114:233
-114:234
-114:235
-114:236
-114:237
-114:238
-114:239
-114:240
-115:243
-115:244
-115:245
-115:246
-115:247
-115:248
-115:249
-115:250
-115:251
-115:252
-119:911
-119:915
-119:928
-119:929
-119:930
-119:931
-119:932
-119:933
-119:934
-120:916
-120:917
-120:918
-120:919
-120:920
-120:921
-120:922
-120:923
-120:924
-120:925
-120:926
-120:927
-123:786
-123:787
-123:788
-123:794
-123:903
-123:904
-123:905
-123:906
-123:907
-123:908
-123:909
-124:791
-124:795
-124:796
-124:797
-124:798
-125:792
-125:800
-125:801
-125:802
-125:803
-125:804
-125:805
-125:806
-125:807
-125:818
-125:819
-125:820
-125:821
-125:822
-126:825
-126:826
-126:827
-126:828
-126:829
-126:830
-126:840
-126:841
-126:842
-126:843
-126:844
-126:846
-127:848
-127:849
-127:850
-127:851
-127:852
-127:853
-127:862
-127:863
-127:864
-127:865
-127:866
-128:868
-128:869
-128:870
-128:871
-128:872
-128:873
-128:874
-128:875
-128:876
-128:877
-128:878
-128:880
-128:881
-129:883
-129:884
-129:885
-129:886
-129:887
-129:888
-129:895
-129:896
-129:897
-129:898
-129:899
-130:901
-130:902
-133:1009
-133:1013
-133:1030
-133:1031
-133:1032
-133:1033
-133:1034
-133:1035
-133:1036
-134:1015
-134:1016
-134:1017
-134:1018
-134:1019
-134:1020
-134:1021
-134:1022
-134:1023
-134:1024
-134:1025
-134:1027
-134:1028
-134:1029
-137:1067
-137:1079
-137:1122
-137:1123
-137:1124
-137:1125
-137:1126
-137:1127
-137:1128
-138:1075
-139:1076
-142:1070
-142:1080
-142:1081
-142:1082
-142:1083
-142:1084
-142:1085
-142:1086
-142:1115
-142:1116
-143:1071
-143:1072
-143:1088
-143:1089
-143:1090
-143:1091
-143:1092
-143:1093
-143:1094
-143:1095
-143:1096
-143:1097
-143:1109
-143:1110
-143:1111
-143:1112
-143:1113
-144:1098
-145:1073
-145:1101
-145:1102
-145:1103
-145:1104
-145:1105
-146:1106
-150:1118
-151:1119
-152:1120
-156:936
-156:940
-156:966
-156:967
-156:968
-156:969
-156:970
-156:971
-156:972
-157:941
-157:942
-157:943
-157:944
-157:945
-157:964
-157:965
-158:947
-158:948
-158:949
-158:950
-158:951
-158:952
-158:953
-158:954
-158:955
-158:956
-158:957
-158:958
-158:960
-158:961
-158:962
-159:963
-163:1130
-163:1135
-163:1148
-163:1149
-163:1150
-163:1151
-163:1152
-163:1153
-163:1154
-164:1133
-164:1136
-164:1137
-164:1138
-164:1139
-164:1140
-164:1141
-164:1142
-164:1143
-164:1144
-164:1145
-164:1146
-164:1147
-167:1156
-167:1164
-167:1165
-167:1166
-167:1199
-167:1200
-167:1201
-167:1202
-167:1203
-167:1204
-167:1205
-167:1206
-167:1207
-167:1208
-167:1209
-167:1210
-168:1159
-168:1167
-168:1168
-168:1169
-168:1170
-168:1171
-168:1172
-169:1160
-169:1175
-169:1176
-169:1177
-169:1178
-169:1179
-169:1180
-170:1161
-170:1183
-170:1184
-170:1185
-170:1186
-170:1187
-170:1188
-171:1162
-171:1191
-171:1192
-171:1193
-171:1194
-171:1195
-171:1196
-187:974
-187:978
-187:1001
-187:1002
-187:1003
-187:1004
-187:1005
-187:1006
-187:1007
-188:979
-188:980
-188:981
-188:982
-188:984
-188:985
-188:986
-188:987
-188:988
-188:989
-188:990
-188:991
-188:992
-188:993
-188:994
-188:996
-188:997
-188:998
-188:999
-188:1000
-191:1212
-191:1216
-191:1217
-191:1218
-191:1262
-191:1263
-191:1264
-191:1265
-191:1266
-191:1267
-191:1268
-191:1269
-191:1270
-191:1271
-191:1272
-191:1273
-192:1219
-192:1220
-192:1221
-192:1222
-192:1223
-193:1226
-193:1227
-193:1228
-193:1229
-193:1230
-193:1231
-193:1232
-193:1233
-193:1234
-193:1236
-193:1237
-193:1238
-193:1239
-193:1240
-193:1241
-193:1242
-193:1245
-193:1246
-193:1247
-193:1248
-193:1249
-193:1252
-193:1253
-193:1254
-193:1255
-193:1256
-193:1258
-193:1259
-196:1038
-196:1042
-196:1059
-196:1060
-196:1061
-196:1062
-196:1063
-196:1064
-196:1065
-197:1044
-197:1045
-197:1046
-197:1047
-197:1048
-197:1049
-197:1050
-197:1051
-197:1052
-197:1053
-197:1054
-197:1056
-197:1057
-197:1058
-200:269
-200:277
-200:447
-200:448
-200:449
-200:450
-200:451
-200:452
-200:453
-201:272
-201:278
-201:279
-201:280
-201:281
-201:282
-201:283
-201:284
-201:445
-201:446
-202:286
-202:287
-202:288
-202:289
-202:290
-202:291
-202:292
-202:293
-202:294
-202:301
-202:302
-202:303
-202:304
-202:305
-203:307
-204:273
-204:308
-204:309
-204:310
-204:311
-204:313
-204:314
-204:315
-204:316
-204:317
-204:318
-204:319
-204:320
-204:327
-204:328
-204:329
-204:330
-204:331
-204:333
-204:334
-205:274
-205:335
-205:336
-205:337
-205:338
-205:340
-205:341
-205:342
-205:343
-205:344
-205:345
-205:346
-205:347
-205:354
-205:355
-205:356
-205:357
-205:358
-205:360
-205:361
-206:363
-206:364
-206:365
-206:366
-206:367
-206:368
-206:377
-206:378
-206:379
-206:380
-206:381
-207:384
-207:385
-207:386
-207:387
-207:388
-207:389
-207:397
-207:398
-207:399
-207:400
-207:401
-208:403
-208:404
-208:405
-208:406
-208:407
-208:408
-208:409
-208:410
-208:411
-208:412
-208:413
-208:415
-208:416
-209:417
-210:275
-210:418
-210:419
-210:420
-211:422
-211:423
-211:424
-211:425
-211:426
-211:427
-211:428
-211:435
-211:436
-211:437
-211:438
-211:439
-212:441
-212:442
-212:443
-212:444
-216:1379
-216:1385
-216:1399
-216:1400
-216:1401
-216:1402
-216:1403
-216:1404
-216:1405
-217:1382
-217:1383
-217:1386
-217:1387
-217:1388
-217:1389
-217:1390
-217:1391
-217:1392
-217:1393
-217:1394
-217:1395
-217:1396
-217:1397
-218:1398
-221:1275
-221:1278
-221:1280
-221:1281
-221:1282
-221:1311
-221:1312
-221:1313
-221:1314
-221:1315
-221:1316
-221:1317
-221:1318
-221:1319
-221:1320
-221:1321
-221:1322
-223:1283
-223:1284
-223:1285
-223:1286
-223:1287
-224:1290
-224:1291
-224:1292
-224:1293
-224:1294
-225:1297
-225:1298
-225:1299
-225:1300
-225:1301
-226:1304
-226:1305
-226:1306
-226:1307
-226:1308
-229:1324
-229:1328
-229:1371
-229:1372
-229:1373
-229:1374
-229:1375
-229:1376
-229:1377
-230:1329
-230:1330
-230:1331
-230:1332
-230:1334
-230:1335
-230:1336
-230:1337
-230:1338
-230:1339
-230:1340
-230:1348
-230:1349
-230:1350
-230:1351
-230:1352
-230:1354
-230:1355
-230:1356
-230:1357
-230:1358
-230:1359
-230:1360
-230:1361
-230:1362
-230:1363
-230:1364
-230:1365
-230:1367
-230:1368
-230:1369
-230:1370
-233:1407
-233:1408
-233:1409
-233:1416
-233:1469
-233:1470
-233:1471
-233:1472
-233:1473
-233:1474
-233:1475
-234:1413
-237:1417
-237:1418
-237:1419
-237:1420
-237:1421
-237:1467
-237:1468
-238:1423
-238:1424
-238:1425
-238:1426
-238:1427
-238:1428
-238:1429
-238:1436
-238:1437
-238:1438
-238:1439
-238:1440
-239:1442
-239:1443
-239:1444
-239:1445
-239:1446
-239:1447
-239:1448
-239:1449
-239:1450
-239:1451
-239:1452
-239:1453
-239:1454
-239:1455
-239:1456
-239:1457
-239:1458
-239:1459
-239:1461
-239:1462
-240:1463
-240:1464
-240:1465
-240:1466
-244:1569
-244:1570
-244:1574
-244:1637
-244:1638
-244:1639
-244:1640
-244:1641
-244:1642
-244:1643
-244:1644
-245:1575
-245:1576
-245:1577
-245:1578
-245:1580
-245:1581
-245:1582
-245:1583
-245:1584
-245:1585
-245:1586
-245:1593
-245:1594
-245:1595
-245:1596
-245:1597
-245:1600
-245:1601
-245:1602
-245:1603
-245:1604
-245:1605
-245:1606
-245:1607
-245:1608
-245:1609
-245:1610
-245:1611
-245:1612
-245:1613
-245:1614
-245:1615
-245:1616
-245:1617
-245:1618
-245:1619
-245:1621
-245:1622
-245:1623
-245:1624
-245:1625
-245:1626
-245:1627
-245:1629
-245:1630
-245:1631
-245:1632
-245:1633
-245:1634
-245:1635
-245:1636
-248:455
-248:459
-248:485
-248:486
-248:487
-248:488
-248:489
-248:490
-248:491
-249:460
-249:461
-249:462
-249:463
-249:465
-249:466
-249:467
-249:468
-249:469
-249:470
-249:471
-249:472
-249:473
-249:474
-249:475
-249:477
-249:478
-249:479
-249:480
-249:481
-249:482
-249:483
-249:484
-252:1477
-252:1481
-252:1482
-252:1483
-252:1529
-252:1530
-252:1531
-252:1532
-252:1533
-252:1534
-252:1535
-252:1536
-252:1537
-252:1538
-252:1539
-252:1540
-253:1484
-253:1485
-253:1487
-253:1488
-253:1489
-253:1490
-253:1491
-253:1492
-253:1493
-253:1494
-253:1495
-253:1496
-253:1497
-253:1499
-253:1500
-253:1501
-253:1503
-253:1504
-253:1505
-253:1506
-253:1507
-253:1508
-253:1515
-253:1516
-253:1517
-253:1518
-253:1519
-254:1523
-254:1524
-254:1525
-254:1526
-257:1646
-257:1650
-257:1663
-257:1664
-257:1665
-257:1666
-257:1667
-257:1668
-257:1669
-258:1651
-258:1652
-258:1653
-258:1654
-258:1655
-258:1656
-258:1657
-258:1658
-258:1659
-258:1660
-258:1661
-258:1662
-261:1671
-261:1675
-261:1685
-261:1686
-261:1687
-261:1688
-261:1689
-261:1690
-261:1691
-262:1676
-262:1677
-262:1678
-262:1679
-262:1680
-262:1681
-262:1682
-262:1683
-262:1684
-265:714
-265:718
-265:778
-265:779
-265:780
-265:781
-265:782
-265:783
-265:784
-266:719
-266:720
-266:721
-266:722
-266:723
-266:725
-266:726
-266:727
-266:728
-266:729
-266:730
-266:731
-266:732
-266:741
-266:742
-266:743
-266:744
-266:745
-266:776
-266:777
-267:748
-267:749
-267:750
-267:751
-267:752
-267:753
-267:756
-267:757
-267:758
-267:759
-267:762
-267:763
-267:764
-267:765
-267:766
-267:767
-267:770
-267:771
-267:772
-267:773
-267:774
-271:1693
-271:1701
-271:1796
-271:1797
-271:1798
-271:1799
-271:1800
-271:1801
-271:1802
-272:1702
-272:1703
-272:1704
-272:1705
-272:1794
-272:1795
-273:1696
-273:1697
-273:1707
-273:1708
-273:1709
-273:1710
-273:1711
-273:1712
-273:1713
-273:1714
-273:1717
-273:1718
-273:1719
-273:1720
-273:1721
-273:1722
-273:1725
-273:1726
-273:1727
-273:1728
-273:1729
-274:1731
-274:1732
-274:1733
-274:1734
-274:1763
-274:1764
-275:1735
-276:1698
-276:1736
-276:1737
-276:1738
-276:1739
-276:1740
-276:1741
-276:1742
-276:1743
-276:1744
-276:1745
-276:1746
-276:1747
-276:1748
-276:1753
-276:1754
-276:1755
-276:1756
-276:1757
-276:1758
-276:1760
-276:1761
-277:1699
-277:1749
-277:1750
-277:1751
-277:1752
-280:1762
-282:1766
-282:1767
-282:1768
-282:1769
-282:1770
-282:1771
-282:1772
-282:1773
-282:1788
-282:1789
-282:1790
-282:1791
-282:1792
-283:1776
-283:1777
-283:1778
-283:1779
-283:1780
-283:1781
-288:1542
-288:1546
-288:1561
-288:1562
-288:1563
-288:1564
-288:1565
-288:1566
-288:1567
-289:1547
-289:1548
-289:1549
-289:1550
-289:1551
-289:1552
-289:1553
-289:1554
-289:1555
-289:1556
-289:1557
-289:1559
-289:1560
-292:493
-292:503
-292:504
-292:505
-292:701
-292:702
-292:703
-292:704
-292:705
-292:706
-292:707
-292:708
-292:709
-292:710
-292:711
-292:712
-293:506
-293:507
-293:508
-293:509
-293:510
-293:511
-293:512
-293:513
-293:514
-293:515
-294:518
-294:519
-294:520
-294:521
-294:522
-294:523
-294:524
-294:525
-294:526
-294:527
-295:530
-295:531
-295:532
-295:533
-295:534
-295:535
-295:536
-295:537
-295:538
-296:541
-296:542
-296:543
-296:544
-296:545
-296:546
-296:547
-296:548
-296:549
-296:550
-296:551
-297:554
-297:555
-297:556
-297:557
-297:558
-297:559
-297:560
-297:561
-297:562
-297:563
-297:564
-297:565
-297:566
-298:569
-298:570
-298:571
-298:572
-298:573
-298:574
-298:575
-298:576
-298:577
-298:578
-298:579
-298:580
-298:581
-299:496
-299:584
-299:585
-299:586
-299:587
-299:588
-299:589
-299:590
-299:591
-299:592
-299:593
-299:594
-299:595
-299:596
-299:597
-300:497
-300:600
-300:601
-300:602
-300:603
-300:604
-300:605
-300:606
-300:607
-300:608
-300:609
-300:610
-300:611
-300:612
-300:613
-301:616
-301:617
-301:618
-301:619
-301:620
-301:621
-301:622
-302:625
-302:626
-302:627
-302:628
-303:631
-303:632
-303:633
-303:634
-303:635
-303:636
-303:637
-303:638
-303:639
-303:640
-303:641
-304:498
-304:644
-304:645
-304:646
-304:647
-304:648
-304:649
-305:499
-305:652
-305:653
-305:654
-305:655
-305:656
-307:658
-308:659
-310:500
-310:663
-310:664
-310:665
-310:666
-310:667
-312:669
-313:670
-314:671
-315:672
-317:676
-317:677
-317:678
-317:679
-317:680
-318:501
-318:683
-318:684
-318:685
-318:686
-318:687
-320:689
-321:690
-323:694
-323:695
-323:696
-323:697
-323:698
-326:2054
-326:2058
-326:2059
-326:2060
-326:2061
-326:2062
-326:2063
-326:2064
-326:2065
-326:2107
-326:2108
-326:2109
-326:2110
-326:2111
-326:2112
-326:2113
-326:2114
-326:2115
-326:2116
-326:2117
-326:2118
-327:2068
-327:2069
-327:2070
-327:2071
-327:2072
-327:2073
-327:2074
-327:2075
-327:2076
-327:2077
-327:2078
-328:2081
-328:2082
-328:2083
-328:2084
-328:2085
-328:2086
-328:2087
-328:2088
-328:2089
-328:2090
-328:2091
-329:2094
-329:2095
-329:2096
-329:2097
-329:2098
-329:2099
-329:2100
-329:2101
-329:2102
-329:2103
-329:2104
-332:2120
-332:2124
-332:2125
-332:2126
-332:2127
-332:2128
-332:2129
-332:2130
-332:2131
-332:2132
-332:2133
-332:2134
-332:2135
-332:2136
-332:2137
-332:2138
-332:2139
-332:2140
-332:2141
-332:2142
-332:2144
-332:2145
-332:2146
-332:2147
-332:2148
-332:2149
-332:2150
-332:2151
-332:2152
-332:2153
-332:2154
-332:2155
-335:1804
-335:1809
-335:1811
-335:1812
-335:1813
-335:2041
-335:2042
-335:2043
-335:2044
-335:2045
-335:2046
-335:2047
-335:2048
-335:2049
-335:2050
-335:2051
-335:2052
-337:1814
-337:1815
-337:1816
-337:1817
-337:1818
-337:1819
-337:1821
-337:1822
-337:1823
-337:1824
-337:1825
-337:1826
-337:1827
-337:1828
-337:1829
-337:1873
-337:1874
-337:2024
-337:2025
-337:2026
-337:2027
-337:2028
-338:1807
-338:1831
-338:1832
-338:1833
-338:1834
-338:1835
-338:1836
-338:1837
-338:1838
-338:1847
-338:1848
-338:1849
-338:1850
-338:1851
-339:1854
-339:1855
-339:1856
-339:1857
-339:1858
-339:1859
-339:1860
-339:1867
-339:1868
-339:1869
-339:1870
-339:1871
-341:1877
-341:1878
-341:1879
-341:1880
-341:1881
-341:1882
-341:1883
-341:1927
-341:1928
-342:1808
-342:1885
-342:1886
-342:1887
-342:1888
-342:1889
-342:1890
-342:1891
-342:1892
-342:1901
-342:1902
-342:1903
-342:1904
-342:1905
-343:1908
-343:1909
-343:1910
-343:1911
-343:1912
-343:1913
-343:1914
-343:1921
-343:1922
-343:1923
-343:1924
-343:1925
-345:1931
-345:1932
-345:1933
-345:1934
-345:1935
-345:1936
-345:1937
-345:1958
-345:1959
-346:1939
-346:1940
-346:1941
-346:1942
-346:1943
-346:1944
-346:1945
-346:1952
-346:1953
-346:1954
-346:1955
-346:1956
-348:1962
-348:1963
-348:1964
-348:1965
-348:1966
-348:1967
-348:1968
-348:1989
-348:1990
-349:1970
-349:1971
-349:1972
-349:1973
-349:1974
-349:1975
-349:1976
-349:1983
-349:1984
-349:1985
-349:1986
-349:1987
-351:1993
-351:1994
-351:1995
-351:1996
-351:1997
-351:1998
-351:1999
-351:2020
-351:2021
-352:2001
-352:2002
-352:2003
-352:2004
-352:2005
-352:2006
-352:2007
-352:2014
-352:2015
-352:2016
-352:2017
-352:2018
-355:2030
-356:2033
-356:2034
-356:2035
-356:2036
-356:2037
-356:2038
-359:2157
-359:2161
-359:2162
-359:2163
-359:2180
-359:2181
-359:2182
-359:2183
-359:2184
-359:2185
-359:2186
-359:2187
-359:2188
-359:2189
-359:2190
-359:2191
-360:2164
-360:2165
-360:2166
-360:2167
-360:2168
-360:2169
-361:2172
-361:2173
-361:2174
-361:2175
-361:2176
-361:2177
-*E
diff --git a/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java b/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java
deleted file mode 100644
index 7e6fbcb..0000000
--- a/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.java
+++ /dev/null
@@ -1,129 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "antlr.print.g" -> "ANTLRTreePrinter.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.tool;
-	import java.util.*;
-
-public interface ANTLRTreePrinterTokenTypes {
-	int EOF = 1;
-	int NULL_TREE_LOOKAHEAD = 3;
-	int OPTIONS = 4;
-	int TOKENS = 5;
-	int PARSER = 6;
-	int LEXER = 7;
-	int RULE = 8;
-	int BLOCK = 9;
-	int OPTIONAL = 10;
-	int CLOSURE = 11;
-	int POSITIVE_CLOSURE = 12;
-	int SYNPRED = 13;
-	int RANGE = 14;
-	int CHAR_RANGE = 15;
-	int EPSILON = 16;
-	int ALT = 17;
-	int EOR = 18;
-	int EOB = 19;
-	int EOA = 20;
-	int ID = 21;
-	int ARG = 22;
-	int ARGLIST = 23;
-	int RET = 24;
-	int LEXER_GRAMMAR = 25;
-	int PARSER_GRAMMAR = 26;
-	int TREE_GRAMMAR = 27;
-	int COMBINED_GRAMMAR = 28;
-	int INITACTION = 29;
-	int LABEL = 30;
-	int TEMPLATE = 31;
-	int SCOPE = 32;
-	int GATED_SEMPRED = 33;
-	int SYN_SEMPRED = 34;
-	int BACKTRACK_SEMPRED = 35;
-	int FRAGMENT = 36;
-	int ACTION = 37;
-	int DOC_COMMENT = 38;
-	int SEMI = 39;
-	int LITERAL_lexer = 40;
-	int LITERAL_tree = 41;
-	int LITERAL_grammar = 42;
-	int AMPERSAND = 43;
-	int COLON = 44;
-	int RCURLY = 45;
-	int ASSIGN = 46;
-	int STRING_LITERAL = 47;
-	int CHAR_LITERAL = 48;
-	int INT = 49;
-	int STAR = 50;
-	int TOKEN_REF = 51;
-	int LITERAL_protected = 52;
-	int LITERAL_public = 53;
-	int LITERAL_private = 54;
-	int BANG = 55;
-	int ARG_ACTION = 56;
-	int LITERAL_returns = 57;
-	int LITERAL_throws = 58;
-	int COMMA = 59;
-	int LPAREN = 60;
-	int OR = 61;
-	int RPAREN = 62;
-	int LITERAL_catch = 63;
-	int LITERAL_finally = 64;
-	int PLUS_ASSIGN = 65;
-	int SEMPRED = 66;
-	int IMPLIES = 67;
-	int ROOT = 68;
-	int RULE_REF = 69;
-	int NOT = 70;
-	int TREE_BEGIN = 71;
-	int QUESTION = 72;
-	int PLUS = 73;
-	int WILDCARD = 74;
-	int REWRITE = 75;
-	int DOLLAR = 76;
-	int DOUBLE_QUOTE_STRING_LITERAL = 77;
-	int DOUBLE_ANGLE_STRING_LITERAL = 78;
-	int WS = 79;
-	int COMMENT = 80;
-	int SL_COMMENT = 81;
-	int ML_COMMENT = 82;
-	int OPEN_ELEMENT_OPTION = 83;
-	int CLOSE_ELEMENT_OPTION = 84;
-	int ESC = 85;
-	int DIGIT = 86;
-	int XDIGIT = 87;
-	int NESTED_ARG_ACTION = 88;
-	int NESTED_ACTION = 89;
-	int ACTION_CHAR_LITERAL = 90;
-	int ACTION_STRING_LITERAL = 91;
-	int ACTION_ESC = 92;
-	int WS_LOOP = 93;
-	int INTERNAL_RULE_REF = 94;
-	int WS_OPT = 95;
-	int SRC = 96;
-}
diff --git a/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt b/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt
deleted file mode 100644
index 88d2e6a..0000000
--- a/src/org/antlr/tool/ANTLRTreePrinterTokenTypes.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): antlr.print.g -> ANTLRTreePrinterTokenTypes.txt$
-ANTLRTreePrinter    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
diff --git a/src/org/antlr/tool/ANTLRv3.g b/src/org/antlr/tool/ANTLRv3.g
deleted file mode 100644
index d089640..0000000
--- a/src/org/antlr/tool/ANTLRv3.g
+++ /dev/null
@@ -1,745 +0,0 @@
-/** ANTLR v3 grammar written in ANTLR v3 */
-grammar ANTLRv3;
-
-options {
-	output=AST;
-}
-
-tokens {
-	DOC_COMMENT;
-	PARSER;	
-    LEXER;
-    RULE;
-    BLOCK;
-    OPTIONAL;
-    CLOSURE;
-    POSITIVE_CLOSURE;
-    SYNPRED;
-    RANGE;
-    CHAR_RANGE;
-    EPSILON;
-    ALT;
-    EOR;
-    EOB;
-    EOA; // end of alt
-    ID;
-    ARG;
-    ARGLIST;
-    RET;
-    LEXER_GRAMMAR;
-    PARSER_GRAMMAR;
-    TREE_GRAMMAR;
-    COMBINED_GRAMMAR;
-    INITACTION;
-    LABEL; // $x used in rewrite rules
-    TEMPLATE;
-    SCOPE='scope';
-    SEMPRED;
-    GATED_SEMPRED; // {p}? =>
-    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
-    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
-    FRAGMENT='fragment';
-}
-
- at header {
-package org.antlr.tool;
-import java.util.Map;
-import java.util.HashMap;
-}
-
- at members {
-	Grammar grammar = null;
-	protected int gtype = 0;
-	protected String currentRuleName = null;
-	protected GrammarAST currentBlockAST = null;
-}
-
-grammarDef
- at init {
-		for (int i=0; i<input.size(); i++) {
-			System.out.println(input.get(i));
-		}
-}
-    :   DOC_COMMENT?
-    	(	'lexer'  {gtype=LEXER_GRAMMAR;}    // pure lexer
-    	|   'parser' {gtype=PARSER_GRAMMAR;}   // pure parser
-    	|   'tree'   {gtype=TREE_GRAMMAR;}     // a tree parser
-    	|		     {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
-    	)
-    	'grammar' id ';' optionsSpec? tokensSpec? attrScope* action*
-    	rule+
-    	EOF
-    	-> ^('grammar' id DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action*)
-    ;
-
-tokensSpec
-	:	TOKENS tokenSpec+ '}' -> ^(TOKENS tokenSpec+)
-	;
-
-tokenSpec
-	:	TOKEN_REF
-		(	'=' (lit=STRING_LITERAL|lit=CHAR_LITERAL)	-> ^('=' TOKEN_REF $lit)
-		|												-> TOKEN_REF
-		)
-		';'
-	;
-
-attrScope
-	:	'scope' id ACTION -> ^('scope' id ACTION)
-	;
-
-/** Match stuff like @parser::members {int i;} */
-action
-	:	'@' (actionScopeName '::')? id ACTION
-		-> ^('@' actionScopeName? id ACTION)
-	;
-
-/** Sometimes the scope names will collide with keywords; allow them as
- *  ids for action scopes.
- */
-actionScopeName
-	:	id
-	|	l='lexer'	-> ID[$l]
-    |   p='parser'	-> ID[$p]
-	;
-
-optionsSpec returns [Map opts]
- at init {
-	$opts=new HashMap();
-}
-	:	OPTIONS (option[$opts] ';')+ '}'
-		-> ^(OPTIONS option+)
-	;
-
-option[Map opts]
-    :   id '=' v=optionValue {$opts.put($id.text, $v.value);}
-    	-> ^('=' id optionValue)
- 	;
- 	
-optionValue returns [Object value]
- at init {$value=null;}
-    :   id			 	{$value = $id.text;}
-    |   STRING_LITERAL	{String vs = $STRING_LITERAL.text;
-                         $value=vs.substring(1,vs.length()-1);}
-    |   CHAR_LITERAL	{String vs = $CHAR_LITERAL.text;
-                         $value=vs.substring(1,vs.length()-1);}
-    |   INT				{$value = new Integer($INT.text);}
-    |	s='*'			{$value = '*';} -> STRING_LITERAL[$s]  // used for k=*
-    ;
-
-rule
- at init {
-GrammarAST modifier=null, blk=null, blkRoot=null, eob=null;
-int start = ((TokenWithIndex)LT(1)).getIndex();
-int startLine = LT(1).getLine();
-GrammarAST opt = null;
-Map opts = null;
-}
-	:	DOC_COMMENT?
-		{modifier=input.LT(1);}
-		(	'protected'
-		|	'public'
-		|	'private'
-		|	frag='fragment'
-		)?
-		ruleName=id
-		{
-		currentRuleName=$ruleName.text;
-	    if ( gtype==LEXER_GRAMMAR && $frag==null ) {
-	        lexerRuleNames.add(currentRuleName);
-		}
-		}
-		'!'?
-		( arg=ARG_ACTION )?
-		( 'returns' rt=ARG_ACTION  )?
-		throwsSpec?
-	    optionsSpec?
-		ruleScopeSpec
-		ruleAction+
-		':'
-		b=altList[opts]
-		semi=';'
-		exceptionGroup?
-	    {
-	    /*
-	    int stop = ((TokenWithIndex)LT(1)).getIndex()-1; // point at the semi or exception thingie
-		eob.setLine(semi.getLine());
-		eob.setColumn(semi.getColumn());
-	    GrammarAST eor = #[EOR,'<end-of-rule>'];
-	   	eor.setEnclosingRule($ruleName.text);
-		eor.setLine(semi.getLine());
-		eor.setColumn(semi.getColumn());
-		GrammarAST root = #[RULE,'rule'];
-		root.ruleStartTokenIndex = start;
-		root.ruleStopTokenIndex = stop;
-		root.setLine(startLine);
-		root.options = opts;
-	    #rule = #(root,
-	              #ruleName,modifier,#(#[ARG,'ARG'],#aa),#(#[RET,'RET'],#rt),
-	              opt,#scopes,#a,blk,ex,eor);
-	              */
-		currentRuleName=null;
-	    }
-	    -> ^( RULE $ruleName {modifier} ^(ARG $arg)? ^(RET $rt)?
-	    	  optionsSpec? ruleScopeSpec? ruleAction+
-	    	  altList
-	    	  exceptionGroup?
-	    	  EOR["<end-of-rule>"]
-	    	)	    	  
-	;
-
-/** Match stuff like @init {int i;} */
-ruleAction
-	:	'@' id ACTION -> ^('@' id ACTION)
-	;
-
-throwsSpec
-	:	'throws' id ( ',' id )* -> ^('throws' id+)
-	;
-
-ruleScopeSpec
- at init {
-}
-	:	( 'scope' ACTION )?
-		( 'scope' id+ ';' )*
-		-> ^('scope' ACTION? id+)
-	;
-
-/** Build #(BLOCK ( #(ALT ...) EOB )+ ) */
-block
- at init {
-GrammarAST save = currentBlockAST;
-Map opts=null;
-}
- at after {
-$block.tree.setOptions(grammar,opts);
-}
-    :   lp='('
-		( (opts=optionsSpec)? ':' )?
-		{currentBlockAST = lp;}
-		a1=alternative rewrite
-		{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a1);}
-		( '|' a2=alternative rewrite
-		  {if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred(#a2);}
-		)*
-        rp=')'
-        {
-		currentBlockAST = save;
-		}
-        -> ^( BLOCK[$lp] optionsSpec? alternative+ EOB[$rp] )
-    ;
-
-altList[Map opts]
- at init {
-	GrammarAST blkRoot = #[BLOCK,'BLOCK'];
-	blkRoot.setLine(LT(1).getLine());
-	blkRoot.setColumn(LT(1).getColumn());
-	GrammarAST save = currentBlockAST;
-	currentBlockAST = #blkRoot;
-}
-    :   a1=alternative rewrite
-		{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a1.tree);}
-    	( '|' a2=alternative rewrite
-    	  {if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a2.tree);}
-    	)*
-        {
-        currentBlockAST = save;
-        }
-		-> ^( {blkRoot} (alternative rewrite)+ EOB["<end-of-block>"] )
-    ;
-
-alternative
- at init {
-	Token firstToken = input.LT(1);
-}
-    :   ( el=element )+ -> ^(ALT[firstToken] element+ EOA["<end-of-alt>"])
-    |   -> ^(ALT[input.LT(1)] EPSILON[input.LT(-1)] EOA["<end-of-alt>"])
-    ;
-
-exceptionGroup
-	:	( exceptionHandler )+ ( finallyClause )?
-	|	finallyClause
-    ;
-
-exceptionHandler
-    :    'catch'^ ARG_ACTION ACTION
-    ;
-
-finallyClause
-    :    'finally'^ ACTION
-    ;
-
-element
-	:	elementNoOptionSpec
-	;
-
-elementNoOptionSpec
- at init {
-    IntSet elements=null;
-    GrammarAST sub, sub2;
-}
-	:	id ('='^|'+='^) (atom|block)
-        ( sub=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub;} )?
-    |   atom
-        ( sub2=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub2;} )?
-    |	ebnf
-	|   ACTION
-	|   p=SEMPRED ( '=>' ! {#p.setType(GATED_SEMPRED);} )?
-		{
-		#p.setEnclosingRule(currentRuleName);
-		grammar.blocksWithSemPreds.add(currentBlockAST);
-		}
-	|   t3=treeSpec
-	;
-
-atom:   range ('^'^|'!'^)?
-    |   terminal
-    |	notSet ('^'^|'!'^)?
-    |   rr=RULE_REF^
-		( ARG_ACTION )?
-		('^'^|'!'^)?
-    ;
-
-notSet
- at init {
-    int line = LT(1).getLine();
-    int col = LT(1).getColumn();
-    GrammarAST subrule=null;
-}
-	:	n='~'^
-		(	notTerminal
-        |   block
-		)
-        {#notSet.setLine(line); #notSet.setColumn(col);}
-	;
-
-treeSpec :
-	'^('^
-        element ( element )+
-    ')'!
-	;
-
-/** matches ENBF blocks (and sets via block rule) */
-ebnf
- at init {
-    int line = LT(1).getLine();
-    int col = LT(1).getColumn();
-}
-	:	b=block
-		(	'?'    {#ebnf=#([OPTIONAL,'?'],#b);}
-		|	'*'	    {#ebnf=#([CLOSURE,'*'],#b);}
-		|	'+'	    {#ebnf=#([POSITIVE_CLOSURE,'+'],#b);}
-		|   '=>'! // syntactic predicate
-			{
-			if ( gtype==COMBINED_GRAMMAR &&
-			     Character.isUpperCase(currentRuleName.charAt(0)) )
-		    {
-                // ignore for lexer rules in combined
-		    	#ebnf = #(#[SYNPRED,'=>'],#b); 
-		    }
-		    else {
-		    	// create manually specified (...)=> predicate;
-                // convert to sempred
-		    	#ebnf = createSynSemPredFromBlock(#b, SYN_SEMPRED);
-			}
-			}
-		|   '^' {#ebnf = #(#ROOT, #b);}
-		|   '!' {#ebnf = #(#BANG, #b);}
-        |   {#ebnf = #b;}
-		)
-		{#ebnf.setLine(line); #ebnf.setColumn(col);}
-	;
-
-range!
- at init {
-GrammarAST subrule=null, root=null;
-}
-	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL
-		{
-		GrammarAST r = #[CHAR_RANGE,".."];
-		r.setLine(c1.getLine());
-		r.setColumn(c1.getColumn());
-		#range = #(r, #c1, #c2);
-		root = #range;
-		}
-//    	(subrule=ebnfSuffix[root,false] {#range=subrule;})?
-	;
-
-terminal
- at init {
-GrammarAST ebnfRoot=null, subrule=null;
-}
-    :   CHAR_LITERAL^ ('^'^|'!'^)?
-
-	|   TOKEN_REF^
-			( ARG_ACTION )? // Args are only valid for lexer rules
-            ('^'^|'!'^)?
-
-	|   STRING_LITERAL ('^'^|'!'^)?
-
-	|   '.' ('^'^|'!'^)?
-	;
-
-ebnfSuffix[GrammarAST elemAST, boolean inRewrite] returns [GrammarAST subrule=null]
- at init {
-GrammarAST ebnfRoot=null;
-// bang on alt
-}
-	:	(	'?'	{ebnfRoot = #[OPTIONAL,'?'];}
-   		|	'*' {ebnfRoot = #[CLOSURE,'*'];}
-   		|	'+' {ebnfRoot = #[POSITIVE_CLOSURE,'+'];}
-   		)
-    	{
-		GrammarAST save = currentBlockAST;
-       	ebnfRoot.setLine(elemAST.getLine());
-       	ebnfRoot.setColumn(elemAST.getColumn());
-    	GrammarAST blkRoot = #[BLOCK,"BLOCK"];
-    	currentBlockAST = blkRoot;
-       	GrammarAST eob = #[EOB,'<end-of-block>'];
-		eob.setLine(elemAST.getLine());
-		eob.setColumn(elemAST.getColumn());
-		GrammarAST alt = #(#[ALT,'ALT'],elemAST,#[EOA,"<end-of-alt>"]);
-    	if ( !inRewrite ) {
-    		prefixWithSynPred(alt);
-    	}
-  		subrule =
-  		     #(ebnfRoot,
-  		       #(blkRoot,alt,eob)
-  		      );
-  		currentBlockAST = save;
-   		}
-    ;
-
-notTerminal
-	:   CHAR_LITERAL
-	|	TOKEN_REF
-	|	STRING_LITERAL
-	;
-
-
-// R E W R I T E  S Y N T A X
-
-rewrite
- at init {
-    GrammarAST root = new GrammarAST();
-    // bang on alt
-}
-	:
-		( rew='->' pred=SEMPRED alt=rewrite_alternative
-	      {root.addChild( #(#rew, #pred, #alt) );}
-		  {
-          #pred.setEnclosingRule(currentRuleName);
-          #rew.setEnclosingRule(currentRuleName);
-          }
-	    )*
-		rew2='->' alt2=rewrite_alternative
-        {
-        root.addChild( #(#rew2, #alt2) );
-        #rewrite = (GrammarAST)root.getFirstChild();
-        }
-	|
-	;
-
-rewrite_block
-    :   lp='('^ {#lp.setType(BLOCK); #lp.setText('BLOCK');}
-		rewrite_alternative
-        ')'!
-        {
-        GrammarAST eob = #[EOB,"<end-of-block>"];
-        eob.setLine(lp.getLine());
-        eob.setColumn(lp.getColumn());
-        #rewrite_block.addChild(eob);
-        }
-    ;
-
-rewrite_alternative
- at init {
-    GrammarAST eoa = #[EOA, "<end-of-alt>"];
-    GrammarAST altRoot = #[ALT,"ALT"];
-    altRoot.setLine(LT(1).getLine());
-    altRoot.setColumn(LT(1).getColumn());
-}
-    :	{grammar.buildTemplate()}? rewrite_template
-
-    |	{grammar.buildAST()}? ( rewrite_element )+
-        {
-            if ( #rewrite_alternative==null ) {
-                #rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);
-            }
-            else {
-                #rewrite_alternative = #(altRoot, #rewrite_alternative,eoa);
-            }
-        }
-
-   	|   {#rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);}
-    ;
-
-rewrite_element
- at init {
-GrammarAST subrule=null;
-}
-	:	t=rewrite_atom
-    	( subrule=ebnfSuffix[#t,true] {#rewrite_element=subrule;} )?
-	|   rewrite_ebnf
-	|   tr=rewrite_tree
-    	( subrule=ebnfSuffix[#tr,true] {#rewrite_element=subrule;} )?
-	;
-
-rewrite_atom
- at init {
-GrammarAST subrule=null;
-}
-    :   CHAR_LITERAL
-	|   TOKEN_REF^ (ARG_ACTION)? // for imaginary nodes
-    |   RULE_REF
-	|   STRING_LITERAL
-	|   // bang on this alt
-		d='$' i=id // reference to a label in a rewrite rule
-		{
-		#rewrite_atom = #[LABEL,i_AST.getText()];
-		#rewrite_atom.setLine(#d.getLine());
-		#rewrite_atom.setColumn(#d.getColumn());
-        #rewrite_atom.setEnclosingRule(currentRuleName);
-		}
-	|	ACTION
-	;
-
-rewrite_ebnf!
- at init {
-    int line = LT(1).getLine();
-    int col = LT(1).getColumn();
-}
-	:	b=rewrite_block
-		(	'?'   {#rewrite_ebnf=#([OPTIONAL,'?'],#b);}
-		|	'*'	  {#rewrite_ebnf=#([CLOSURE,'*'],#b);}
-		|	'+'	  {#rewrite_ebnf=#([POSITIVE_CLOSURE,'+'],#b);}
-		)
-		{#rewrite_ebnf.setLine(line); #rewrite_ebnf.setColumn(col);}
-	;
-
-rewrite_tree :
-	'^(' rewrite_atom rewrite_element* ')' -> ^('^(' rewrite_atom rewrite_element* )
-	;
-
-/** Build a tree for a template rewrite:
-      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
-    where ARGLIST is always there even if no args exist.
-    ID can be "template" keyword.  If first child is ACTION then it's
-    an indirect template ref
-
-    -> foo(a={...}, b={...})
-    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
-    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
-	-> {st-expr} // st-expr evaluates to ST
- */
-rewrite_template
- at init {Token st=null;}
-	:   // -> template(a={...},...) "..."
-		{LT(1).getText().equals('template')}? // inline
-		rewrite_template_head {st=LT(1);}
-		( DOUBLE_QUOTE_STRING_LITERAL! | DOUBLE_ANGLE_STRING_LITERAL! )
-		{#rewrite_template.addChild(#[st]);}
-
-	|	// -> foo(a={...}, ...)
-		rewrite_template_head
-
-	|	// -> ({expr})(a={...}, ...)
-		rewrite_indirect_template_head
-
-	|	// -> {...}
-		ACTION
-	;
-
-/** -> foo(a={...}, ...) */
-rewrite_template_head
-	:	id lp='('^ {#lp.setType(TEMPLATE); #lp.setText('TEMPLATE');}
-		rewrite_template_args
-		')'!
-	;
-
-/** -> ({expr})(a={...}, ...) */
-rewrite_indirect_template_head
-	:	lp='('^ {#lp.setType(TEMPLATE); #lp.setText('TEMPLATE');}
-		ACTION
-		')'!
-		'('! rewrite_template_args ')'!
-	;
-
-rewrite_template_args
-	:	rewrite_template_arg (','! rewrite_template_arg)*
-		{#rewrite_template_args = #(#[ARGLIST,"ARGLIST"], rewrite_template_args);}
-	|	{#rewrite_template_args = #[ARGLIST,"ARGLIST"];}
-	;
-
-rewrite_template_arg
-	:   id a='=' ACTION -> ^(ARG[$a] id ACTION)
-	;
-
-idList
-	:	id+
-	;
-
-id	:	TOKEN_REF -> ID[$TOKEN_REF]
-	|	RULE_REF  -> ID[$RULE_REF]
-	;
-
-// L E X I C A L   R U L E S
-
-SL_COMMENT
- 	:	'//'
- 	 	(	' $ANTLR ' SRC // src directive
- 		|	.*
-		)
-		'\r'? '\n'
-		{$channel=HIDDEN;}
-	;
-
-ML_COMMENT
-	:	'/*' {if (input.LA(1)=='*') $type=DOC_COMMENT; else $channel=HIDDEN;} .* '*/'
-	;
-
-CHAR_LITERAL
-	:	'\'' LITERAL_CHAR '\''
-	;
-
-STRING_LITERAL
-	:	'\'' LITERAL_CHAR LITERAL_CHAR* '\''
-	;
-
-fragment
-LITERAL_CHAR
-	:	ESC
-	|	~('\''|'\\')
-	;
-
-DOUBLE_QUOTE_STRING_LITERAL
-	:	'"' LITERAL_CHAR* '"'
-	;
-
-DOUBLE_ANGLE_STRING_LITERAL
-	:	'<<' .* '>>'
-	;
-
-fragment
-ESC	:	'\\'
-		(	'n'
-		|	'r'
-		|	't'
-		|	'b'
-		|	'f'
-		|	'"'
-		|	'\''
-		|	'\\'
-		|	'>'
-		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
-		|	. // unknown, leave as it is
-		)
-	;
-
-fragment
-XDIGIT :
-		'0' .. '9'
-	|	'a' .. 'f'
-	|	'A' .. 'F'
-	;
-
-INT	:	'0'..'9'+
-	;
-
-ARG_ACTION
-	:	NESTED_ARG_ACTION
-	;
-
-fragment
-NESTED_ARG_ACTION :
-	'['!
-	(	options {greedy=false; k=1;}
-	:	NESTED_ARG_ACTION
-	|	ACTION_STRING_LITERAL
-	|	ACTION_CHAR_LITERAL
-	|	.
-	)*
-	']'!
-	;
-
-ACTION
-	:	NESTED_ACTION ( '?' {$type = SEMPRED;} )?
-	;
-
-fragment
-NESTED_ACTION :
-	'{'
-	(	options {greedy=false; k=3;}
-	:	NESTED_ACTION
-//	|	DOC_COMMENT
-	|	SL_COMMENT
-	|	ML_COMMENT
-	|	ACTION_STRING_LITERAL
-	|	ACTION_CHAR_LITERAL
-	|	.
-	)*
-	'}'
-   ;
-
-fragment
-ACTION_CHAR_LITERAL
-	:	'\'' (ACTION_ESC|.) '\''
-	;
-
-fragment
-ACTION_STRING_LITERAL
-	:	'"' (ACTION_ESC|.) (ACTION_ESC|.)* '"'
-	;
-
-fragment
-ACTION_ESC
-	:	'\\\''
-	|	'\\"'
-	|	'\\' ~('\''|'"')
-	;
-
-TOKEN_REF
-	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-	;
-
-RULE_REF
-	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-	;
-	
-OPTIONS
-	:	'options' WS_LOOP '{' {$channel=0;} // reset after WS call
-	;
-	
-TOKENS
-	:	'tokens' WS_LOOP '{' {$channel=0;} // reset after WS call
-	;
-
-/** Reset the file and line information; useful when the grammar
- *  has been generated so that errors are shown relative to the
- *  original file like the old C preprocessor used to do.
- */
-protected
-SRC	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
-		{
-		//setFilename($file.text.substring(1,$file.text.length()-1));
-		//setLine(Integer.parseInt($line.text)-1);  // -1 because SL_COMMENT will increment the line no. KR
-		$channel=HIDDEN;
-		}
-	;
-
-WS	:	(	' '
-		|	'\t'
-		|	'\r'? '\n'
-		)+
-		{$channel=HIDDEN;}
-	;
-
-fragment
-WS_LOOP
-	:	(	WS
-		|	SL_COMMENT
-		|	ML_COMMENT
-		)*
-		{$channel=HIDDEN;}
-	;
-
diff --git a/src/org/antlr/tool/ActionAnalysis.tokens b/src/org/antlr/tool/ActionAnalysis.tokens
deleted file mode 100644
index a788b35..0000000
--- a/src/org/antlr/tool/ActionAnalysis.tokens
+++ /dev/null
@@ -1,5 +0,0 @@
-X_Y=5
-Tokens=8
-Y=7
-ID=4
-X=6
diff --git a/src/org/antlr/tool/ActionAnalysisLexer.java b/src/org/antlr/tool/ActionAnalysisLexer.java
deleted file mode 100644
index e7ece4b..0000000
--- a/src/org/antlr/tool/ActionAnalysisLexer.java
+++ /dev/null
@@ -1,400 +0,0 @@
-// $ANTLR 3.0b7 ActionAnalysis.g 2007-04-03 12:25:48
-
-package org.antlr.tool;
-import org.antlr.runtime.*;
-import org.antlr.tool.AttributeScope;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarAST;
-import org.antlr.tool.Rule;
-
-import java.util.HashMap;
-/** We need to set Rule.referencedPredefinedRuleAttributes before
- *  code generation.  This filter looks at an action in context of
- *  its rule and outer alternative number and figures out which
- *  rules have predefined prefs referenced.  I need this so I can
- *  remove unusued labels.
- */
-public class ActionAnalysisLexer extends Lexer {
-    public static final int X_Y=5;
-    public static final int EOF=-1;
-    public static final int Tokens=8;
-    public static final int Y=7;
-    public static final int ID=4;
-    public static final int X=6;
-
-    Rule enclosingRule;
-    Grammar grammar;
-    antlr.Token actionToken;
-    int outerAltNum = 0;
-
-    	public ActionAnalysisLexer(Grammar grammar, String ruleName, GrammarAST actionAST)
-    	{
-    		this(new ANTLRStringStream(actionAST.token.getText()));
-    		this.grammar = grammar;
-    	    this.enclosingRule = grammar.getRule(ruleName);
-    	    this.actionToken = actionAST.token;
-    	    this.outerAltNum = actionAST.outerAltNum;
-    	}
-
-    public void analyze() {
-    	// System.out.println("###\naction="+actionToken);
-    	Token t;
-    	do {
-    		t = nextToken();
-    	} while ( t.getType()!= Token.EOF );
-    }
-
-    public ActionAnalysisLexer() {;}
-    public ActionAnalysisLexer(CharStream input) {
-        super(input);
-        ruleMemo = new HashMap[7+1];
-     }
-    public String getGrammarFileName() { return "ActionAnalysis.g"; }
-
-    public Token nextToken() {
-        while (true) {
-            if ( input.LA(1)==CharStream.EOF ) {
-                return Token.EOF_TOKEN;
-            }
-            token = null;
-    	channel = Token.DEFAULT_CHANNEL;
-            tokenStartCharIndex = input.index();
-            tokenStartCharPositionInLine = input.getCharPositionInLine();
-            tokenStartLine = input.getLine();
-    	text = null;
-            try {
-                int m = input.mark();
-                backtracking=1;
-                failed=false;
-                mTokens();
-                backtracking=0;
-
-                if ( failed ) {
-                    input.rewind(m);
-                    input.consume();
-                }
-                else {
-                    emit();
-                    return token;
-                }
-            }
-            catch (RecognitionException re) {
-                // shouldn't happen in backtracking mode, but...
-                reportError(re);
-                recover(re);
-            }
-        }
-    }
-
-    public void memoize(IntStream input,
-    		int ruleIndex,
-    		int ruleStartIndex)
-    {
-    if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
-    }
-
-    public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-    if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
-    return false;
-    }// $ANTLR start X_Y
-    public final void mX_Y() throws RecognitionException {
-        try {
-            int _type = X_Y;
-            // ActionAnalysis.g:73:7: ( '$' x= ID '.' y= ID {...}?)
-            // ActionAnalysis.g:73:7: '$' x= ID '.' y= ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            match('.'); if (failed) return ;
-            int yStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart, getCharIndex()-1);
-            if ( !(enclosingRule!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "X_Y", "enclosingRule!=null");
-            }
-            if ( backtracking==1 ) {
-
-              		AttributeScope scope = null;
-              		String refdRuleName = null;
-              		if ( x.getText().equals(enclosingRule.name) ) {
-              			// ref to enclosing rule.
-              			refdRuleName = x.getText();
-              			scope = enclosingRule.getLocalAttributeScope(y.getText());
-              		}
-              		else if ( enclosingRule.getRuleLabel(x.getText())!=null ) {
-              			// ref to rule label
-              			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel(x.getText());
-              			pair.actionReferencesLabel = true;
-              			refdRuleName = pair.referencedRuleName;
-              			Rule refdRule = grammar.getRule(refdRuleName);
-              			scope = refdRule.getLocalAttributeScope(y.getText());
-              		}
-              		else if ( enclosingRule.getRuleRefsInAlt(x.getText(), outerAltNum)!=null ) {
-              			// ref to rule referenced in this alt
-              			refdRuleName = x.getText();
-              			Rule refdRule = grammar.getRule(refdRuleName);
-              			scope = refdRule.getLocalAttributeScope(y.getText());
-              		}
-              		if ( scope!=null &&
-              			 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
-              		{
-              			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
-              			//System.out.println("referenceRuleLabelPredefinedAttribute for "+refdRuleName);
-              		}
-
-            }
-
-            }
-
-            this.type = _type;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end X_Y
-
-    // $ANTLR start X
-    public final void mX() throws RecognitionException {
-        try {
-            int _type = X;
-            // ActionAnalysis.g:106:5: ( '$' x= ID {...}?)
-            // ActionAnalysis.g:106:5: '$' x= ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int xStart = getCharIndex();
-            mID(); if (failed) return ;
-            Token x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart, getCharIndex()-1);
-            if ( !(enclosingRule!=null && enclosingRule.getRuleLabel(x.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "X", "enclosingRule!=null && enclosingRule.getRuleLabel($x.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel(x.getText());
-              			pair.actionReferencesLabel = true;
-
-            }
-
-            }
-
-            this.type = _type;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end X
-
-    // $ANTLR start Y
-    public final void mY() throws RecognitionException {
-        try {
-            int _type = Y;
-            // ActionAnalysis.g:114:5: ( '$' ID {...}?)
-            // ActionAnalysis.g:114:5: '$' ID {...}?
-            {
-            match('$'); if (failed) return ;
-            int ID1Start = getCharIndex();
-            mID(); if (failed) return ;
-            Token ID1 = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, ID1Start, getCharIndex()-1);
-            if ( !(enclosingRule!=null && enclosingRule.getLocalAttributeScope(ID1.getText())!=null) ) {
-                if (backtracking>0) {failed=true; return ;}
-                throw new FailedPredicateException(input, "Y", "enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null");
-            }
-            if ( backtracking==1 ) {
-
-              			AttributeScope scope = enclosingRule.getLocalAttributeScope(ID1.getText());
-              			if ( scope!=null &&
-              				 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
-              			{
-              				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
-              				//System.out.println("referenceRuleLabelPredefinedAttribute for "+ID1.getText());
-              			}
-
-            }
-
-            }
-
-            this.type = _type;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end Y
-
-    // $ANTLR start ID
-    public final void mID() throws RecognitionException {
-        try {
-            // ActionAnalysis.g:127:9: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* )
-            // ActionAnalysis.g:127:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
-            {
-            if ( (input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
-                input.consume();
-            failed=false;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                MismatchedSetException mse =
-                    new MismatchedSetException(null,input);
-                recover(mse);    throw mse;
-            }
-
-            // ActionAnalysis.g:127:33: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
-            loop1:
-            do {
-                int alt1=2;
-                int LA1_0 = input.LA(1);
-
-                if ( ((LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||LA1_0=='_'||(LA1_0>='a' && LA1_0<='z')) ) {
-                    alt1=1;
-                }
-
-
-                switch (alt1) {
-            	case 1 :
-            	    // ActionAnalysis.g:
-            	    {
-            	    if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
-            	        input.consume();
-            	    failed=false;
-            	    }
-            	    else {
-            	        if (backtracking>0) {failed=true; return ;}
-            	        MismatchedSetException mse =
-            	            new MismatchedSetException(null,input);
-            	        recover(mse);    throw mse;
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop1;
-                }
-            } while (true);
-
-
-            }
-
-        }
-        finally {
-        }
-    }
-    // $ANTLR end ID
-
-    public void mTokens() throws RecognitionException {
-        // ActionAnalysis.g:1:41: ( X_Y | X | Y )
-        int alt2=3;
-        int LA2_0 = input.LA(1);
-
-        if ( (LA2_0=='$') ) {
-            int LA2_1 = input.LA(2);
-
-            if ( (synpred1()) ) {
-                alt2=1;
-            }
-            else if ( (synpred2()) ) {
-                alt2=2;
-            }
-            else if ( (true) ) {
-                alt2=3;
-            }
-            else {
-                if (backtracking>0) {failed=true; return ;}
-                NoViableAltException nvae =
-                    new NoViableAltException("1:1: Tokens options {k=1; backtrack=true; } : ( X_Y | X | Y );", 2, 1, input);
-
-                throw nvae;
-            }
-        }
-        else {
-            if (backtracking>0) {failed=true; return ;}
-            NoViableAltException nvae =
-                new NoViableAltException("1:1: Tokens options {k=1; backtrack=true; } : ( X_Y | X | Y );", 2, 0, input);
-
-            throw nvae;
-        }
-        switch (alt2) {
-            case 1 :
-                // ActionAnalysis.g:1:41: X_Y
-                {
-                mX_Y(); if (failed) return ;
-
-                }
-                break;
-            case 2 :
-                // ActionAnalysis.g:1:45: X
-                {
-                mX(); if (failed) return ;
-
-                }
-                break;
-            case 3 :
-                // ActionAnalysis.g:1:47: Y
-                {
-                mY(); if (failed) return ;
-
-                }
-                break;
-
-        }
-
-    }
-
-    // $ANTLR start synpred1
-    public final void synpred1_fragment() throws RecognitionException {
-        // ActionAnalysis.g:1:41: ( X_Y )
-        // ActionAnalysis.g:1:41: X_Y
-        {
-        mX_Y(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred1
-
-    // $ANTLR start synpred2
-    public final void synpred2_fragment() throws RecognitionException {
-        // ActionAnalysis.g:1:45: ( X )
-        // ActionAnalysis.g:1:45: X
-        {
-        mX(); if (failed) return ;
-
-        }
-    }
-    // $ANTLR end synpred2
-
-    public final boolean synpred2() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred2_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-    public final boolean synpred1() {
-        backtracking++;
-        int start = input.mark();
-        try {
-            synpred1_fragment(); // can never throw exception
-        } catch (RecognitionException re) {
-            System.err.println("impossible: "+re);
-        }
-        boolean success = !failed;
-        input.rewind(start);
-        backtracking--;
-        failed=false;
-        return success;
-    }
-
-
- 
-
-}
\ No newline at end of file
diff --git a/src/org/antlr/tool/AssignTokenTypesWalker.java b/src/org/antlr/tool/AssignTokenTypesWalker.java
deleted file mode 100644
index fb747da..0000000
--- a/src/org/antlr/tool/AssignTokenTypesWalker.java
+++ /dev/null
@@ -1,1949 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "assign.types.g" -> "AssignTokenTypesWalker.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.tool;
-	import java.util.*;
-	import org.antlr.analysis.*;
-	import org.antlr.misc.*;
-	import java.io.*;
-
-import antlr.TreeParser;
-import antlr.Token;
-import antlr.collections.AST;
-import antlr.RecognitionException;
-import antlr.ANTLRException;
-import antlr.NoViableAltException;
-import antlr.MismatchedTokenException;
-import antlr.SemanticException;
-import antlr.collections.impl.BitSet;
-import antlr.ASTPair;
-import antlr.collections.impl.ASTArray;
-
-
-/** [Warning: TJP says that this is probably out of date as of 11/19/2005,
- *   but since it's probably still useful, I'll leave in.  Don't have energy
- *   to update at the moment.]
- *
- *  Compute the token types for all literals and rules etc..  There are
- *  a few different cases to consider for grammar types and a few situations
- *  within.
- *
- *  CASE 1 : pure parser grammar
- *	a) Any reference to a token gets a token type.
- *  b) The tokens section may alias a token name to a string or char
- *
- *  CASE 2 : pure lexer grammar
- *  a) Import token vocabulary if available. Set token types for any new tokens
- *     to values above last imported token type
- *  b) token rule definitions get token types if not already defined
- *  c) literals do NOT get token types
- *
- *  CASE 3 : merged parser / lexer grammar
- *	a) Any char or string literal gets a token type in a parser rule
- *  b) Any reference to a token gets a token type if not referencing
- *     a fragment lexer rule
- *  c) The tokens section may alias a token name to a string or char
- *     which must add a rule to the lexer
- *  d) token rule definitions get token types if not already defined
- *  e) token rule definitions may also alias a token name to a literal.
- *     E.g., Rule 'FOR : "for";' will alias FOR to "for" in the sense that
- *     references to either in the parser grammar will yield the token type
- *
- *  What this pass does:
- *
- *  0. Collects basic info about the grammar like grammar name and type;
- *     Oh, I have go get the options in case they affect the token types.
- *     E.g., tokenVocab option.
- *     Imports any token vocab name/type pairs into a local hashtable.
- *  1. Finds a list of all literals and token names.
- *  2. Finds a list of all token name rule definitions;
- *     no token rules implies pure parser.
- *  3. Finds a list of all simple token rule defs of form "<NAME> : <literal>;"
- *     and aliases them.
- *  4. Walks token names table and assign types to any unassigned
- *  5. Walks aliases and assign types to referenced literals
- *  6. Walks literals, assigning types if untyped
- *  4. Informs the Grammar object of the type definitions such as:
- *     g.defineToken(<charliteral>, ttype);
- *     g.defineToken(<stringliteral>, ttype);
- *     g.defineToken(<tokenID>, ttype);
- *     where some of the ttype values will be the same for aliases tokens.
- */
-public class AssignTokenTypesWalker extends antlr.TreeParser       implements AssignTokenTypesWalkerTokenTypes
- {
-
-    public void reportError(RecognitionException ex) {
-		Token token = null;
-		if ( ex instanceof MismatchedTokenException ) {
-			token = ((MismatchedTokenException)ex).token;
-		}
-		else if ( ex instanceof NoViableAltException ) {
-			token = ((NoViableAltException)ex).token;
-		}
-        ErrorManager.syntaxError(
-            ErrorManager.MSG_SYNTAX_ERROR,
-            grammar,
-            token,
-            "assign.types: "+ex.toString(),
-            ex);
-    }
-
-protected GrammarAST stringAlias;
-protected GrammarAST charAlias;
-protected GrammarAST stringAlias2;
-protected GrammarAST charAlias2;
-
-protected Grammar grammar;
-protected Map stringLiterals = new LinkedHashMap(); // Map<literal,Integer>
-protected Map tokens = new LinkedHashMap();         // Map<name,Integer>
-/** Track actual lexer rule defs so we don't get repeated token defs in 
- *  generated lexer.
- */
-protected Set tokenRuleDefs = new HashSet();        // Set<name>
-protected Map aliases = new LinkedHashMap();        // Map<name,literal>
-protected String currentRuleName;
-protected static final Integer UNASSIGNED = Utils.integer(-1);
-protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
-
-/** Track string literals in any non-lexer rule (could be in tokens{} section) */
-protected void trackString(GrammarAST t) {
-	// if lexer, don't allow aliasing in tokens section
-	if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
-		ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
-								  grammar,
-								  t.token,
-								  t.getText());
-		return;
-	}
-	// in a plain parser grammar rule, cannot reference literals
-	// (unless defined previously via tokenVocab option)
-	if ( grammar.type==Grammar.PARSER &&
-	     grammar.getTokenType(t.getText())==Label.INVALID )
-    {
-		ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
-								  grammar,
-								  t.token,
-								  t.getText());
-	}
-	// otherwise add literal to token types if referenced from parser rule
-	// or in the tokens{} section
-	if ( (currentRuleName==null ||
-         Character.isLowerCase(currentRuleName.charAt(0))) &&
-         grammar.getTokenType(t.getText())==Label.INVALID )
-	{
-		stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
-	}
-}
-
-protected void trackToken(GrammarAST t) {
-	// imported token names might exist, only add if new
-	if ( grammar.getTokenType(t.getText())==Label.INVALID ) {
-		tokens.put(t.getText(), UNASSIGNED);
-	}
-}
-
-protected void trackTokenRule(GrammarAST t,
-							  GrammarAST modifier,
-							  GrammarAST block)
-{
-	// imported token names might exist, only add if new
-	if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
-		if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
-			return;
-		}
-		int existing = grammar.getTokenType(t.getText());
-		if ( existing==Label.INVALID ) {
-			tokens.put(t.getText(), UNASSIGNED);
-		}
-		// look for "<TOKEN> : <literal> ;" pattern
-        // (can have optional action last)
-		if ( block.hasSameTreeStructure(charAlias) ||
-             block.hasSameTreeStructure(stringAlias) ||
-             block.hasSameTreeStructure(charAlias2) ||
-             block.hasSameTreeStructure(stringAlias2) )
-        {
-			alias(t, (GrammarAST)block.getFirstChild().getFirstChild());
-			tokenRuleDefs.add(t.getText());
-		}
-	}
-	// else error
-}
-
-protected void alias(GrammarAST t, GrammarAST s) {
-	aliases.put(t.getText(), s.getText());
-}
-
-protected void assignTypes() {
-	/*
-	System.out.println("stringLiterals="+stringLiterals);
-	System.out.println("tokens="+tokens);
-	System.out.println("aliases="+aliases);
-	*/
-
-	assignTokenIDTypes();
-
-	aliasTokenIDsAndLiterals();
-
-	assignStringTypes();
-
-	/*
-	System.out.println("AFTER:");
-	System.out.println("stringLiterals="+stringLiterals);
-	System.out.println("tokens="+tokens);
-	System.out.println("aliases="+aliases);
-	*/
-
-	notifyGrammarObject();
-}
-
-	protected void assignStringTypes() {
-		// walk string literals assigning types to unassigned ones
-		Set s = stringLiterals.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String lit = (String) it.next();
-			Integer oldTypeI = (Integer)stringLiterals.get(lit);
-			int oldType = oldTypeI.intValue();
-			if ( oldType<Label.MIN_TOKEN_TYPE ) {
-				Integer typeI = Utils.integer(grammar.getNewTokenType());
-				stringLiterals.put(lit, typeI);
-				// if string referenced in combined grammar parser rule,
-				// automatically define in the generated lexer
-				grammar.defineLexerRuleForStringLiteral(lit, typeI.intValue());
-			}
-		}
-	}
-
-	protected void aliasTokenIDsAndLiterals() {
-		if ( grammar.type==Grammar.LEXER ) {
-			return; // strings/chars are never token types in LEXER
-		}
-		// walk aliases if any and assign types to aliased literals if literal
-		// was referenced
-		Set s = aliases.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			String literal = (String)aliases.get(tokenID);
-			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
-				stringLiterals.put(literal, tokens.get(tokenID));
-				// an alias still means you need a lexer rule for it
-				Integer typeI = (Integer)tokens.get(tokenID);
-				if ( !tokenRuleDefs.contains(tokenID) ) {
-					grammar.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI.intValue());
-				}
-			}
-		}
-	}
-
-	protected void assignTokenIDTypes() {
-		// walk token names, assigning values if unassigned
-		Set s = tokens.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			if ( tokens.get(tokenID)==UNASSIGNED ) {
-				tokens.put(tokenID, Utils.integer(grammar.getNewTokenType()));
-			}
-		}
-	}
-
-	protected void notifyGrammarObject() {
-		Set s = tokens.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			int ttype = ((Integer)tokens.get(tokenID)).intValue();
-			grammar.defineToken(tokenID, ttype);
-		}
-		s = stringLiterals.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String lit = (String) it.next();
-			int ttype = ((Integer)stringLiterals.get(lit)).intValue();
-			grammar.defineToken(lit, ttype);
-		}
-	}
-
-	protected void init(Grammar g) {
-		this.grammar = g;
-        stringAlias = 
-            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(STRING_LITERAL)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
-        charAlias =
-            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(CHAR_LITERAL)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
-        stringAlias2 =
-            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(4)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(STRING_LITERAL)).add((GrammarAST)astFactory.create(ACTION)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
-        charAlias2 = 
-            (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK)).add((GrammarAST)astFactory.make( (new ASTArray(4)).add((GrammarAST)astFactory.create(ALT)).add((GrammarAST)astFactory.create(CHAR_LITERAL)).add((GrammarAST)astFactory.create(ACTION)).add((GrammarAST)astFactory.create(EOA)))).add((GrammarAST)astFactory.create(EOB)));
-	}
-public AssignTokenTypesWalker() {
-	tokenNames = _tokenNames;
-}
-
-	public final void grammar(AST _t,
-		Grammar g
-	) throws RecognitionException {
-		
-		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-			init(g);
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LEXER_GRAMMAR:
-			{
-				AST __t3 = _t;
-				GrammarAST tmp1_AST_in = (GrammarAST)_t;
-				match(_t,LEXER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammar.type = Grammar.LEXER;
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t3;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PARSER_GRAMMAR:
-			{
-				AST __t4 = _t;
-				GrammarAST tmp2_AST_in = (GrammarAST)_t;
-				match(_t,PARSER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammar.type = Grammar.PARSER;
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t4;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TREE_GRAMMAR:
-			{
-				AST __t5 = _t;
-				GrammarAST tmp3_AST_in = (GrammarAST)_t;
-				match(_t,TREE_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammar.type = Grammar.TREE_PARSER;
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t5;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case COMBINED_GRAMMAR:
-			{
-				AST __t6 = _t;
-				GrammarAST tmp4_AST_in = (GrammarAST)_t;
-				match(_t,COMBINED_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammar.type = Grammar.COMBINED;
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t6;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			assignTypes();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void grammarSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST cmt = null;
-		Map opts=null;
-		
-		try {      // for error handling
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			grammar.setName(id.getText());
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case DOC_COMMENT:
-			{
-				cmt = (GrammarAST)_t;
-				match(_t,DOC_COMMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case OPTIONS:
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKENS:
-			{
-				tokensSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop12:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==SCOPE)) {
-					attrScope(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop12;
-				}
-				
-			} while (true);
-			}
-			{
-			_loop14:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp5_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop14;
-				}
-				
-			} while (true);
-			}
-			rules(_t);
-			_t = _retTree;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final Map  optionsSpec(AST _t) throws RecognitionException {
-		Map opts=new HashMap();
-		
-		GrammarAST optionsSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t18 = _t;
-			GrammarAST tmp6_AST_in = (GrammarAST)_t;
-			match(_t,OPTIONS);
-			_t = _t.getFirstChild();
-			{
-			int _cnt20=0;
-			_loop20:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ASSIGN)) {
-					option(_t,opts);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt20>=1 ) { break _loop20; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt20++;
-			} while (true);
-			}
-			_t = __t18;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return opts;
-	}
-	
-	public final void tokensSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST tokensSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t30 = _t;
-			GrammarAST tmp7_AST_in = (GrammarAST)_t;
-			match(_t,TOKENS);
-			_t = _t.getFirstChild();
-			{
-			int _cnt32=0;
-			_loop32:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ASSIGN||_t.getType()==TOKEN_REF)) {
-					tokenSpec(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt32>=1 ) { break _loop32; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt32++;
-			} while (true);
-			}
-			_t = __t30;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void attrScope(AST _t) throws RecognitionException {
-		
-		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t16 = _t;
-			GrammarAST tmp8_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			GrammarAST tmp9_AST_in = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			GrammarAST tmp10_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t16;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rules(AST _t) throws RecognitionException {
-		
-		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			int _cnt38=0;
-			_loop38:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==RULE)) {
-					rule(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt38>=1 ) { break _loop38; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt38++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void option(AST _t,
-		Map opts
-	) throws RecognitionException {
-		
-		GrammarAST option_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		
-		String key=null;
-		Object value=null;
-		
-		
-		try {      // for error handling
-			AST __t22 = _t;
-			GrammarAST tmp11_AST_in = (GrammarAST)_t;
-			match(_t,ASSIGN);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			key=id.getText();
-			value=optionValue(_t);
-			_t = _retTree;
-			_t = __t22;
-			_t = _t.getNextSibling();
-			
-			opts.put(key,value);
-			// check for grammar-level option to import vocabulary
-			if ( currentRuleName==null && key.equals("tokenVocab") ) {
-			grammar.importTokenVocabulary((String)value);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final Object  optionValue(AST _t) throws RecognitionException {
-		Object value=null;
-		
-		GrammarAST optionValue_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST s = null;
-		GrammarAST c = null;
-		GrammarAST i = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ID:
-			{
-				id = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				value = id.getText();
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				value = s.getText();
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				value = c.getText();
-				break;
-			}
-			case INT:
-			{
-				i = (GrammarAST)_t;
-				match(_t,INT);
-				_t = _t.getNextSibling();
-				value = new Integer(i.getText());
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return value;
-	}
-	
-	public final void charSet(AST _t) throws RecognitionException {
-		
-		GrammarAST charSet_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t25 = _t;
-			GrammarAST tmp12_AST_in = (GrammarAST)_t;
-			match(_t,CHARSET);
-			_t = _t.getFirstChild();
-			charSetElement(_t);
-			_t = _retTree;
-			_t = __t25;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void charSetElement(AST _t) throws RecognitionException {
-		
-		GrammarAST charSetElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST c = null;
-		GrammarAST c1 = null;
-		GrammarAST c2 = null;
-		GrammarAST c3 = null;
-		GrammarAST c4 = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case OR:
-			{
-				AST __t27 = _t;
-				GrammarAST tmp13_AST_in = (GrammarAST)_t;
-				match(_t,OR);
-				_t = _t.getFirstChild();
-				c1 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				c2 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t27;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RANGE:
-			{
-				AST __t28 = _t;
-				GrammarAST tmp14_AST_in = (GrammarAST)_t;
-				match(_t,RANGE);
-				_t = _t.getFirstChild();
-				c3 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				c4 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t28;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tokenSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST tokenSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST t = null;
-		GrammarAST t2 = null;
-		GrammarAST s = null;
-		GrammarAST c = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKEN_REF:
-			{
-				t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				trackToken(t);
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t34 = _t;
-				GrammarAST tmp15_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				t2 = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				trackToken(t2);
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case STRING_LITERAL:
-				{
-					s = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getNextSibling();
-					trackString(s); alias(t2,s);
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					c = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getNextSibling();
-					trackString(c); alias(t2,c);
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t34;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rule(AST _t) throws RecognitionException {
-		
-		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST m = null;
-		GrammarAST b = null;
-		
-		try {      // for error handling
-			AST __t40 = _t;
-			GrammarAST tmp16_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			currentRuleName=id.getText();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				m = _t==ASTNULL ? null : (GrammarAST)_t;
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			GrammarAST tmp17_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				GrammarAST tmp18_AST_in = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RET:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			}
-			{
-			GrammarAST tmp19_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				GrammarAST tmp20_AST_in = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case OPTIONS:
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop49:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp21_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop49;
-				}
-				
-			} while (true);
-			}
-			b = _t==ASTNULL ? null : (GrammarAST)_t;
-			block(_t);
-			_t = _retTree;
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp22_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			trackTokenRule(id,m,b);
-			_t = __t40;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void modifier(AST _t) throws RecognitionException {
-		
-		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_protected:
-			{
-				GrammarAST tmp23_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_protected);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_public:
-			{
-				GrammarAST tmp24_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_public);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_private:
-			{
-				GrammarAST tmp25_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_private);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case FRAGMENT:
-			{
-				GrammarAST tmp26_AST_in = (GrammarAST)_t;
-				match(_t,FRAGMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ruleScopeSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t53 = _t;
-			GrammarAST tmp27_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ACTION:
-			{
-				GrammarAST tmp28_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			case ID:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop56:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ID)) {
-					GrammarAST tmp29_AST_in = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop56;
-				}
-				
-			} while (true);
-			}
-			_t = __t53;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void block(AST _t) throws RecognitionException {
-		
-		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t58 = _t;
-			GrammarAST tmp30_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case ALT:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			int _cnt61=0;
-			_loop61:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					alternative(_t);
-					_t = _retTree;
-					rewrite(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt61>=1 ) { break _loop61; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt61++;
-			} while (true);
-			}
-			GrammarAST tmp31_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t58;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void exceptionGroup(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			{
-				{
-				int _cnt68=0;
-				_loop68:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==LITERAL_catch)) {
-						exceptionHandler(_t);
-						_t = _retTree;
-					}
-					else {
-						if ( _cnt68>=1 ) { break _loop68; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt68++;
-				} while (true);
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case LITERAL_finally:
-				{
-					finallyClause(_t);
-					_t = _retTree;
-					break;
-				}
-				case EOR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				break;
-			}
-			case LITERAL_finally:
-			{
-				finallyClause(_t);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void alternative(AST _t) throws RecognitionException {
-		
-		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t63 = _t;
-			GrammarAST tmp32_AST_in = (GrammarAST)_t;
-			match(_t,ALT);
-			_t = _t.getFirstChild();
-			{
-			int _cnt65=0;
-			_loop65:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
-					element(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt65>=1 ) { break _loop65; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt65++;
-			} while (true);
-			}
-			GrammarAST tmp33_AST_in = (GrammarAST)_t;
-			match(_t,EOA);
-			_t = _t.getNextSibling();
-			_t = __t63;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			_loop79:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==REWRITE)) {
-					AST __t76 = _t;
-					GrammarAST tmp34_AST_in = (GrammarAST)_t;
-					match(_t,REWRITE);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case SEMPRED:
-					{
-						GrammarAST tmp35_AST_in = (GrammarAST)_t;
-						match(_t,SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case ALT:
-					case TEMPLATE:
-					case ACTION:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case ALT:
-					{
-						GrammarAST tmp36_AST_in = (GrammarAST)_t;
-						match(_t,ALT);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case TEMPLATE:
-					{
-						GrammarAST tmp37_AST_in = (GrammarAST)_t;
-						match(_t,TEMPLATE);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case ACTION:
-					{
-						GrammarAST tmp38_AST_in = (GrammarAST)_t;
-						match(_t,ACTION);
-						_t = _t.getNextSibling();
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t76;
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop79;
-				}
-				
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void element(AST _t) throws RecognitionException {
-		
-		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				AST __t81 = _t;
-				GrammarAST tmp39_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t81;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				AST __t82 = _t;
-				GrammarAST tmp40_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t82;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case WILDCARD:
-			{
-				atom(_t);
-				_t = _retTree;
-				break;
-			}
-			case NOT:
-			{
-				AST __t83 = _t;
-				GrammarAST tmp41_AST_in = (GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t83;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RANGE:
-			{
-				AST __t84 = _t;
-				GrammarAST tmp42_AST_in = (GrammarAST)_t;
-				match(_t,RANGE);
-				_t = _t.getFirstChild();
-				atom(_t);
-				_t = _retTree;
-				atom(_t);
-				_t = _retTree;
-				_t = __t84;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t85 = _t;
-				GrammarAST tmp43_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				atom(_t);
-				_t = _retTree;
-				atom(_t);
-				_t = _retTree;
-				_t = __t85;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t86 = _t;
-				GrammarAST tmp44_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				GrammarAST tmp45_AST_in = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				element(_t);
-				_t = _retTree;
-				_t = __t86;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PLUS_ASSIGN:
-			{
-				AST __t87 = _t;
-				GrammarAST tmp46_AST_in = (GrammarAST)_t;
-				match(_t,PLUS_ASSIGN);
-				_t = _t.getFirstChild();
-				GrammarAST tmp47_AST_in = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				element(_t);
-				_t = _retTree;
-				_t = __t87;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			{
-				ebnf(_t);
-				_t = _retTree;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				tree(_t);
-				_t = _retTree;
-				break;
-			}
-			case SYNPRED:
-			{
-				AST __t88 = _t;
-				GrammarAST tmp48_AST_in = (GrammarAST)_t;
-				match(_t,SYNPRED);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t88;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp49_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case SEMPRED:
-			{
-				GrammarAST tmp50_AST_in = (GrammarAST)_t;
-				match(_t,SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case SYN_SEMPRED:
-			{
-				GrammarAST tmp51_AST_in = (GrammarAST)_t;
-				match(_t,SYN_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BACKTRACK_SEMPRED:
-			{
-				GrammarAST tmp52_AST_in = (GrammarAST)_t;
-				match(_t,BACKTRACK_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case GATED_SEMPRED:
-			{
-				GrammarAST tmp53_AST_in = (GrammarAST)_t;
-				match(_t,GATED_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case EPSILON:
-			{
-				GrammarAST tmp54_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void exceptionHandler(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t71 = _t;
-			GrammarAST tmp55_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_catch);
-			_t = _t.getFirstChild();
-			GrammarAST tmp56_AST_in = (GrammarAST)_t;
-			match(_t,ARG_ACTION);
-			_t = _t.getNextSibling();
-			GrammarAST tmp57_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t71;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void finallyClause(AST _t) throws RecognitionException {
-		
-		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t73 = _t;
-			GrammarAST tmp58_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_finally);
-			_t = _t.getFirstChild();
-			GrammarAST tmp59_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t73;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void atom(AST _t) throws RecognitionException {
-		
-		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST t = null;
-		GrammarAST c = null;
-		GrammarAST s = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case RULE_REF:
-			{
-				GrammarAST tmp60_AST_in = (GrammarAST)_t;
-				match(_t,RULE_REF);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TOKEN_REF:
-			{
-				t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				trackToken(t);
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				trackString(c);
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				trackString(s);
-				break;
-			}
-			case WILDCARD:
-			{
-				GrammarAST tmp61_AST_in = (GrammarAST)_t;
-				match(_t,WILDCARD);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ebnf(AST _t) throws RecognitionException {
-		
-		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case BLOCK:
-			{
-				block(_t);
-				_t = _retTree;
-				break;
-			}
-			case OPTIONAL:
-			{
-				AST __t90 = _t;
-				GrammarAST tmp62_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t90;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CLOSURE:
-			{
-				AST __t91 = _t;
-				GrammarAST tmp63_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t91;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				AST __t92 = _t;
-				GrammarAST tmp64_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t92;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tree(AST _t) throws RecognitionException {
-		
-		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t94 = _t;
-			GrammarAST tmp65_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			element(_t);
-			_t = _retTree;
-			{
-			_loop96:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
-					element(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop96;
-				}
-				
-			} while (true);
-			}
-			_t = __t94;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ast_suffix(AST _t) throws RecognitionException {
-		
-		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				GrammarAST tmp66_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				GrammarAST tmp67_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	
-	public static final String[] _tokenNames = {
-		"<0>",
-		"EOF",
-		"<2>",
-		"NULL_TREE_LOOKAHEAD",
-		"\"options\"",
-		"\"tokens\"",
-		"\"parser\"",
-		"LEXER",
-		"RULE",
-		"BLOCK",
-		"OPTIONAL",
-		"CLOSURE",
-		"POSITIVE_CLOSURE",
-		"SYNPRED",
-		"RANGE",
-		"CHAR_RANGE",
-		"EPSILON",
-		"ALT",
-		"EOR",
-		"EOB",
-		"EOA",
-		"ID",
-		"ARG",
-		"ARGLIST",
-		"RET",
-		"LEXER_GRAMMAR",
-		"PARSER_GRAMMAR",
-		"TREE_GRAMMAR",
-		"COMBINED_GRAMMAR",
-		"INITACTION",
-		"LABEL",
-		"TEMPLATE",
-		"\"scope\"",
-		"GATED_SEMPRED",
-		"SYN_SEMPRED",
-		"BACKTRACK_SEMPRED",
-		"\"fragment\"",
-		"ACTION",
-		"DOC_COMMENT",
-		"SEMI",
-		"\"lexer\"",
-		"\"tree\"",
-		"\"grammar\"",
-		"AMPERSAND",
-		"COLON",
-		"RCURLY",
-		"ASSIGN",
-		"STRING_LITERAL",
-		"CHAR_LITERAL",
-		"INT",
-		"STAR",
-		"TOKEN_REF",
-		"\"protected\"",
-		"\"public\"",
-		"\"private\"",
-		"BANG",
-		"ARG_ACTION",
-		"\"returns\"",
-		"\"throws\"",
-		"COMMA",
-		"LPAREN",
-		"OR",
-		"RPAREN",
-		"\"catch\"",
-		"\"finally\"",
-		"PLUS_ASSIGN",
-		"SEMPRED",
-		"IMPLIES",
-		"ROOT",
-		"RULE_REF",
-		"NOT",
-		"TREE_BEGIN",
-		"QUESTION",
-		"PLUS",
-		"WILDCARD",
-		"REWRITE",
-		"DOLLAR",
-		"DOUBLE_QUOTE_STRING_LITERAL",
-		"DOUBLE_ANGLE_STRING_LITERAL",
-		"WS",
-		"COMMENT",
-		"SL_COMMENT",
-		"ML_COMMENT",
-		"OPEN_ELEMENT_OPTION",
-		"CLOSE_ELEMENT_OPTION",
-		"ESC",
-		"DIGIT",
-		"XDIGIT",
-		"NESTED_ARG_ACTION",
-		"NESTED_ACTION",
-		"ACTION_CHAR_LITERAL",
-		"ACTION_STRING_LITERAL",
-		"ACTION_ESC",
-		"WS_LOOP",
-		"INTERNAL_RULE_REF",
-		"WS_OPT",
-		"SRC",
-		"CHARSET"
-	};
-	
-	}
-	
diff --git a/src/org/antlr/tool/AssignTokenTypesWalker.smap b/src/org/antlr/tool/AssignTokenTypesWalker.smap
deleted file mode 100644
index 8599d98..0000000
--- a/src/org/antlr/tool/AssignTokenTypesWalker.smap
+++ /dev/null
@@ -1,1403 +0,0 @@
-SMAP
-AssignTokenTypesWalker.java
-G
-*S G
-*F
-+ 0 assign.types.g
-assign.types.g
-*L
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-1:32
-1:33
-1:34
-94:101
-95:102
-96:103
-97:104
-98:105
-99:106
-100:107
-101:108
-102:109
-103:110
-104:111
-105:112
-106:113
-107:114
-108:115
-110:117
-111:118
-112:119
-113:120
-115:122
-116:123
-117:124
-118:125
-119:126
-120:127
-121:128
-122:129
-123:130
-124:131
-125:132
-127:134
-128:135
-129:136
-130:137
-131:138
-132:139
-133:140
-134:141
-135:142
-136:143
-137:144
-138:145
-139:146
-140:147
-141:148
-142:149
-143:150
-144:151
-145:152
-146:153
-147:154
-148:155
-149:156
-150:157
-151:158
-152:159
-153:160
-154:161
-155:162
-157:164
-158:165
-159:166
-160:167
-161:168
-162:169
-164:171
-165:172
-166:173
-167:174
-168:175
-169:176
-170:177
-171:178
-172:179
-173:180
-174:181
-175:182
-176:183
-177:184
-178:185
-179:186
-180:187
-181:188
-182:189
-183:190
-184:191
-185:192
-186:193
-187:194
-188:195
-189:196
-191:198
-192:199
-193:200
-195:202
-196:203
-197:204
-198:205
-199:206
-200:207
-202:209
-204:211
-206:213
-208:215
-209:216
-210:217
-211:218
-212:219
-213:220
-215:222
-216:223
-218:225
-219:226
-220:227
-221:228
-222:229
-223:230
-224:231
-225:232
-226:233
-227:234
-228:235
-229:236
-230:237
-231:238
-232:239
-233:240
-235:242
-236:243
-237:244
-238:245
-239:246
-240:247
-241:248
-242:249
-243:250
-244:251
-245:252
-246:253
-247:254
-248:255
-249:256
-250:257
-251:258
-252:259
-253:260
-254:261
-256:263
-257:264
-258:265
-259:266
-260:267
-261:268
-262:269
-263:270
-264:271
-265:272
-267:274
-268:275
-269:276
-270:277
-271:278
-272:279
-273:280
-274:281
-275:282
-276:283
-277:284
-278:285
-279:286
-280:287
-282:289
-283:290
-284:291
-285:292
-286:293
-287:294
-288:295
-289:296
-290:297
-291:298
-292:299
-295:304
-295:305
-295:306
-295:313
-295:376
-295:377
-295:378
-295:379
-295:380
-295:381
-295:382
-296:310
-299:315
-299:316
-299:317
-299:318
-299:319
-299:320
-299:321
-299:322
-299:323
-299:324
-299:325
-299:326
-299:327
-299:369
-299:370
-299:371
-299:372
-299:373
-300:330
-300:331
-300:332
-300:333
-300:334
-300:335
-300:336
-300:337
-300:338
-300:339
-300:340
-301:343
-301:344
-301:345
-301:346
-301:347
-301:348
-301:349
-301:350
-301:351
-301:352
-301:353
-302:356
-302:357
-302:358
-302:359
-302:360
-302:361
-302:362
-302:363
-302:364
-302:365
-302:366
-304:375
-307:384
-307:389
-307:391
-307:494
-307:495
-307:496
-307:497
-307:498
-307:499
-307:500
-309:387
-309:392
-309:393
-309:394
-309:395
-310:388
-310:397
-310:398
-310:399
-310:400
-310:401
-310:402
-310:403
-310:414
-310:415
-310:416
-310:417
-310:418
-311:421
-311:422
-311:423
-311:424
-311:425
-311:426
-311:436
-311:437
-311:438
-311:439
-311:440
-312:443
-312:444
-312:445
-312:446
-312:447
-312:448
-312:457
-312:458
-312:459
-312:460
-312:461
-313:463
-313:464
-313:465
-313:466
-313:467
-313:468
-313:469
-313:470
-313:471
-313:472
-313:473
-313:475
-313:476
-314:477
-314:478
-314:479
-314:480
-314:481
-314:482
-314:483
-314:484
-314:485
-314:486
-314:487
-314:488
-314:490
-314:491
-315:492
-315:493
-318:574
-318:578
-318:591
-318:592
-318:593
-318:594
-318:595
-318:596
-318:597
-319:579
-319:580
-319:581
-319:582
-319:583
-319:584
-319:585
-319:586
-319:587
-319:588
-319:589
-319:590
-322:502
-322:503
-322:507
-322:530
-322:531
-322:532
-322:533
-322:534
-322:535
-322:536
-322:537
-323:508
-323:509
-323:510
-323:511
-323:513
-323:514
-323:515
-323:516
-323:517
-323:518
-323:519
-323:520
-323:521
-323:522
-323:523
-323:525
-323:526
-323:527
-323:528
-323:529
-326:628
-326:629
-326:630
-326:639
-326:659
-326:660
-326:661
-326:662
-326:663
-326:664
-326:665
-327:635
-328:636
-331:633
-331:640
-331:641
-331:642
-331:643
-331:644
-331:645
-331:646
-331:647
-331:648
-331:649
-331:650
-331:651
-333:653
-334:654
-335:655
-336:656
-337:657
-341:667
-341:668
-341:676
-341:677
-341:678
-341:711
-341:712
-341:713
-341:714
-341:715
-341:716
-341:717
-341:718
-341:719
-341:720
-341:721
-341:722
-341:723
-342:671
-342:679
-342:680
-342:681
-342:682
-342:683
-342:684
-343:672
-343:687
-343:688
-343:689
-343:690
-343:691
-343:692
-344:673
-344:695
-344:696
-344:697
-344:698
-344:699
-344:700
-345:674
-345:703
-345:704
-345:705
-345:706
-345:707
-345:708
-349:725
-349:729
-349:738
-349:739
-349:740
-349:741
-349:742
-349:743
-349:744
-350:730
-350:731
-350:732
-350:733
-350:734
-350:735
-350:736
-350:737
-353:746
-353:755
-353:756
-353:757
-353:797
-353:798
-353:799
-353:800
-353:801
-353:802
-353:803
-353:804
-353:805
-353:806
-353:807
-353:808
-354:749
-354:758
-354:759
-354:760
-354:761
-354:762
-355:750
-355:751
-355:765
-355:766
-355:767
-355:768
-355:769
-355:770
-355:771
-355:772
-355:773
-355:774
-355:775
-355:776
-355:777
-355:778
-356:752
-356:753
-356:781
-356:782
-356:783
-356:784
-356:785
-356:786
-356:787
-356:788
-356:789
-356:790
-356:791
-356:792
-356:793
-356:794
-359:539
-359:543
-359:566
-359:567
-359:568
-359:569
-359:570
-359:571
-359:572
-360:544
-360:545
-360:546
-360:547
-360:549
-360:550
-360:551
-360:552
-360:553
-360:554
-360:555
-360:556
-360:557
-360:558
-360:559
-360:561
-360:562
-360:563
-360:564
-360:565
-363:810
-363:818
-363:819
-363:820
-363:868
-363:869
-363:870
-363:871
-363:872
-363:873
-363:874
-363:875
-363:876
-363:877
-363:878
-363:879
-364:813
-364:821
-364:822
-364:823
-364:824
-364:825
-364:826
-365:829
-365:830
-365:831
-365:832
-365:833
-365:834
-365:864
-365:865
-366:814
-366:835
-366:836
-366:837
-366:838
-367:815
-367:840
-367:841
-367:842
-367:843
-367:844
-367:845
-367:846
-367:847
-367:858
-367:859
-367:860
-367:861
-367:862
-368:816
-368:850
-368:851
-368:852
-368:853
-368:854
-368:855
-373:599
-373:603
-373:620
-373:621
-373:622
-373:623
-373:624
-373:625
-373:626
-374:605
-374:606
-374:607
-374:608
-374:609
-374:610
-374:611
-374:612
-374:613
-374:614
-374:615
-374:617
-374:618
-374:619
-377:881
-377:888
-377:1058
-377:1059
-377:1060
-377:1061
-377:1062
-377:1063
-377:1064
-378:884
-378:889
-378:890
-378:891
-378:892
-378:893
-378:894
-378:895
-378:896
-378:1056
-378:1057
-379:885
-379:898
-379:899
-379:900
-379:901
-379:902
-379:903
-379:904
-379:905
-379:906
-379:907
-379:914
-379:915
-379:916
-379:917
-379:918
-380:921
-380:922
-380:923
-380:925
-380:926
-380:927
-380:928
-380:929
-380:930
-380:931
-380:938
-380:939
-380:940
-380:941
-380:942
-381:946
-381:947
-381:948
-381:950
-381:951
-381:952
-381:953
-381:954
-381:955
-381:956
-381:966
-381:967
-381:968
-381:969
-381:970
-382:974
-382:975
-382:976
-382:977
-382:978
-382:979
-382:988
-382:989
-382:990
-382:991
-382:992
-383:995
-383:996
-383:997
-383:998
-383:999
-383:1000
-383:1008
-383:1009
-383:1010
-383:1011
-383:1012
-384:1014
-384:1015
-384:1016
-384:1017
-384:1018
-384:1019
-384:1020
-384:1021
-384:1022
-384:1023
-384:1024
-384:1025
-384:1027
-384:1028
-385:886
-385:1029
-385:1030
-385:1031
-386:1033
-386:1034
-386:1035
-386:1036
-386:1037
-386:1038
-386:1039
-386:1046
-386:1047
-386:1048
-386:1049
-386:1050
-387:1052
-387:1053
-387:1054
-388:1055
-392:1066
-392:1070
-392:1071
-392:1072
-392:1101
-392:1102
-392:1103
-392:1104
-392:1105
-392:1106
-392:1107
-392:1108
-392:1109
-392:1110
-392:1111
-392:1112
-393:1073
-393:1074
-393:1075
-393:1076
-393:1077
-394:1080
-394:1081
-394:1082
-394:1083
-394:1084
-395:1087
-395:1088
-395:1089
-395:1090
-395:1091
-396:1094
-396:1095
-396:1096
-396:1097
-396:1098
-399:1114
-399:1118
-399:1161
-399:1162
-399:1163
-399:1164
-399:1165
-399:1166
-399:1167
-400:1119
-400:1120
-400:1121
-400:1122
-400:1124
-400:1125
-400:1126
-400:1127
-400:1128
-400:1129
-400:1130
-400:1138
-400:1139
-400:1140
-400:1141
-400:1142
-400:1144
-400:1145
-400:1146
-400:1147
-400:1148
-400:1149
-400:1150
-400:1151
-400:1152
-400:1153
-400:1154
-400:1155
-400:1157
-400:1158
-400:1159
-400:1160
-403:1169
-403:1173
-403:1220
-403:1221
-403:1222
-403:1223
-403:1224
-403:1225
-403:1226
-404:1174
-404:1175
-404:1176
-404:1177
-404:1218
-404:1219
-405:1179
-405:1180
-405:1181
-405:1182
-405:1183
-405:1184
-405:1191
-405:1192
-405:1193
-405:1194
-405:1195
-406:1198
-406:1199
-406:1200
-406:1201
-406:1202
-406:1203
-406:1204
-406:1205
-406:1206
-406:1207
-406:1208
-406:1209
-406:1210
-406:1212
-406:1213
-406:1214
-407:1215
-407:1216
-407:1217
-411:1293
-411:1297
-411:1323
-411:1324
-411:1325
-411:1326
-411:1327
-411:1328
-411:1329
-412:1298
-412:1299
-412:1300
-412:1301
-412:1303
-412:1304
-412:1305
-412:1306
-412:1307
-412:1308
-412:1309
-412:1310
-412:1311
-412:1312
-412:1313
-412:1315
-412:1316
-412:1317
-412:1318
-412:1319
-412:1320
-412:1321
-412:1322
-415:1228
-415:1232
-415:1233
-415:1234
-415:1280
-415:1281
-415:1282
-415:1283
-415:1284
-415:1285
-415:1286
-415:1287
-415:1288
-415:1289
-415:1290
-415:1291
-416:1235
-416:1236
-416:1238
-416:1239
-416:1240
-416:1241
-416:1242
-416:1243
-416:1244
-416:1245
-416:1246
-416:1247
-416:1248
-416:1250
-416:1251
-416:1252
-416:1254
-416:1255
-416:1256
-416:1257
-416:1258
-416:1259
-416:1266
-416:1267
-416:1268
-416:1269
-416:1270
-417:1274
-417:1275
-417:1276
-417:1277
-420:1607
-420:1611
-420:1624
-420:1625
-420:1626
-420:1627
-420:1628
-420:1629
-420:1630
-421:1612
-421:1613
-421:1614
-421:1615
-421:1616
-421:1617
-421:1618
-421:1619
-421:1620
-421:1621
-421:1622
-421:1623
-424:1632
-424:1636
-424:1646
-424:1647
-424:1648
-424:1649
-424:1650
-424:1651
-424:1652
-425:1637
-425:1638
-425:1639
-425:1640
-425:1641
-425:1642
-425:1643
-425:1644
-425:1645
-428:1331
-428:1335
-428:1406
-428:1407
-428:1408
-428:1409
-428:1410
-428:1411
-428:1412
-429:1336
-429:1337
-429:1338
-429:1339
-429:1340
-429:1341
-429:1342
-429:1343
-429:1344
-429:1346
-429:1347
-429:1348
-429:1349
-429:1350
-429:1351
-429:1352
-429:1361
-429:1362
-429:1363
-429:1364
-429:1365
-429:1368
-429:1369
-429:1370
-429:1371
-429:1372
-429:1373
-429:1374
-429:1377
-429:1378
-429:1379
-429:1380
-429:1381
-429:1384
-429:1385
-429:1386
-429:1387
-429:1388
-429:1391
-429:1392
-429:1393
-429:1394
-429:1395
-429:1397
-429:1398
-429:1399
-429:1400
-429:1401
-429:1402
-429:1404
-429:1405
-432:1414
-432:1418
-432:1419
-432:1420
-432:1594
-432:1595
-432:1596
-432:1597
-432:1598
-432:1599
-432:1600
-432:1601
-432:1602
-432:1603
-432:1604
-432:1605
-433:1421
-433:1422
-433:1423
-433:1424
-433:1425
-433:1426
-433:1427
-433:1428
-433:1429
-433:1430
-434:1433
-434:1434
-434:1435
-434:1436
-434:1437
-434:1438
-434:1439
-434:1440
-434:1441
-434:1442
-435:1445
-435:1446
-435:1447
-435:1448
-435:1449
-435:1450
-435:1451
-435:1452
-436:1455
-436:1456
-436:1457
-436:1458
-436:1459
-436:1460
-436:1461
-436:1462
-436:1463
-436:1464
-437:1467
-437:1468
-437:1469
-437:1470
-437:1471
-437:1472
-437:1473
-437:1474
-437:1475
-437:1476
-437:1477
-437:1478
-438:1481
-438:1482
-438:1483
-438:1484
-438:1485
-438:1486
-438:1487
-438:1488
-438:1489
-438:1490
-438:1491
-438:1492
-439:1495
-439:1496
-439:1497
-439:1498
-439:1499
-439:1500
-439:1501
-439:1502
-439:1503
-439:1504
-439:1505
-439:1506
-439:1507
-440:1510
-440:1511
-440:1512
-440:1513
-440:1514
-440:1515
-440:1516
-440:1517
-440:1518
-440:1519
-440:1520
-440:1521
-440:1522
-441:1525
-441:1526
-441:1527
-441:1528
-441:1529
-441:1530
-441:1531
-442:1534
-442:1535
-442:1536
-442:1537
-443:1540
-443:1541
-443:1542
-443:1543
-443:1544
-443:1545
-443:1546
-443:1547
-443:1548
-443:1549
-444:1552
-444:1553
-444:1554
-444:1555
-444:1556
-445:1559
-445:1560
-445:1561
-445:1562
-445:1563
-446:1566
-446:1567
-446:1568
-446:1569
-446:1570
-447:1573
-447:1574
-447:1575
-447:1576
-447:1577
-448:1580
-448:1581
-448:1582
-448:1583
-448:1584
-449:1587
-449:1588
-449:1589
-449:1590
-449:1591
-452:1715
-452:1719
-452:1720
-452:1721
-452:1722
-452:1723
-452:1724
-452:1725
-452:1764
-452:1765
-452:1766
-452:1767
-452:1768
-452:1769
-452:1770
-452:1771
-452:1772
-452:1773
-452:1774
-452:1775
-453:1728
-453:1729
-453:1730
-453:1731
-453:1732
-453:1733
-453:1734
-453:1735
-453:1736
-453:1737
-454:1740
-454:1741
-454:1742
-454:1743
-454:1744
-454:1745
-454:1746
-454:1747
-454:1748
-454:1749
-455:1752
-455:1753
-455:1754
-455:1755
-455:1756
-455:1757
-455:1758
-455:1759
-455:1760
-455:1761
-458:1777
-458:1781
-458:1782
-458:1783
-458:1784
-458:1785
-458:1786
-458:1787
-458:1788
-458:1789
-458:1790
-458:1791
-458:1792
-458:1793
-458:1794
-458:1795
-458:1796
-458:1797
-458:1798
-458:1800
-458:1801
-458:1802
-458:1803
-458:1804
-458:1805
-458:1806
-458:1807
-458:1808
-458:1809
-458:1810
-461:1654
-461:1661
-461:1662
-461:1663
-461:1702
-461:1703
-461:1704
-461:1705
-461:1706
-461:1707
-461:1708
-461:1709
-461:1710
-461:1711
-461:1712
-461:1713
-462:1664
-462:1665
-462:1666
-462:1667
-462:1668
-463:1657
-463:1671
-463:1672
-463:1673
-463:1674
-463:1675
-463:1676
-464:1658
-464:1679
-464:1680
-464:1681
-464:1682
-464:1683
-464:1684
-465:1659
-465:1687
-465:1688
-465:1689
-465:1690
-465:1691
-465:1692
-466:1695
-466:1696
-466:1697
-466:1698
-466:1699
-469:1812
-469:1816
-469:1817
-469:1818
-469:1833
-469:1834
-469:1835
-469:1836
-469:1837
-469:1838
-469:1839
-469:1840
-469:1841
-469:1842
-469:1843
-469:1844
-470:1819
-470:1820
-470:1821
-470:1822
-470:1823
-471:1826
-471:1827
-471:1828
-471:1829
-471:1830
-*E
diff --git a/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.java b/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.java
deleted file mode 100644
index bbbc644..0000000
--- a/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "assign.types.g" -> "AssignTokenTypesWalker.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.tool;
-	import java.util.*;
-	import org.antlr.analysis.*;
-	import org.antlr.misc.*;
-	import java.io.*;
-
-public interface AssignTokenTypesWalkerTokenTypes {
-	int EOF = 1;
-	int NULL_TREE_LOOKAHEAD = 3;
-	int OPTIONS = 4;
-	int TOKENS = 5;
-	int PARSER = 6;
-	int LEXER = 7;
-	int RULE = 8;
-	int BLOCK = 9;
-	int OPTIONAL = 10;
-	int CLOSURE = 11;
-	int POSITIVE_CLOSURE = 12;
-	int SYNPRED = 13;
-	int RANGE = 14;
-	int CHAR_RANGE = 15;
-	int EPSILON = 16;
-	int ALT = 17;
-	int EOR = 18;
-	int EOB = 19;
-	int EOA = 20;
-	int ID = 21;
-	int ARG = 22;
-	int ARGLIST = 23;
-	int RET = 24;
-	int LEXER_GRAMMAR = 25;
-	int PARSER_GRAMMAR = 26;
-	int TREE_GRAMMAR = 27;
-	int COMBINED_GRAMMAR = 28;
-	int INITACTION = 29;
-	int LABEL = 30;
-	int TEMPLATE = 31;
-	int SCOPE = 32;
-	int GATED_SEMPRED = 33;
-	int SYN_SEMPRED = 34;
-	int BACKTRACK_SEMPRED = 35;
-	int FRAGMENT = 36;
-	int ACTION = 37;
-	int DOC_COMMENT = 38;
-	int SEMI = 39;
-	int LITERAL_lexer = 40;
-	int LITERAL_tree = 41;
-	int LITERAL_grammar = 42;
-	int AMPERSAND = 43;
-	int COLON = 44;
-	int RCURLY = 45;
-	int ASSIGN = 46;
-	int STRING_LITERAL = 47;
-	int CHAR_LITERAL = 48;
-	int INT = 49;
-	int STAR = 50;
-	int TOKEN_REF = 51;
-	int LITERAL_protected = 52;
-	int LITERAL_public = 53;
-	int LITERAL_private = 54;
-	int BANG = 55;
-	int ARG_ACTION = 56;
-	int LITERAL_returns = 57;
-	int LITERAL_throws = 58;
-	int COMMA = 59;
-	int LPAREN = 60;
-	int OR = 61;
-	int RPAREN = 62;
-	int LITERAL_catch = 63;
-	int LITERAL_finally = 64;
-	int PLUS_ASSIGN = 65;
-	int SEMPRED = 66;
-	int IMPLIES = 67;
-	int ROOT = 68;
-	int RULE_REF = 69;
-	int NOT = 70;
-	int TREE_BEGIN = 71;
-	int QUESTION = 72;
-	int PLUS = 73;
-	int WILDCARD = 74;
-	int REWRITE = 75;
-	int DOLLAR = 76;
-	int DOUBLE_QUOTE_STRING_LITERAL = 77;
-	int DOUBLE_ANGLE_STRING_LITERAL = 78;
-	int WS = 79;
-	int COMMENT = 80;
-	int SL_COMMENT = 81;
-	int ML_COMMENT = 82;
-	int OPEN_ELEMENT_OPTION = 83;
-	int CLOSE_ELEMENT_OPTION = 84;
-	int ESC = 85;
-	int DIGIT = 86;
-	int XDIGIT = 87;
-	int NESTED_ARG_ACTION = 88;
-	int NESTED_ACTION = 89;
-	int ACTION_CHAR_LITERAL = 90;
-	int ACTION_STRING_LITERAL = 91;
-	int ACTION_ESC = 92;
-	int WS_LOOP = 93;
-	int INTERNAL_RULE_REF = 94;
-	int WS_OPT = 95;
-	int SRC = 96;
-	int CHARSET = 97;
-}
diff --git a/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.txt b/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.txt
deleted file mode 100644
index ff59099..0000000
--- a/src/org/antlr/tool/AssignTokenTypesWalkerTokenTypes.txt
+++ /dev/null
@@ -1,96 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): assign.types.g -> AssignTokenTypesWalkerTokenTypes.txt$
-AssignTokenTypesWalker    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
-CHARSET=97
diff --git a/src/org/antlr/tool/BuildDependencyGenerator.java b/src/org/antlr/tool/BuildDependencyGenerator.java
deleted file mode 100644
index 45ae500..0000000
--- a/src/org/antlr/tool/BuildDependencyGenerator.java
+++ /dev/null
@@ -1,193 +0,0 @@
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.misc.Utils;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.stringtemplate.StringTemplateGroup;
-import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.io.*;
-
-/** Given a grammar file, show the dependencies on .tokens etc...
- *  Using ST, emit a simple "make compatible" list of dependencies.
- *  For example, combined grammar T.g (no token import) generates:
- *
- *		TParser.java : T.g
- * 		T.tokens : T.g
- * 		T__g : T.g
- *
- *  For tree grammar TP with import of T.tokens:
- *
- * 		TP.g : T.tokens
- * 		TP.java : TP.g
- *
- *  If "-lib libdir" is used on command-line with -depend, then include the
- *  path like
- *
- * 		TP.g : libdir/T.tokens
- *
- *  Pay attention to -o as well:
- *
- * 		outputdir/TParser.java : T.g
- *
- *  So this output shows what the grammar depends on *and* what it generates.
- *
- *  Operate on one grammar file at a time.  If given a list of .g on the
- *  command-line with -depend, just emit the dependencies.  The grammars
- *  may depend on each other, but the order doesn't matter.  Build tools,
- *  reading in this output, will know how to organize it.
- *
- *  This is a wee bit slow probably because the code generator has to load
- *  all of its template files in order to figure out the file extension
- *  for the generated recognizer.
- *
- *  This code was obvious until I removed redundant "./" on front of files
- *  and had to escape spaces in filenames :(
- */
-public class BuildDependencyGenerator {
-	protected String grammarFileName;
-	protected Tool tool;
-	protected Grammar grammar;
-	protected CodeGenerator generator;
-	protected StringTemplateGroup templates;
-
-	public BuildDependencyGenerator(Tool tool, String grammarFileName)
-		throws IOException, antlr.TokenStreamException, antlr.RecognitionException
-	{
-		this.tool = tool;
-		this.grammarFileName = grammarFileName;
-		grammar = tool.getGrammar(grammarFileName);
-		String language = (String)grammar.getOption("language");
-		generator = new CodeGenerator(tool, grammar, language);
-		generator.loadTemplates(language);
-	}
-
-	/** From T.g return a list of File objects that
-	 *  names files ANTLR will emit from T.g.
-	 */
-	public List getGeneratedFileList() {
-		List files = new ArrayList();
-		File outputDir = tool.getOutputDirectory(grammarFileName);
-		if ( outputDir.getName().equals(".") ) {
-			outputDir = null;
-		}
-		else if ( outputDir.getName().indexOf(' ')>=0 ) { // has spaces?
-			String escSpaces = Utils.replace(outputDir.toString(),
-											 " ",
-											 "\\ ");
-			outputDir = new File(escSpaces);
-		}
-		// add generated recognizer; e.g., TParser.java
-		String recognizer =
-			generator.getRecognizerFileName(grammar.name, grammar.type);
-		files.add(new File(outputDir, recognizer));
-		// add output vocab file; e.g., T.tokens
-		files.add(new File(outputDir, generator.getVocabFileName()));
-		// are we generating a .h file?
-		StringTemplate headerExtST = null;
-		if ( generator.getTemplates().isDefined("headerFile") ) {
-			headerExtST = generator.getTemplates().getInstanceOf("headerFileExtension");
-			files.add(new File(outputDir,headerExtST.toString()));
-		}
-		if ( grammar.type==Grammar.COMBINED ) {
-			// add autogenerated lexer; e.g., TLexer.java TLexer.h TLexer.tokens
-			// don't add T__.g (just a temp file)
-			String lexer =
-				generator.getRecognizerFileName(grammar.name, Grammar.LEXER);
-			files.add(new File(outputDir,lexer));
-			// TLexer.h
-			String suffix = Grammar.grammarTypeToFileNameSuffix[Grammar.LEXER];
-			if ( headerExtST !=null ) {
-				String header =	 grammar.name+suffix+headerExtST.toString();
-				files.add(new File(outputDir,header));
-			}
-			// for combined, don't generate TLexer.tokens
-		}
-
-		if ( files.size()==0 ) {
-			return null;
-		}
-		return files;
-	}
-
-	/** Return a list of File objects that name files ANTLR will read
-	 *  to process T.g; for now, this can only be .tokens files and only
-	 *  if they use the tokenVocab option.
-	 */
-	public List getDependenciesFileList() {
-		List files = new ArrayList();
-		String vocabName = (String)grammar.getOption("tokenVocab");
-		if ( vocabName == null ) {
-			return null;
-		}
-		File vocabFile = grammar.getImportedVocabFileName(vocabName);
-		File outputDir = vocabFile.getParentFile();
-		if ( outputDir.getName().equals(".") ) {
-			files.add(vocabFile.getName());
-		}
-		else if ( outputDir.getName().indexOf(' ')>=0 ) { // has spaces?
-			String escSpaces = Utils.replace(outputDir.toString(),
-											 " ",
-											 "\\ ");
-			outputDir = new File(escSpaces);
-			files.add(new File(outputDir, vocabFile.getName()));
-		}
-		else {
-			files.add(vocabFile);
-		}
-
-		if ( files.size()==0 ) {
-			return null;
-		}
-		return files;
-	}
-
-	public StringTemplate getDependencies() {
-		loadDependencyTemplates();
-		StringTemplate dependenciesST = templates.getInstanceOf("dependencies");
-		dependenciesST.setAttribute("in", getDependenciesFileList());
-		dependenciesST.setAttribute("out", getGeneratedFileList());
-		dependenciesST.setAttribute("grammarFileName", grammar.fileName);
-		return dependenciesST;
-	}
-
-	public void loadDependencyTemplates() {
-		if ( templates!=null ) {
-			return;
-		}
-		String fileName = "org/antlr/tool/templates/depend.stg";
-		ClassLoader cl = Thread.currentThread().getContextClassLoader();
-		InputStream is = cl.getResourceAsStream(fileName);
-		if ( is==null ) {
-			cl = ErrorManager.class.getClassLoader();
-			is = cl.getResourceAsStream(fileName);
-		}
-		if ( is==null ) {
-			ErrorManager.internalError("Can't load dependency templates: "+fileName);
-			return;
-		}
-		BufferedReader br = null;
-		try {
-			br = new BufferedReader(new InputStreamReader(is));
-			templates = new StringTemplateGroup(br,
-												AngleBracketTemplateLexer.class);
-			br.close();
-		}
-		catch (IOException ioe) {
-			ErrorManager.internalError("error reading dependency templates file "+fileName, ioe);
-		}
-		finally {
-			if ( br!=null ) {
-				try {
-					br.close();
-				}
-				catch (IOException ioe) {
-					ErrorManager.internalError("cannot close dependency templates file "+fileName, ioe);
-				}
-			}
-		}
-	}
-}
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalker.java b/src/org/antlr/tool/DefineGrammarItemsWalker.java
deleted file mode 100644
index 3e28ebb..0000000
--- a/src/org/antlr/tool/DefineGrammarItemsWalker.java
+++ /dev/null
@@ -1,2995 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "define.g" -> "DefineGrammarItemsWalker.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.tool;
-	import java.util.*;
-	import org.antlr.misc.*;
-
-import antlr.TreeParser;
-import antlr.Token;
-import antlr.collections.AST;
-import antlr.RecognitionException;
-import antlr.ANTLRException;
-import antlr.NoViableAltException;
-import antlr.MismatchedTokenException;
-import antlr.SemanticException;
-import antlr.collections.impl.BitSet;
-import antlr.ASTPair;
-import antlr.collections.impl.ASTArray;
-
-
-public class DefineGrammarItemsWalker extends antlr.TreeParser       implements DefineGrammarItemsWalkerTokenTypes
- {
-
-protected Grammar grammar;
-protected GrammarAST root;
-protected String currentRuleName;
-protected GrammarAST currentRewriteBlock;
-protected GrammarAST currentRewriteRule;
-protected int outerAltNum = 0;
-protected int blockLevel = 0;
-
-    public void reportError(RecognitionException ex) {
-		Token token = null;
-		if ( ex instanceof MismatchedTokenException ) {
-			token = ((MismatchedTokenException)ex).token;
-		}
-		else if ( ex instanceof NoViableAltException ) {
-			token = ((NoViableAltException)ex).token;
-		}
-        ErrorManager.syntaxError(
-            ErrorManager.MSG_SYNTAX_ERROR,
-            grammar,
-            token,
-            "define: "+ex.toString(),
-            ex);
-    }
-
-	protected void finish() {
-		trimGrammar();
-	}
-
-	/** Remove any lexer rules from a COMBINED; already passed to lexer */
-	protected void trimGrammar() {
-		if ( grammar.type!=Grammar.COMBINED ) {
-			return;
-		}
-		// form is (header ... ) ( grammar ID (scope ...) ... ( rule ... ) ( rule ... ) ... )
-		GrammarAST p = root;
-		// find the grammar spec
-		while ( !p.getText().equals("grammar") ) {
-			p = (GrammarAST)p.getNextSibling();
-		}
-		p = (GrammarAST)p.getFirstChild(); // jump down to first child of grammar
-		// look for first RULE def
-		GrammarAST prev = p; // points to the ID (grammar name)
-		while ( p.getType()!=RULE ) {
-			prev = p;
-			p = (GrammarAST)p.getNextSibling();
-		}
-		// prev points at last node before first rule subtree at this point
-		while ( p!=null ) {
-			String ruleName = p.getFirstChild().getText();
-			//System.out.println("rule "+ruleName+" prev="+prev.getText());
-			if ( Character.isUpperCase(ruleName.charAt(0)) ) {
-				// remove lexer rule
-				prev.setNextSibling(p.getNextSibling());
-			}
-			else {
-				prev = p; // non-lexer rule; move on
-			}
-			p = (GrammarAST)p.getNextSibling();
-		}
-		//System.out.println("root after removal is: "+root.toStringList());
-	}
-
-    protected void trackInlineAction(GrammarAST actionAST) {
-		Rule r = grammar.getRule(currentRuleName);
-        if ( r!=null ) {
-            r.trackInlineAction(actionAST);
-        }
-    }
-
-public DefineGrammarItemsWalker() {
-	tokenNames = _tokenNames;
-}
-
-	public final void grammar(AST _t,
-		Grammar g
-	) throws RecognitionException {
-		
-		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		grammar = g;
-		root = grammar_AST_in;
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LEXER_GRAMMAR:
-			{
-				AST __t3 = _t;
-				GrammarAST tmp1_AST_in = (GrammarAST)_t;
-				match(_t,LEXER_GRAMMAR);
-				_t = _t.getFirstChild();
-				if ( inputState.guessing==0 ) {
-					grammar.type = Grammar.LEXER;
-				}
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t3;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PARSER_GRAMMAR:
-			{
-				AST __t4 = _t;
-				GrammarAST tmp2_AST_in = (GrammarAST)_t;
-				match(_t,PARSER_GRAMMAR);
-				_t = _t.getFirstChild();
-				if ( inputState.guessing==0 ) {
-					grammar.type = Grammar.PARSER;
-				}
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t4;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TREE_GRAMMAR:
-			{
-				AST __t5 = _t;
-				GrammarAST tmp3_AST_in = (GrammarAST)_t;
-				match(_t,TREE_GRAMMAR);
-				_t = _t.getFirstChild();
-				if ( inputState.guessing==0 ) {
-					grammar.type = Grammar.TREE_PARSER;
-				}
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t5;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case COMBINED_GRAMMAR:
-			{
-				AST __t6 = _t;
-				GrammarAST tmp4_AST_in = (GrammarAST)_t;
-				match(_t,COMBINED_GRAMMAR);
-				_t = _t.getFirstChild();
-				if ( inputState.guessing==0 ) {
-					grammar.type = Grammar.COMBINED;
-				}
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t6;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			if ( inputState.guessing==0 ) {
-				finish();
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void grammarSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST cmt = null;
-		
-		Map opts=null;
-		Token optionsStartToken=null;
-		
-		
-		try {      // for error handling
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case DOC_COMMENT:
-			{
-				cmt = (GrammarAST)_t;
-				match(_t,DOC_COMMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case OPTIONS:
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				if ( inputState.guessing==0 ) {
-					optionsStartToken=((GrammarAST)_t).getToken();
-				}
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKENS:
-			{
-				tokensSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop14:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==SCOPE)) {
-					attrScope(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop14;
-				}
-				
-			} while (true);
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case AMPERSAND:
-			{
-				actions(_t);
-				_t = _retTree;
-				break;
-			}
-			case RULE:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			rules(_t);
-			_t = _retTree;
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void attrScope(AST _t) throws RecognitionException {
-		
-		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST name = null;
-		GrammarAST attrs = null;
-		
-		try {      // for error handling
-			AST __t8 = _t;
-			GrammarAST tmp5_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			name = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			attrs = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t8;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				
-						AttributeScope scope = grammar.defineGlobalScope(name.getText(),attrs.token);
-						scope.isDynamicGlobalScope = true;
-						scope.addAttributes(attrs.getText(), ";");
-						
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void optionsSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST optionsSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			GrammarAST tmp6_AST_in = (GrammarAST)_t;
-			match(_t,OPTIONS);
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tokensSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST tokensSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t24 = _t;
-			GrammarAST tmp7_AST_in = (GrammarAST)_t;
-			match(_t,TOKENS);
-			_t = _t.getFirstChild();
-			{
-			int _cnt26=0;
-			_loop26:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ASSIGN||_t.getType()==TOKEN_REF)) {
-					tokenSpec(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt26>=1 ) { break _loop26; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt26++;
-			} while (true);
-			}
-			_t = __t24;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void actions(AST _t) throws RecognitionException {
-		
-		GrammarAST actions_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			int _cnt18=0;
-			_loop18:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					action(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt18>=1 ) { break _loop18; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt18++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rules(AST _t) throws RecognitionException {
-		
-		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			int _cnt32=0;
-			_loop32:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==RULE)) {
-					rule(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt32>=1 ) { break _loop32; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt32++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void action(AST _t) throws RecognitionException {
-		
-		GrammarAST action_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST amp = null;
-		GrammarAST id1 = null;
-		GrammarAST id2 = null;
-		GrammarAST a1 = null;
-		GrammarAST a2 = null;
-		
-		String scope=null;
-		GrammarAST nameAST=null, actionAST=null;
-		
-		
-		try {      // for error handling
-			AST __t20 = _t;
-			amp = _t==ASTNULL ? null :(GrammarAST)_t;
-			match(_t,AMPERSAND);
-			_t = _t.getFirstChild();
-			id1 = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ID:
-			{
-				id2 = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				a1 = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					scope=id1.getText(); nameAST=id2; actionAST=a1;
-				}
-				break;
-			}
-			case ACTION:
-			{
-				a2 = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					scope=null; nameAST=id1; actionAST=a2;
-				}
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t20;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				
-						 grammar.defineNamedAction(amp,scope,nameAST,actionAST);
-						
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tokenSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST tokenSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST t = null;
-		GrammarAST t2 = null;
-		GrammarAST s = null;
-		GrammarAST c = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKEN_REF:
-			{
-				t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t28 = _t;
-				GrammarAST tmp8_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				t2 = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case STRING_LITERAL:
-				{
-					s = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					c = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t28;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rule(AST _t) throws RecognitionException {
-		
-		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST args = null;
-		GrammarAST ret = null;
-		GrammarAST b = null;
-		
-		String mod=null;
-		String name=null;
-		Map opts=null;
-		Rule r = null;
-		
-		
-		try {      // for error handling
-			AST __t34 = _t;
-			GrammarAST tmp9_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				opts = tmp9_AST_in.options;
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				mod=modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			AST __t36 = _t;
-			GrammarAST tmp10_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				args = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t36;
-			_t = _t.getNextSibling();
-			AST __t38 = _t;
-			GrammarAST tmp11_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				ret = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			_t = __t38;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			if ( inputState.guessing==0 ) {
-				
-							name = id.getText();
-							currentRuleName = name;
-							if ( Character.isUpperCase(name.charAt(0)) &&
-								 grammar.type==Grammar.COMBINED )
-							{
-								// a merged grammar spec, track lexer rules and send to another grammar
-								grammar.defineLexerRuleFoundInParser(id.getToken(), rule_AST_in);
-							}
-							else {
-								int numAlts = countAltsForRule(rule_AST_in);
-								grammar.defineRule(id.getToken(), mod, opts, rule_AST_in, args, numAlts);
-								r = grammar.getRule(name);
-								if ( args!=null ) {
-									r.parameterScope = grammar.createParameterScope(name,args.token);
-									r.parameterScope.addAttributes(args.getText(), ",");
-								}
-								if ( ret!=null ) {
-									r.returnScope = grammar.createReturnScope(name,ret.token);
-									r.returnScope.addAttributes(ret.getText(), ",");
-								}
-							}
-							
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t,r);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop43:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					ruleAction(_t,r);
-					_t = _retTree;
-				}
-				else {
-					break _loop43;
-				}
-				
-			} while (true);
-			}
-			if ( inputState.guessing==0 ) {
-				this.blockLevel=0;
-			}
-			b = _t==ASTNULL ? null : (GrammarAST)_t;
-			block(_t);
-			_t = _retTree;
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp12_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				
-				// copy rule options into the block AST, which is where
-				// the analysis will look for k option etc...
-				b.options = opts;
-				
-			}
-			_t = __t34;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final String  modifier(AST _t) throws RecognitionException {
-		String mod;
-		
-		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		mod = modifier_AST_in.getText();
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_protected:
-			{
-				GrammarAST tmp13_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_protected);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_public:
-			{
-				GrammarAST tmp14_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_public);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_private:
-			{
-				GrammarAST tmp15_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_private);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case FRAGMENT:
-			{
-				GrammarAST tmp16_AST_in = (GrammarAST)_t;
-				match(_t,FRAGMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-		return mod;
-	}
-	
-	public final void ruleScopeSpec(AST _t,
-		Rule r
-	) throws RecognitionException {
-		
-		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST attrs = null;
-		GrammarAST uses = null;
-		
-		try {      // for error handling
-			AST __t63 = _t;
-			GrammarAST tmp17_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ACTION:
-			{
-				attrs = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-						         r.ruleScope = grammar.createRuleScope(r.name,attrs.token);
-								 r.ruleScope.isDynamicRuleScope = true;
-								 r.ruleScope.addAttributes(attrs.getText(), ";");
-								
-				}
-				break;
-			}
-			case 3:
-			case ID:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop66:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ID)) {
-					uses = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-					if ( inputState.guessing==0 ) {
-						
-							         if ( grammar.getGlobalScope(uses.getText())==null ) {
-										 ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
-																   grammar,
-																   uses.token,
-																   uses.getText());
-							         }
-							         else {
-							         	if ( r.useScopes==null ) {r.useScopes=new ArrayList();}
-							         	r.useScopes.add(uses.getText());
-							         }
-							
-					}
-				}
-				else {
-					break _loop66;
-				}
-				
-			} while (true);
-			}
-			_t = __t63;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ruleAction(AST _t,
-		Rule r
-	) throws RecognitionException {
-		
-		GrammarAST ruleAction_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST amp = null;
-		GrammarAST id = null;
-		GrammarAST a = null;
-		
-		try {      // for error handling
-			AST __t60 = _t;
-			amp = _t==ASTNULL ? null :(GrammarAST)_t;
-			match(_t,AMPERSAND);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			a = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t60;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				if (r!=null) r.defineNamedAction(amp,id,a);
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void block(AST _t) throws RecognitionException {
-		
-		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		this.blockLevel++;
-		if ( this.blockLevel==1 ) {this.outerAltNum=1;}
-		
-		
-		try {      // for error handling
-			AST __t68 = _t;
-			GrammarAST tmp18_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				optionsSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case ALT:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop71:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					blockAction(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop71;
-				}
-				
-			} while (true);
-			}
-			{
-			int _cnt73=0;
-			_loop73:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					alternative(_t);
-					_t = _retTree;
-					rewrite(_t);
-					_t = _retTree;
-					if ( inputState.guessing==0 ) {
-						if ( this.blockLevel==1 ) {this.outerAltNum++;}
-					}
-				}
-				else {
-					if ( _cnt73>=1 ) { break _loop73; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt73++;
-			} while (true);
-			}
-			GrammarAST tmp19_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t68;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				this.blockLevel--;
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void exceptionGroup(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			{
-				{
-				int _cnt82=0;
-				_loop82:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==LITERAL_catch)) {
-						exceptionHandler(_t);
-						_t = _retTree;
-					}
-					else {
-						if ( _cnt82>=1 ) { break _loop82; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt82++;
-				} while (true);
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case LITERAL_finally:
-				{
-					finallyClause(_t);
-					_t = _retTree;
-					break;
-				}
-				case EOR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				break;
-			}
-			case LITERAL_finally:
-			{
-				finallyClause(_t);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final int  countAltsForRule(AST _t) throws RecognitionException {
-		int n=0;
-		
-		GrammarAST countAltsForRule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		
-		try {      // for error handling
-			AST __t46 = _t;
-			GrammarAST tmp20_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp21_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getNextSibling();
-			GrammarAST tmp22_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp23_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				GrammarAST tmp24_AST_in = (GrammarAST)_t;
-				match(_t,SCOPE);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop51:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp25_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop51;
-				}
-				
-			} while (true);
-			}
-			AST __t52 = _t;
-			GrammarAST tmp26_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp27_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ALT:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			int _cnt57=0;
-			_loop57:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					GrammarAST tmp28_AST_in = (GrammarAST)_t;
-					match(_t,ALT);
-					_t = _t.getNextSibling();
-					{
-					_loop56:
-					do {
-						if (_t==null) _t=ASTNULL;
-						if ((_t.getType()==REWRITE)) {
-							GrammarAST tmp29_AST_in = (GrammarAST)_t;
-							match(_t,REWRITE);
-							_t = _t.getNextSibling();
-						}
-						else {
-							break _loop56;
-						}
-						
-					} while (true);
-					}
-					if ( inputState.guessing==0 ) {
-						n++;
-					}
-				}
-				else {
-					if ( _cnt57>=1 ) { break _loop57; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt57++;
-			} while (true);
-			}
-			GrammarAST tmp30_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t52;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp31_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			_t = __t46;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-		return n;
-	}
-	
-	public final void blockAction(AST _t) throws RecognitionException {
-		
-		GrammarAST blockAction_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST amp = null;
-		GrammarAST id = null;
-		GrammarAST a = null;
-		
-		try {      // for error handling
-			AST __t75 = _t;
-			amp = _t==ASTNULL ? null :(GrammarAST)_t;
-			match(_t,AMPERSAND);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			a = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t75;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void alternative(AST _t) throws RecognitionException {
-		
-		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		if ( grammar.type!=Grammar.LEXER && grammar.getOption("output")!=null && blockLevel==1 ) {
-			GrammarAST aRewriteNode = alternative_AST_in.findFirstType(REWRITE);
-			if ( aRewriteNode!=null||
-				 (alternative_AST_in.getNextSibling()!=null &&
-				  alternative_AST_in.getNextSibling().getType()==REWRITE) )
-			{
-				Rule r = grammar.getRule(currentRuleName);
-				r.trackAltsWithRewrites(alternative_AST_in,this.outerAltNum);
-			}
-		}
-		
-		
-		try {      // for error handling
-			AST __t77 = _t;
-			GrammarAST tmp32_AST_in = (GrammarAST)_t;
-			match(_t,ALT);
-			_t = _t.getFirstChild();
-			{
-			int _cnt79=0;
-			_loop79:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
-					element(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt79>=1 ) { break _loop79; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt79++;
-			} while (true);
-			}
-			GrammarAST tmp33_AST_in = (GrammarAST)_t;
-			match(_t,EOA);
-			_t = _t.getNextSibling();
-			_t = __t77;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST pred = null;
-		
-		currentRewriteRule = rewrite_AST_in; // has to execute during guessing
-		if ( grammar.buildAST() ) {
-		rewrite_AST_in.rewriteRefsDeep = new HashSet<GrammarAST>();
-		}
-		
-		
-		try {      // for error handling
-			{
-			_loop124:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==REWRITE)) {
-					AST __t122 = _t;
-					GrammarAST tmp34_AST_in = (GrammarAST)_t;
-					match(_t,REWRITE);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case SEMPRED:
-					{
-						pred = (GrammarAST)_t;
-						match(_t,SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case ALT:
-					case TEMPLATE:
-					case ACTION:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					rewrite_alternative(_t);
-					_t = _retTree;
-					_t = __t122;
-					_t = _t.getNextSibling();
-					if ( inputState.guessing==0 ) {
-						
-						if ( pred!=null ) {
-						pred.outerAltNum = this.outerAltNum;
-						trackInlineAction(pred);
-						}
-						
-					}
-				}
-				else {
-					break _loop124;
-				}
-				
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void element(AST _t) throws RecognitionException {
-		
-		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST el = null;
-		GrammarAST id2 = null;
-		GrammarAST a2 = null;
-		GrammarAST act = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				AST __t89 = _t;
-				GrammarAST tmp35_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t89;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				AST __t90 = _t;
-				GrammarAST tmp36_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t90;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case WILDCARD:
-			{
-				atom(_t);
-				_t = _retTree;
-				break;
-			}
-			case NOT:
-			{
-				AST __t91 = _t;
-				GrammarAST tmp37_AST_in = (GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				element(_t);
-				_t = _retTree;
-				_t = __t91;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RANGE:
-			{
-				AST __t92 = _t;
-				GrammarAST tmp38_AST_in = (GrammarAST)_t;
-				match(_t,RANGE);
-				_t = _t.getFirstChild();
-				atom(_t);
-				_t = _retTree;
-				atom(_t);
-				_t = _retTree;
-				_t = __t92;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t93 = _t;
-				GrammarAST tmp39_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				atom(_t);
-				_t = _retTree;
-				atom(_t);
-				_t = _retTree;
-				_t = __t93;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t94 = _t;
-				GrammarAST tmp40_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				id = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				el = _t==ASTNULL ? null : (GrammarAST)_t;
-				element(_t);
-				_t = _retTree;
-				_t = __t94;
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-							if ( el.getType()==ANTLRParser.ROOT ||
-					el.getType()==ANTLRParser.BANG )
-							{
-					el = (GrammarAST)el.getFirstChild();
-					}
-						if ( el.getType()==RULE_REF) {
-							grammar.defineRuleRefLabel(currentRuleName,id.getToken(),el);
-						}
-						else {
-							grammar.defineTokenRefLabel(currentRuleName,id.getToken(),el);
-						}
-						
-				}
-				break;
-			}
-			case PLUS_ASSIGN:
-			{
-				AST __t95 = _t;
-				GrammarAST tmp41_AST_in = (GrammarAST)_t;
-				match(_t,PLUS_ASSIGN);
-				_t = _t.getFirstChild();
-				id2 = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				a2 = _t==ASTNULL ? null : (GrammarAST)_t;
-				element(_t);
-				_t = _retTree;
-				if ( inputState.guessing==0 ) {
-					
-					if ( a2.getType()==ANTLRParser.ROOT ||
-					a2.getType()==ANTLRParser.BANG )
-					{
-					a2 = (GrammarAST)a2.getFirstChild();
-					}
-						    if ( a2.getType()==RULE_REF ) {
-						    	grammar.defineRuleListLabel(currentRuleName,id2.getToken(),a2);
-						    }
-						    else {
-						    	grammar.defineTokenListLabel(currentRuleName,id2.getToken(),a2);
-						    }
-						
-				}
-				_t = __t95;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			{
-				ebnf(_t);
-				_t = _retTree;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				tree(_t);
-				_t = _retTree;
-				break;
-			}
-			case SYNPRED:
-			{
-				AST __t96 = _t;
-				GrammarAST tmp42_AST_in = (GrammarAST)_t;
-				match(_t,SYNPRED);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t96;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ACTION:
-			{
-				act = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					act.outerAltNum = this.outerAltNum;
-							trackInlineAction(act);
-					
-				}
-				break;
-			}
-			case SEMPRED:
-			{
-				GrammarAST tmp43_AST_in = (GrammarAST)_t;
-				match(_t,SEMPRED);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					tmp43_AST_in.outerAltNum = this.outerAltNum;
-					trackInlineAction(tmp43_AST_in);
-					
-				}
-				break;
-			}
-			case SYN_SEMPRED:
-			{
-				GrammarAST tmp44_AST_in = (GrammarAST)_t;
-				match(_t,SYN_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BACKTRACK_SEMPRED:
-			{
-				GrammarAST tmp45_AST_in = (GrammarAST)_t;
-				match(_t,BACKTRACK_SEMPRED);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case GATED_SEMPRED:
-			{
-				GrammarAST tmp46_AST_in = (GrammarAST)_t;
-				match(_t,GATED_SEMPRED);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					tmp46_AST_in.outerAltNum = this.outerAltNum;
-					trackInlineAction(tmp46_AST_in);
-					
-				}
-				break;
-			}
-			case EPSILON:
-			{
-				GrammarAST tmp47_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void exceptionHandler(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t85 = _t;
-			GrammarAST tmp48_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_catch);
-			_t = _t.getFirstChild();
-			GrammarAST tmp49_AST_in = (GrammarAST)_t;
-			match(_t,ARG_ACTION);
-			_t = _t.getNextSibling();
-			GrammarAST tmp50_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t85;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				trackInlineAction(tmp50_AST_in);
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void finallyClause(AST _t) throws RecognitionException {
-		
-		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t87 = _t;
-			GrammarAST tmp51_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_finally);
-			_t = _t.getFirstChild();
-			GrammarAST tmp52_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t87;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				trackInlineAction(tmp52_AST_in);
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void atom(AST _t) throws RecognitionException {
-		
-		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST rr = null;
-		GrammarAST rarg = null;
-		GrammarAST t = null;
-		GrammarAST targ = null;
-		GrammarAST c = null;
-		GrammarAST s = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case RULE_REF:
-			{
-				AST __t115 = _t;
-				rr = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,RULE_REF);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ARG_ACTION:
-				{
-					rarg = (GrammarAST)_t;
-					match(_t,ARG_ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t115;
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					grammar.altReferencesRule(currentRuleName, rr, this.outerAltNum);
-							if ( rarg!=null ) {
-					rarg.outerAltNum = this.outerAltNum;
-					trackInlineAction(rarg);
-					}
-					
-				}
-				break;
-			}
-			case TOKEN_REF:
-			{
-				AST __t117 = _t;
-				t = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ARG_ACTION:
-				{
-					targ = (GrammarAST)_t;
-					match(_t,ARG_ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t117;
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-							if ( targ!=null ) {
-					targ.outerAltNum = this.outerAltNum;
-					trackInlineAction(targ);
-					}
-						if ( grammar.type==Grammar.LEXER ) {
-							grammar.altReferencesRule(currentRuleName, t, this.outerAltNum);
-						}
-						else {
-							grammar.altReferencesTokenID(currentRuleName, t, this.outerAltNum);
-						}
-						
-				}
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-						if ( grammar.type!=Grammar.LEXER ) {
-							Rule rule = grammar.getRule(currentRuleName);
-								if ( rule!=null ) {
-									rule.trackTokenReferenceInAlt(c, outerAltNum);
-							}
-						}
-						
-				}
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-						if ( grammar.type!=Grammar.LEXER ) {
-							Rule rule = grammar.getRule(currentRuleName);
-								if ( rule!=null ) {
-									rule.trackTokenReferenceInAlt(s, outerAltNum);
-							}
-						}
-						
-				}
-				break;
-			}
-			case WILDCARD:
-			{
-				GrammarAST tmp53_AST_in = (GrammarAST)_t;
-				match(_t,WILDCARD);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ebnf(AST _t) throws RecognitionException {
-		
-		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case BLOCK:
-			{
-				block(_t);
-				_t = _retTree;
-				break;
-			}
-			case OPTIONAL:
-			{
-				AST __t100 = _t;
-				GrammarAST tmp54_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t100;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-				boolean synPredMatched99 = false;
-				if (_t==null) _t=ASTNULL;
-				if (((_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE))) {
-					AST __t99 = _t;
-					synPredMatched99 = true;
-					inputState.guessing++;
-					try {
-						{
-						dotLoop(_t);
-						_t = _retTree;
-						}
-					}
-					catch (RecognitionException pe) {
-						synPredMatched99 = false;
-					}
-					_t = __t99;
-inputState.guessing--;
-				}
-				if ( synPredMatched99 ) {
-					dotLoop(_t);
-					_t = _retTree;
-				}
-				else if ((_t.getType()==CLOSURE)) {
-					AST __t101 = _t;
-					GrammarAST tmp55_AST_in = (GrammarAST)_t;
-					match(_t,CLOSURE);
-					_t = _t.getFirstChild();
-					block(_t);
-					_t = _retTree;
-					_t = __t101;
-					_t = _t.getNextSibling();
-				}
-				else if ((_t.getType()==POSITIVE_CLOSURE)) {
-					AST __t102 = _t;
-					GrammarAST tmp56_AST_in = (GrammarAST)_t;
-					match(_t,POSITIVE_CLOSURE);
-					_t = _t.getFirstChild();
-					block(_t);
-					_t = _retTree;
-					_t = __t102;
-					_t = _t.getNextSibling();
-				}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void tree(AST _t) throws RecognitionException {
-		
-		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t111 = _t;
-			GrammarAST tmp57_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			element(_t);
-			_t = _retTree;
-			{
-			_loop113:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==BLOCK||_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==SYNPRED||_t.getType()==RANGE||_t.getType()==CHAR_RANGE||_t.getType()==EPSILON||_t.getType()==GATED_SEMPRED||_t.getType()==SYN_SEMPRED||_t.getType()==BACKTRACK_SEMPRED||_t.getType()==ACTION||_t.getType()==ASSIGN||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==BANG||_t.getType()==PLUS_ASSIGN||_t.getType()==SEMPRED||_t.g [...]
-					element(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop113;
-				}
-				
-			} while (true);
-			}
-			_t = __t111;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-/** Track the .* and .+ idioms and make them nongreedy by default.
- */
-	public final void dotLoop(AST _t) throws RecognitionException {
-		
-		GrammarAST dotLoop_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		GrammarAST block = (GrammarAST)dotLoop_AST_in.getFirstChild();
-		
-		
-		try {      // for error handling
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case CLOSURE:
-			{
-				AST __t105 = _t;
-				GrammarAST tmp58_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				dotBlock(_t);
-				_t = _retTree;
-				_t = __t105;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				AST __t106 = _t;
-				GrammarAST tmp59_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				dotBlock(_t);
-				_t = _retTree;
-				_t = __t106;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			if ( inputState.guessing==0 ) {
-				
-				Map opts=new HashMap();
-				opts.put("greedy", "false");
-				if ( grammar.type!=Grammar.LEXER ) {
-				// parser grammars assume k=1 for .* loops
-				// otherwise they (analysis?) look til EOF!
-				opts.put("k", Utils.integer(1));
-				}
-				block.setOptions(grammar,opts);
-				
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void dotBlock(AST _t) throws RecognitionException {
-		
-		GrammarAST dotBlock_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t108 = _t;
-			GrammarAST tmp60_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			AST __t109 = _t;
-			GrammarAST tmp61_AST_in = (GrammarAST)_t;
-			match(_t,ALT);
-			_t = _t.getFirstChild();
-			GrammarAST tmp62_AST_in = (GrammarAST)_t;
-			match(_t,WILDCARD);
-			_t = _t.getNextSibling();
-			GrammarAST tmp63_AST_in = (GrammarAST)_t;
-			match(_t,EOA);
-			_t = _t.getNextSibling();
-			_t = __t109;
-			_t = _t.getNextSibling();
-			GrammarAST tmp64_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t108;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ast_suffix(AST _t) throws RecognitionException {
-		
-		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				GrammarAST tmp65_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				GrammarAST tmp66_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_alternative(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST a = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			if (((_t.getType()==ALT))&&(grammar.buildAST())) {
-				AST __t128 = _t;
-				a = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,ALT);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case OPTIONAL:
-				case CLOSURE:
-				case POSITIVE_CLOSURE:
-				case LABEL:
-				case ACTION:
-				case STRING_LITERAL:
-				case CHAR_LITERAL:
-				case TOKEN_REF:
-				case RULE_REF:
-				case TREE_BEGIN:
-				{
-					{
-					int _cnt131=0;
-					_loop131:
-					do {
-						if (_t==null) _t=ASTNULL;
-						if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
-							rewrite_element(_t);
-							_t = _retTree;
-						}
-						else {
-							if ( _cnt131>=1 ) { break _loop131; } else {throw new NoViableAltException(_t);}
-						}
-						
-						_cnt131++;
-					} while (true);
-					}
-					break;
-				}
-				case EPSILON:
-				{
-					GrammarAST tmp67_AST_in = (GrammarAST)_t;
-					match(_t,EPSILON);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				GrammarAST tmp68_AST_in = (GrammarAST)_t;
-				match(_t,EOA);
-				_t = _t.getNextSibling();
-				_t = __t128;
-				_t = _t.getNextSibling();
-			}
-			else if (((_t.getType()==ALT||_t.getType()==TEMPLATE||_t.getType()==ACTION))&&(grammar.buildTemplate())) {
-				rewrite_template(_t);
-				_t = _retTree;
-			}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_block(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		GrammarAST enclosingBlock = currentRewriteBlock;
-		if ( inputState.guessing==0 ) {  // don't do if guessing
-		currentRewriteBlock=rewrite_block_AST_in; // pts to BLOCK node
-		currentRewriteBlock.rewriteRefsShallow = new HashSet<GrammarAST>();
-		currentRewriteBlock.rewriteRefsDeep = new HashSet<GrammarAST>();
-		}
-		
-		
-		try {      // for error handling
-			AST __t126 = _t;
-			GrammarAST tmp69_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			rewrite_alternative(_t);
-			_t = _retTree;
-			GrammarAST tmp70_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t126;
-			_t = _t.getNextSibling();
-			if ( inputState.guessing==0 ) {
-				
-				// copy the element refs in this block to the surrounding block
-				if ( enclosingBlock!=null ) {
-				enclosingBlock.rewriteRefsDeep
-				.addAll(currentRewriteBlock.rewriteRefsShallow);
-				}
-				currentRewriteBlock = enclosingBlock; // restore old BLOCK ptr
-				
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_element(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LABEL:
-			case ACTION:
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			{
-				rewrite_atom(_t);
-				_t = _retTree;
-				break;
-			}
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			{
-				rewrite_ebnf(_t);
-				_t = _retTree;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				rewrite_tree(_t);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_template(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_template_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		GrammarAST ind = null;
-		GrammarAST arg = null;
-		GrammarAST a = null;
-		GrammarAST act = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ALT:
-			{
-				AST __t146 = _t;
-				GrammarAST tmp71_AST_in = (GrammarAST)_t;
-				match(_t,ALT);
-				_t = _t.getFirstChild();
-				GrammarAST tmp72_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				GrammarAST tmp73_AST_in = (GrammarAST)_t;
-				match(_t,EOA);
-				_t = _t.getNextSibling();
-				_t = __t146;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TEMPLATE:
-			{
-				AST __t147 = _t;
-				GrammarAST tmp74_AST_in = (GrammarAST)_t;
-				match(_t,TEMPLATE);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ID:
-				{
-					id = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case ACTION:
-				{
-					ind = (GrammarAST)_t;
-					match(_t,ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				AST __t149 = _t;
-				GrammarAST tmp75_AST_in = (GrammarAST)_t;
-				match(_t,ARGLIST);
-				_t = _t.getFirstChild();
-				{
-				_loop152:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==ARG)) {
-						AST __t151 = _t;
-						GrammarAST tmp76_AST_in = (GrammarAST)_t;
-						match(_t,ARG);
-						_t = _t.getFirstChild();
-						arg = (GrammarAST)_t;
-						match(_t,ID);
-						_t = _t.getNextSibling();
-						a = (GrammarAST)_t;
-						match(_t,ACTION);
-						_t = _t.getNextSibling();
-						_t = __t151;
-						_t = _t.getNextSibling();
-						if ( inputState.guessing==0 ) {
-							
-							a.outerAltNum = this.outerAltNum;
-							trackInlineAction(a);
-							
-						}
-					}
-					else {
-						break _loop152;
-					}
-					
-				} while (true);
-				}
-				_t = __t149;
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					if ( ind!=null ) {
-					ind.outerAltNum = this.outerAltNum;
-					trackInlineAction(ind);
-					}
-					
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case DOUBLE_QUOTE_STRING_LITERAL:
-				{
-					GrammarAST tmp77_AST_in = (GrammarAST)_t;
-					match(_t,DOUBLE_QUOTE_STRING_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case DOUBLE_ANGLE_STRING_LITERAL:
-				{
-					GrammarAST tmp78_AST_in = (GrammarAST)_t;
-					match(_t,DOUBLE_ANGLE_STRING_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t147;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ACTION:
-			{
-				act = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					act.outerAltNum = this.outerAltNum;
-					trackInlineAction(act);
-					
-				}
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_atom(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST arg = null;
-		
-		Rule r = grammar.getRule(currentRuleName);
-		Set tokenRefsInAlt = r.getTokenRefsInAlt(outerAltNum);
-		boolean imaginary =
-		rewrite_atom_AST_in.getType()==TOKEN_REF &&
-		!tokenRefsInAlt.contains(rewrite_atom_AST_in.getText());
-		if ( !imaginary && grammar.buildAST() &&
-		(rewrite_atom_AST_in.getType()==RULE_REF ||
-		rewrite_atom_AST_in.getType()==LABEL ||
-		rewrite_atom_AST_in.getType()==TOKEN_REF ||
-		rewrite_atom_AST_in.getType()==CHAR_LITERAL ||
-		rewrite_atom_AST_in.getType()==STRING_LITERAL) )
-		{
-		// track per block and for entire rewrite rule
-		if ( currentRewriteBlock!=null ) {
-		currentRewriteBlock.rewriteRefsShallow.add(rewrite_atom_AST_in);
-		currentRewriteBlock.rewriteRefsDeep.add(rewrite_atom_AST_in);
-		}
-		currentRewriteRule.rewriteRefsDeep.add(rewrite_atom_AST_in);
-		}
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case RULE_REF:
-			{
-				GrammarAST tmp79_AST_in = (GrammarAST)_t;
-				match(_t,RULE_REF);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			{
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case TOKEN_REF:
-				{
-					AST __t143 = _t;
-					GrammarAST tmp80_AST_in = (GrammarAST)_t;
-					match(_t,TOKEN_REF);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case ARG_ACTION:
-					{
-						arg = (GrammarAST)_t;
-						match(_t,ARG_ACTION);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t143;
-					_t = _t.getNextSibling();
-					break;
-				}
-				case CHAR_LITERAL:
-				{
-					GrammarAST tmp81_AST_in = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case STRING_LITERAL:
-				{
-					GrammarAST tmp82_AST_in = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getNextSibling();
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				if ( inputState.guessing==0 ) {
-					
-					if ( arg!=null ) {
-					arg.outerAltNum = this.outerAltNum;
-					trackInlineAction(arg);
-					}
-					
-				}
-				break;
-			}
-			case LABEL:
-			{
-				GrammarAST tmp83_AST_in = (GrammarAST)_t;
-				match(_t,LABEL);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp84_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				if ( inputState.guessing==0 ) {
-					
-					tmp84_AST_in.outerAltNum = this.outerAltNum;
-					trackInlineAction(tmp84_AST_in);
-					
-				}
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_ebnf(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONAL:
-			{
-				AST __t134 = _t;
-				GrammarAST tmp85_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				rewrite_block(_t);
-				_t = _retTree;
-				_t = __t134;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case CLOSURE:
-			{
-				AST __t135 = _t;
-				GrammarAST tmp86_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				rewrite_block(_t);
-				_t = _retTree;
-				_t = __t135;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				AST __t136 = _t;
-				GrammarAST tmp87_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				rewrite_block(_t);
-				_t = _retTree;
-				_t = __t136;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rewrite_tree(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t138 = _t;
-			GrammarAST tmp88_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			rewrite_atom(_t);
-			_t = _retTree;
-			{
-			_loop140:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==OPTIONAL||_t.getType()==CLOSURE||_t.getType()==POSITIVE_CLOSURE||_t.getType()==LABEL||_t.getType()==ACTION||_t.getType()==STRING_LITERAL||_t.getType()==CHAR_LITERAL||_t.getType()==TOKEN_REF||_t.getType()==RULE_REF||_t.getType()==TREE_BEGIN)) {
-					rewrite_element(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop140;
-				}
-				
-			} while (true);
-			}
-			_t = __t138;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			if (inputState.guessing==0) {
-				reportError(ex);
-				if (_t!=null) {_t = _t.getNextSibling();}
-			} else {
-			  throw ex;
-			}
-		}
-		_retTree = _t;
-	}
-	
-	
-	public static final String[] _tokenNames = {
-		"<0>",
-		"EOF",
-		"<2>",
-		"NULL_TREE_LOOKAHEAD",
-		"\"options\"",
-		"\"tokens\"",
-		"\"parser\"",
-		"LEXER",
-		"RULE",
-		"BLOCK",
-		"OPTIONAL",
-		"CLOSURE",
-		"POSITIVE_CLOSURE",
-		"SYNPRED",
-		"RANGE",
-		"CHAR_RANGE",
-		"EPSILON",
-		"ALT",
-		"EOR",
-		"EOB",
-		"EOA",
-		"ID",
-		"ARG",
-		"ARGLIST",
-		"RET",
-		"LEXER_GRAMMAR",
-		"PARSER_GRAMMAR",
-		"TREE_GRAMMAR",
-		"COMBINED_GRAMMAR",
-		"INITACTION",
-		"LABEL",
-		"TEMPLATE",
-		"\"scope\"",
-		"GATED_SEMPRED",
-		"SYN_SEMPRED",
-		"BACKTRACK_SEMPRED",
-		"\"fragment\"",
-		"ACTION",
-		"DOC_COMMENT",
-		"SEMI",
-		"\"lexer\"",
-		"\"tree\"",
-		"\"grammar\"",
-		"AMPERSAND",
-		"COLON",
-		"RCURLY",
-		"ASSIGN",
-		"STRING_LITERAL",
-		"CHAR_LITERAL",
-		"INT",
-		"STAR",
-		"TOKEN_REF",
-		"\"protected\"",
-		"\"public\"",
-		"\"private\"",
-		"BANG",
-		"ARG_ACTION",
-		"\"returns\"",
-		"\"throws\"",
-		"COMMA",
-		"LPAREN",
-		"OR",
-		"RPAREN",
-		"\"catch\"",
-		"\"finally\"",
-		"PLUS_ASSIGN",
-		"SEMPRED",
-		"IMPLIES",
-		"ROOT",
-		"RULE_REF",
-		"NOT",
-		"TREE_BEGIN",
-		"QUESTION",
-		"PLUS",
-		"WILDCARD",
-		"REWRITE",
-		"DOLLAR",
-		"DOUBLE_QUOTE_STRING_LITERAL",
-		"DOUBLE_ANGLE_STRING_LITERAL",
-		"WS",
-		"COMMENT",
-		"SL_COMMENT",
-		"ML_COMMENT",
-		"OPEN_ELEMENT_OPTION",
-		"CLOSE_ELEMENT_OPTION",
-		"ESC",
-		"DIGIT",
-		"XDIGIT",
-		"NESTED_ARG_ACTION",
-		"NESTED_ACTION",
-		"ACTION_CHAR_LITERAL",
-		"ACTION_STRING_LITERAL",
-		"ACTION_ESC",
-		"WS_LOOP",
-		"INTERNAL_RULE_REF",
-		"WS_OPT",
-		"SRC"
-	};
-	
-	}
-	
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalker.smap b/src/org/antlr/tool/DefineGrammarItemsWalker.smap
deleted file mode 100644
index 5ce8cf0..0000000
--- a/src/org/antlr/tool/DefineGrammarItemsWalker.smap
+++ /dev/null
@@ -1,2248 +0,0 @@
-SMAP
-DefineGrammarItemsWalker.java
-G
-*S G
-*F
-+ 0 define.g
-define.g
-*L
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-1:32
-43:50
-44:51
-45:52
-46:53
-47:54
-48:55
-49:56
-51:58
-52:59
-53:60
-54:61
-55:62
-56:63
-57:64
-58:65
-59:66
-60:67
-61:68
-62:69
-63:70
-64:71
-65:72
-67:74
-68:75
-69:76
-71:78
-72:79
-73:80
-74:81
-75:82
-76:83
-77:84
-78:85
-79:86
-80:87
-81:88
-82:89
-83:90
-84:91
-85:92
-86:93
-87:94
-88:95
-89:96
-90:97
-91:98
-92:99
-93:100
-94:101
-95:102
-96:103
-97:104
-98:105
-99:106
-100:107
-101:108
-102:109
-103:110
-105:112
-106:113
-107:114
-108:115
-109:116
-110:117
-114:123
-114:124
-114:125
-114:133
-114:206
-114:207
-114:208
-114:209
-114:210
-114:211
-114:212
-114:213
-114:214
-114:215
-114:216
-115:129
-116:130
-119:135
-119:136
-119:137
-119:138
-119:139
-119:140
-119:141
-119:142
-119:143
-119:144
-119:146
-119:147
-119:148
-119:149
-119:197
-119:198
-119:199
-119:200
-119:201
-120:152
-120:153
-120:154
-120:155
-120:156
-120:157
-120:158
-120:159
-120:161
-120:162
-120:163
-120:164
-121:167
-121:168
-121:169
-121:170
-121:171
-121:172
-121:173
-121:174
-121:176
-121:177
-121:178
-121:179
-122:182
-122:183
-122:184
-122:185
-122:186
-122:187
-122:188
-122:189
-122:191
-122:192
-122:193
-122:194
-124:203
-124:204
-127:349
-127:355
-127:375
-127:376
-127:377
-127:378
-127:379
-127:380
-127:381
-127:382
-127:383
-127:384
-127:385
-128:352
-128:353
-128:356
-128:357
-128:358
-128:359
-128:360
-128:361
-128:362
-128:363
-128:364
-128:365
-128:366
-128:367
-129:368
-130:370
-131:371
-132:372
-136:218
-136:228
-136:337
-136:338
-136:339
-136:340
-136:341
-136:342
-136:343
-136:344
-136:345
-136:346
-136:347
-137:224
-138:225
-141:221
-141:229
-141:230
-141:231
-142:222
-142:233
-142:234
-142:235
-142:236
-142:237
-142:238
-142:239
-142:250
-142:251
-142:252
-142:253
-142:254
-144:257
-144:258
-144:259
-144:260
-144:261
-144:262
-144:275
-144:276
-144:277
-144:278
-144:279
-145:264
-145:265
-147:282
-147:283
-147:284
-147:285
-147:286
-147:287
-147:296
-147:297
-147:298
-147:299
-147:300
-148:302
-148:303
-148:304
-148:305
-148:306
-148:307
-148:308
-148:309
-148:310
-148:311
-148:312
-148:314
-148:315
-149:317
-149:318
-149:319
-149:320
-149:321
-149:322
-149:329
-149:330
-149:331
-149:332
-149:333
-150:335
-150:336
-153:446
-153:450
-153:467
-153:468
-153:469
-153:470
-153:471
-153:472
-153:473
-153:474
-153:475
-153:476
-153:477
-154:452
-154:453
-154:454
-154:455
-154:456
-154:457
-154:458
-154:459
-154:460
-154:461
-154:462
-154:464
-154:465
-154:466
-157:512
-157:525
-157:572
-157:573
-157:574
-157:575
-157:576
-157:577
-157:578
-157:579
-157:580
-157:581
-157:582
-158:521
-159:522
-162:515
-162:516
-162:526
-162:527
-162:528
-162:529
-162:530
-162:531
-162:532
-162:565
-162:566
-163:517
-163:518
-163:534
-163:535
-163:536
-163:537
-163:538
-163:539
-163:540
-163:541
-163:542
-163:543
-163:559
-163:560
-163:561
-163:562
-163:563
-164:544
-164:545
-165:519
-165:549
-165:550
-165:551
-165:552
-165:553
-166:554
-166:555
-169:567
-170:569
-174:387
-174:391
-174:395
-174:396
-174:397
-174:398
-174:399
-174:400
-174:401
-174:402
-174:403
-174:404
-174:405
-175:392
-175:393
-175:394
-178:407
-178:411
-178:434
-178:435
-178:436
-178:437
-178:438
-178:439
-178:440
-178:441
-178:442
-178:443
-178:444
-179:412
-179:413
-179:414
-179:415
-179:417
-179:418
-179:419
-179:420
-179:421
-179:422
-179:423
-179:424
-179:425
-179:426
-179:427
-179:429
-179:430
-179:431
-179:432
-179:433
-182:584
-182:592
-182:593
-182:594
-182:638
-182:639
-182:640
-182:641
-182:642
-182:643
-182:644
-182:645
-182:646
-182:647
-182:648
-182:649
-182:650
-182:651
-182:652
-182:653
-183:587
-183:595
-183:596
-183:597
-183:598
-183:599
-184:602
-184:603
-184:604
-184:605
-184:606
-184:607
-184:634
-184:635
-185:588
-185:608
-185:609
-185:610
-186:589
-186:612
-186:613
-186:614
-186:615
-186:616
-186:617
-186:618
-186:628
-186:629
-186:630
-186:631
-186:632
-187:590
-187:621
-187:622
-187:623
-187:624
-187:625
-192:479
-192:483
-192:500
-192:501
-192:502
-192:503
-192:504
-192:505
-192:506
-192:507
-192:508
-192:509
-192:510
-193:485
-193:486
-193:487
-193:488
-193:489
-193:490
-193:491
-193:492
-193:493
-193:494
-193:495
-193:497
-193:498
-193:499
-196:655
-196:669
-196:872
-196:873
-196:874
-196:875
-196:876
-196:877
-196:878
-196:879
-196:880
-196:881
-196:882
-197:663
-198:664
-199:665
-200:666
-203:658
-203:670
-203:671
-203:672
-203:673
-203:674
-203:675
-203:676
-203:677
-203:678
-203:870
-203:871
-204:681
-204:682
-204:683
-204:684
-204:685
-204:686
-204:687
-204:688
-204:689
-204:696
-204:697
-204:698
-204:699
-204:700
-205:659
-205:702
-205:703
-205:704
-205:705
-205:707
-205:708
-205:709
-205:710
-205:711
-205:712
-205:713
-205:720
-205:721
-205:722
-205:723
-205:724
-205:726
-205:727
-206:660
-206:728
-206:729
-206:730
-206:731
-206:733
-206:734
-206:735
-206:736
-206:737
-206:738
-206:739
-206:746
-206:747
-206:748
-206:749
-206:750
-206:752
-206:753
-207:755
-207:756
-207:757
-207:758
-207:759
-207:760
-207:769
-207:770
-207:771
-207:772
-207:773
-208:775
-209:777
-210:778
-211:779
-212:780
-213:781
-214:782
-215:783
-216:784
-217:785
-218:786
-219:787
-220:788
-221:789
-222:790
-223:791
-224:792
-225:793
-226:794
-227:795
-228:796
-229:797
-231:801
-231:802
-231:803
-231:804
-231:805
-231:806
-231:814
-231:815
-231:816
-231:817
-231:818
-232:820
-232:821
-232:822
-232:823
-232:824
-232:825
-232:826
-232:827
-232:828
-232:829
-232:830
-232:832
-232:833
-233:834
-233:835
-234:661
-234:837
-234:838
-234:839
-235:841
-235:842
-235:843
-235:844
-235:845
-235:846
-235:847
-235:854
-235:855
-235:856
-235:857
-235:858
-236:860
-236:861
-236:862
-237:863
-238:865
-239:866
-240:867
-245:1219
-245:1220
-245:1225
-245:1408
-245:1409
-245:1410
-245:1411
-245:1412
-245:1413
-245:1414
-245:1415
-245:1416
-245:1417
-245:1418
-245:1419
-246:1223
-246:1226
-246:1227
-246:1228
-246:1229
-246:1230
-246:1231
-246:1232
-246:1234
-246:1235
-246:1236
-246:1237
-246:1238
-246:1239
-246:1240
-246:1241
-246:1242
-246:1249
-246:1250
-246:1251
-246:1252
-246:1253
-246:1255
-246:1256
-246:1257
-246:1258
-246:1259
-246:1260
-246:1262
-246:1263
-246:1264
-246:1265
-246:1266
-246:1267
-246:1268
-246:1277
-246:1278
-246:1279
-246:1280
-246:1281
-246:1284
-246:1285
-246:1286
-246:1287
-246:1288
-246:1289
-246:1290
-246:1298
-246:1299
-246:1300
-246:1301
-246:1302
-246:1304
-246:1305
-246:1306
-246:1307
-246:1308
-246:1309
-246:1310
-246:1311
-246:1312
-246:1313
-246:1314
-246:1315
-246:1317
-246:1318
-246:1406
-246:1407
-247:1319
-247:1320
-247:1321
-247:1322
-247:1324
-247:1325
-247:1326
-247:1327
-247:1328
-247:1329
-247:1330
-247:1337
-247:1338
-247:1339
-247:1340
-247:1341
-247:1344
-247:1345
-247:1346
-247:1347
-247:1348
-247:1349
-247:1350
-247:1351
-247:1352
-247:1353
-247:1354
-247:1355
-247:1356
-247:1357
-247:1358
-247:1359
-247:1360
-247:1361
-247:1362
-247:1363
-247:1365
-247:1366
-247:1367
-247:1368
-247:1370
-247:1371
-247:1372
-247:1373
-247:1375
-247:1376
-247:1377
-247:1378
-247:1379
-247:1380
-247:1381
-247:1382
-248:1384
-248:1385
-248:1386
-248:1387
-248:1388
-248:1389
-248:1390
-248:1397
-248:1398
-248:1399
-248:1400
-248:1401
-249:1403
-249:1404
-249:1405
-253:1025
-253:1026
-253:1027
-253:1034
-253:1050
-253:1051
-253:1052
-253:1053
-253:1054
-253:1055
-253:1056
-253:1057
-253:1058
-253:1059
-253:1060
-254:1030
-254:1031
-254:1032
-254:1035
-254:1036
-254:1037
-254:1038
-254:1039
-254:1040
-254:1041
-254:1042
-254:1043
-254:1044
-254:1045
-254:1046
-254:1047
-254:1048
-257:884
-257:885
-257:892
-257:893
-257:894
-257:923
-257:924
-257:925
-257:926
-257:927
-257:928
-257:929
-257:930
-257:931
-257:932
-257:933
-257:934
-257:935
-257:936
-257:937
-257:938
-257:939
-258:889
-261:895
-261:896
-261:897
-261:898
-261:899
-262:902
-262:903
-262:904
-262:905
-262:906
-263:909
-263:910
-263:911
-263:912
-263:913
-264:916
-264:917
-264:918
-264:919
-264:920
-267:941
-267:942
-267:943
-267:949
-267:1013
-267:1014
-267:1015
-267:1016
-267:1017
-267:1018
-267:1019
-267:1020
-267:1021
-267:1022
-267:1023
-268:950
-268:951
-268:952
-268:953
-268:1011
-268:1012
-269:946
-269:955
-269:956
-269:957
-269:958
-269:959
-269:960
-269:961
-269:976
-269:977
-269:978
-269:979
-269:980
-270:962
-271:964
-272:965
-273:966
-276:947
-276:982
-276:983
-276:984
-276:985
-276:986
-276:987
-276:988
-276:989
-276:1004
-276:1005
-276:1006
-276:1007
-276:1009
-276:1010
-277:990
-278:992
-279:993
-280:994
-281:995
-282:996
-283:997
-284:998
-285:999
-286:1000
-287:1001
-293:1062
-293:1070
-293:1138
-293:1139
-293:1140
-293:1141
-293:1142
-293:1143
-293:1144
-293:1145
-293:1146
-293:1147
-293:1148
-294:1066
-295:1067
-298:1071
-298:1072
-298:1073
-298:1074
-298:1133
-298:1134
-299:1076
-299:1077
-299:1078
-299:1079
-299:1080
-299:1081
-299:1089
-299:1090
-299:1091
-299:1092
-299:1093
-300:1095
-300:1096
-300:1097
-300:1098
-300:1099
-300:1100
-300:1101
-300:1102
-300:1103
-300:1104
-300:1105
-300:1107
-300:1108
-301:1110
-301:1111
-301:1112
-301:1113
-301:1114
-301:1115
-301:1116
-301:1117
-301:1118
-301:1122
-301:1123
-301:1124
-301:1125
-301:1127
-301:1128
-301:1129
-302:1119
-302:1120
-304:1130
-304:1131
-304:1132
-306:1135
-306:1136
-310:1421
-310:1428
-310:1441
-310:1442
-310:1443
-310:1444
-310:1445
-310:1446
-310:1447
-310:1448
-310:1449
-310:1450
-310:1451
-311:1424
-311:1425
-311:1426
-311:1429
-311:1430
-311:1431
-311:1432
-311:1433
-311:1434
-311:1435
-311:1436
-311:1437
-311:1438
-311:1439
-311:1440
-314:1453
-314:1469
-314:1495
-314:1496
-314:1497
-314:1498
-314:1499
-314:1500
-314:1501
-314:1502
-314:1503
-314:1504
-314:1505
-315:1457
-316:1458
-317:1459
-318:1460
-319:1461
-320:1462
-321:1463
-322:1464
-323:1465
-324:1466
-327:1470
-327:1471
-327:1472
-327:1473
-327:1475
-327:1476
-327:1477
-327:1478
-327:1479
-327:1480
-327:1481
-327:1482
-327:1483
-327:1484
-327:1485
-327:1487
-327:1488
-327:1489
-327:1490
-327:1491
-327:1492
-327:1493
-327:1494
-330:1150
-330:1154
-330:1155
-330:1156
-330:1202
-330:1203
-330:1204
-330:1205
-330:1206
-330:1207
-330:1208
-330:1209
-330:1210
-330:1211
-330:1212
-330:1213
-330:1214
-330:1215
-330:1216
-330:1217
-331:1157
-331:1158
-331:1160
-331:1161
-331:1162
-331:1163
-331:1164
-331:1165
-331:1166
-331:1167
-331:1168
-331:1169
-331:1170
-331:1172
-331:1173
-331:1174
-331:1176
-331:1177
-331:1178
-331:1179
-331:1180
-331:1181
-331:1188
-331:1189
-331:1190
-331:1191
-331:1192
-332:1196
-332:1197
-332:1198
-332:1199
-335:1833
-335:1837
-335:1853
-335:1854
-335:1855
-335:1856
-335:1857
-335:1858
-335:1859
-335:1860
-335:1861
-335:1862
-335:1863
-336:1838
-336:1839
-336:1840
-336:1841
-336:1842
-336:1843
-336:1844
-336:1845
-336:1846
-336:1847
-336:1848
-336:1849
-336:1850
-336:1851
-339:1865
-339:1869
-339:1882
-339:1883
-339:1884
-339:1885
-339:1886
-339:1887
-339:1888
-339:1889
-339:1890
-339:1891
-339:1892
-340:1870
-340:1871
-340:1872
-340:1873
-340:1874
-340:1875
-340:1876
-340:1877
-340:1878
-340:1879
-340:1880
-343:1581
-343:1590
-343:1591
-343:1592
-343:1816
-343:1817
-343:1818
-343:1819
-343:1820
-343:1821
-343:1822
-343:1823
-343:1824
-343:1825
-343:1826
-343:1827
-343:1828
-343:1829
-343:1830
-343:1831
-344:1593
-344:1594
-344:1595
-344:1596
-344:1597
-344:1598
-344:1599
-344:1600
-344:1601
-344:1602
-345:1605
-345:1606
-345:1607
-345:1608
-345:1609
-345:1610
-345:1611
-345:1612
-345:1613
-345:1614
-346:1617
-346:1618
-346:1619
-346:1620
-346:1621
-346:1622
-346:1623
-346:1624
-347:1627
-347:1628
-347:1629
-347:1630
-347:1631
-347:1632
-347:1633
-347:1634
-347:1635
-347:1636
-348:1639
-348:1640
-348:1641
-348:1642
-348:1643
-348:1644
-348:1645
-348:1646
-348:1647
-348:1648
-348:1649
-348:1650
-349:1653
-349:1654
-349:1655
-349:1656
-349:1657
-349:1658
-349:1659
-349:1660
-349:1661
-349:1662
-349:1663
-349:1664
-350:1584
-350:1585
-350:1667
-350:1668
-350:1669
-350:1670
-350:1671
-350:1672
-350:1673
-350:1674
-350:1675
-350:1676
-350:1677
-350:1678
-350:1679
-350:1680
-351:1681
-352:1683
-353:1684
-354:1685
-355:1686
-356:1687
-357:1688
-358:1689
-359:1690
-360:1691
-361:1692
-362:1693
-364:1586
-364:1587
-364:1698
-364:1699
-364:1700
-364:1701
-364:1702
-364:1703
-364:1704
-364:1705
-364:1706
-364:1707
-364:1708
-364:1709
-364:1725
-364:1726
-365:1710
-366:1712
-367:1713
-368:1714
-369:1715
-370:1716
-371:1717
-372:1718
-373:1719
-374:1720
-375:1721
-376:1722
-379:1729
-379:1730
-379:1731
-379:1732
-379:1733
-379:1734
-379:1735
-380:1738
-380:1739
-380:1740
-380:1741
-381:1744
-381:1745
-381:1746
-381:1747
-381:1748
-381:1749
-381:1750
-381:1751
-381:1752
-381:1753
-382:1588
-382:1756
-382:1757
-382:1758
-382:1759
-382:1760
-383:1761
-384:1763
-385:1764
-387:1769
-387:1770
-387:1771
-387:1772
-387:1773
-388:1774
-389:1776
-390:1777
-392:1782
-392:1783
-392:1784
-392:1785
-392:1786
-393:1789
-393:1790
-393:1791
-393:1792
-393:1793
-394:1796
-394:1797
-394:1798
-394:1799
-394:1800
-395:1801
-396:1803
-397:1804
-399:1809
-399:1810
-399:1811
-399:1812
-399:1813
-402:2048
-402:2052
-402:2053
-402:2054
-402:2073
-402:2074
-402:2075
-402:2076
-402:2077
-402:2078
-402:2079
-402:2080
-402:2082
-402:2083
-402:2085
-402:2086
-402:2087
-402:2088
-402:2089
-402:2090
-402:2091
-402:2092
-402:2093
-402:2094
-402:2095
-402:2105
-402:2115
-402:2116
-402:2117
-402:2118
-402:2119
-402:2120
-402:2121
-402:2122
-402:2123
-402:2124
-402:2125
-402:2126
-402:2127
-402:2128
-402:2129
-402:2130
-403:2055
-403:2056
-403:2057
-403:2058
-404:2061
-404:2062
-404:2063
-404:2064
-404:2065
-404:2066
-404:2067
-404:2068
-404:2069
-404:2070
-405:2096
-405:2097
-405:2098
-405:2099
-405:2100
-405:2101
-405:2102
-405:2103
-405:2104
-406:2106
-406:2107
-406:2108
-406:2109
-406:2110
-406:2111
-406:2112
-406:2113
-406:2114
-411:2171
-411:2173
-411:2180
-411:2226
-411:2227
-411:2228
-411:2229
-411:2230
-411:2231
-411:2232
-411:2233
-411:2234
-411:2235
-411:2236
-412:2172
-412:2177
-415:2182
-415:2183
-415:2184
-415:2185
-415:2186
-415:2187
-415:2188
-415:2189
-415:2190
-415:2191
-415:2192
-415:2193
-415:2208
-415:2209
-415:2210
-415:2211
-415:2212
-416:2196
-416:2197
-416:2198
-416:2199
-416:2200
-416:2201
-416:2202
-416:2203
-416:2204
-416:2205
-418:2214
-419:2216
-420:2217
-421:2218
-422:2219
-423:2220
-424:2221
-425:2222
-426:2223
-430:2238
-430:2242
-430:2264
-430:2265
-430:2266
-430:2267
-430:2268
-430:2269
-430:2270
-430:2271
-430:2272
-430:2273
-430:2274
-431:2243
-431:2244
-431:2245
-431:2246
-431:2247
-431:2248
-431:2249
-431:2250
-431:2251
-431:2252
-431:2253
-431:2254
-431:2255
-431:2256
-431:2257
-431:2258
-431:2259
-431:2260
-431:2261
-431:2262
-431:2263
-434:2132
-434:2136
-434:2137
-434:2138
-434:2139
-434:2140
-434:2141
-434:2142
-434:2143
-434:2144
-434:2145
-434:2146
-434:2147
-434:2148
-434:2149
-434:2150
-434:2151
-434:2152
-434:2153
-434:2155
-434:2156
-434:2157
-434:2158
-434:2159
-434:2160
-434:2161
-434:2162
-434:2163
-434:2164
-434:2165
-434:2166
-434:2167
-434:2168
-434:2169
-437:1894
-437:1904
-437:1905
-437:1906
-437:2031
-437:2032
-437:2033
-437:2034
-437:2035
-437:2036
-437:2037
-437:2038
-437:2039
-437:2040
-437:2041
-437:2042
-437:2043
-437:2044
-437:2045
-437:2046
-438:1897
-438:1898
-438:1907
-438:1908
-438:1909
-438:1910
-438:1911
-438:1912
-438:1914
-438:1915
-438:1916
-438:1917
-438:1918
-438:1919
-438:1920
-438:1927
-438:1928
-438:1929
-438:1930
-438:1931
-438:1933
-438:1934
-439:1935
-440:1937
-441:1938
-442:1939
-443:1940
-444:1941
-446:1899
-446:1900
-446:1946
-446:1947
-446:1948
-446:1949
-446:1950
-446:1951
-446:1953
-446:1954
-446:1955
-446:1956
-446:1957
-446:1958
-446:1959
-446:1966
-446:1967
-446:1968
-446:1969
-446:1970
-446:1972
-446:1973
-447:1974
-448:1976
-449:1977
-450:1978
-451:1979
-452:1980
-453:1981
-454:1982
-455:1983
-456:1984
-457:1985
-459:1901
-459:1990
-459:1991
-459:1992
-459:1993
-459:1994
-460:1995
-461:1997
-462:1998
-463:1999
-464:2000
-465:2001
-466:2002
-468:1902
-468:2007
-468:2008
-468:2009
-468:2010
-468:2011
-469:2012
-470:2014
-471:2015
-472:2016
-473:2017
-474:2018
-475:2019
-477:2024
-477:2025
-477:2026
-477:2027
-477:2028
-480:2276
-480:2280
-480:2281
-480:2282
-480:2297
-480:2298
-480:2299
-480:2300
-480:2301
-480:2302
-480:2303
-480:2304
-480:2305
-480:2306
-480:2307
-480:2308
-480:2309
-480:2310
-480:2311
-480:2312
-481:2283
-481:2284
-481:2285
-481:2286
-481:2287
-482:2290
-482:2291
-482:2292
-482:2293
-482:2294
-485:1507
-485:1518
-485:1569
-485:1570
-485:1571
-485:1572
-485:1573
-485:1574
-485:1575
-485:1576
-485:1577
-485:1578
-485:1579
-486:1512
-487:1513
-488:1514
-489:1515
-492:1519
-492:1520
-492:1521
-492:1562
-492:1563
-492:1564
-492:1565
-492:1567
-492:1568
-493:1510
-493:1522
-493:1523
-493:1524
-493:1525
-493:1526
-493:1527
-493:1529
-493:1530
-493:1531
-493:1532
-493:1533
-493:1534
-493:1535
-493:1544
-493:1545
-493:1546
-493:1547
-493:1548
-493:1550
-493:1551
-493:1552
-493:1553
-494:1554
-495:1556
-496:1557
-497:1558
-498:1559
-504:2397
-504:2409
-504:2431
-504:2432
-504:2433
-504:2434
-504:2435
-504:2436
-504:2437
-504:2438
-504:2439
-504:2440
-504:2441
-505:2401
-506:2402
-507:2403
-508:2404
-509:2405
-510:2406
-513:2410
-513:2411
-513:2412
-513:2413
-513:2414
-513:2415
-513:2416
-513:2417
-513:2418
-513:2419
-513:2420
-515:2421
-516:2423
-517:2424
-518:2425
-519:2426
-520:2427
-521:2428
-525:2314
-525:2319
-525:2376
-525:2380
-525:2381
-525:2382
-525:2383
-525:2385
-525:2386
-525:2387
-525:2388
-525:2389
-525:2390
-525:2391
-525:2392
-525:2393
-525:2394
-525:2395
-527:2317
-527:2320
-527:2321
-527:2322
-527:2323
-527:2324
-527:2325
-527:2327
-527:2328
-527:2329
-527:2330
-527:2331
-527:2332
-527:2333
-527:2334
-527:2335
-527:2336
-527:2337
-527:2338
-527:2339
-527:2341
-527:2342
-527:2343
-527:2344
-527:2345
-527:2346
-527:2347
-527:2348
-527:2349
-527:2350
-527:2351
-527:2353
-527:2354
-527:2355
-527:2358
-527:2359
-527:2360
-527:2361
-527:2362
-527:2365
-527:2366
-527:2367
-527:2368
-527:2369
-527:2371
-527:2372
-527:2373
-527:2374
-527:2375
-528:2377
-528:2378
-528:2379
-531:2443
-531:2447
-531:2448
-531:2449
-531:2475
-531:2476
-531:2477
-531:2478
-531:2479
-531:2480
-531:2481
-531:2482
-531:2483
-531:2484
-531:2485
-531:2486
-531:2487
-531:2488
-531:2489
-531:2490
-532:2450
-532:2451
-532:2452
-532:2453
-532:2454
-532:2455
-532:2456
-532:2457
-532:2458
-533:2461
-533:2462
-533:2463
-533:2464
-533:2465
-533:2466
-534:2469
-534:2470
-534:2471
-534:2472
-537:2794
-537:2798
-537:2799
-537:2800
-537:2837
-537:2838
-537:2839
-537:2840
-537:2841
-537:2842
-537:2843
-537:2844
-537:2845
-537:2846
-537:2847
-537:2848
-537:2849
-537:2850
-537:2851
-537:2852
-538:2801
-538:2802
-538:2803
-538:2804
-538:2805
-538:2806
-538:2807
-538:2808
-538:2809
-538:2810
-539:2813
-539:2814
-539:2815
-539:2816
-539:2817
-539:2818
-539:2819
-539:2820
-539:2821
-539:2822
-540:2825
-540:2826
-540:2827
-540:2828
-540:2829
-540:2830
-540:2831
-540:2832
-540:2833
-540:2834
-543:2854
-543:2858
-543:2881
-543:2882
-543:2883
-543:2884
-543:2885
-543:2886
-543:2887
-543:2888
-543:2889
-543:2890
-543:2891
-544:2859
-544:2860
-544:2861
-544:2862
-544:2863
-544:2864
-544:2865
-544:2866
-544:2867
-544:2868
-544:2869
-544:2870
-544:2871
-544:2872
-544:2873
-544:2874
-544:2875
-544:2877
-544:2878
-544:2879
-544:2880
-547:2654
-547:2680
-547:2681
-547:2682
-547:2777
-547:2778
-547:2779
-547:2780
-547:2781
-547:2782
-547:2783
-547:2784
-547:2785
-547:2786
-547:2787
-547:2788
-547:2789
-547:2790
-547:2791
-547:2792
-548:2659
-549:2660
-550:2661
-551:2662
-552:2663
-553:2664
-554:2665
-555:2666
-556:2667
-557:2668
-558:2669
-559:2670
-560:2671
-561:2672
-562:2673
-563:2674
-564:2675
-565:2676
-566:2677
-569:2683
-569:2684
-569:2685
-569:2686
-569:2687
-570:2657
-570:2690
-570:2691
-570:2692
-570:2693
-570:2695
-570:2696
-570:2697
-570:2698
-570:2699
-570:2700
-570:2701
-570:2702
-570:2704
-570:2705
-570:2706
-570:2707
-570:2708
-570:2709
-570:2710
-570:2717
-570:2718
-570:2719
-570:2720
-570:2721
-570:2723
-570:2724
-570:2727
-570:2728
-570:2729
-570:2730
-570:2731
-570:2734
-570:2735
-570:2736
-570:2737
-570:2738
-570:2741
-570:2742
-570:2743
-570:2744
-570:2745
-571:2747
-572:2749
-573:2750
-574:2751
-575:2752
-578:2757
-578:2758
-578:2759
-578:2760
-578:2761
-580:2764
-580:2765
-580:2766
-580:2767
-580:2768
-581:2769
-582:2771
-583:2772
-587:2492
-587:2501
-587:2502
-587:2503
-587:2637
-587:2638
-587:2639
-587:2640
-587:2641
-587:2642
-587:2643
-587:2644
-587:2645
-587:2646
-587:2647
-587:2648
-587:2649
-587:2650
-587:2651
-587:2652
-588:2504
-588:2505
-588:2506
-588:2507
-588:2508
-588:2509
-588:2510
-588:2511
-588:2512
-588:2513
-588:2514
-588:2515
-588:2516
-588:2517
-589:2495
-589:2496
-589:2520
-589:2521
-589:2522
-589:2523
-589:2524
-589:2525
-589:2527
-589:2528
-589:2529
-589:2530
-589:2531
-589:2532
-589:2533
-589:2536
-589:2537
-589:2538
-589:2539
-589:2540
-589:2543
-589:2544
-589:2545
-589:2546
-589:2547
-589:2620
-589:2621
-590:2549
-590:2550
-590:2551
-590:2552
-590:2583
-590:2584
-591:2497
-591:2498
-591:2553
-591:2554
-591:2555
-591:2556
-591:2557
-591:2558
-591:2559
-591:2560
-591:2561
-591:2562
-591:2563
-591:2564
-591:2565
-591:2566
-591:2567
-591:2568
-591:2569
-591:2576
-591:2577
-591:2578
-591:2579
-591:2581
-591:2582
-592:2570
-593:2572
-594:2573
-598:2585
-599:2587
-600:2588
-601:2589
-602:2590
-605:2594
-605:2595
-605:2596
-605:2597
-605:2598
-605:2599
-605:2600
-605:2614
-605:2615
-605:2616
-605:2617
-605:2618
-606:2603
-606:2604
-606:2605
-606:2606
-606:2607
-610:2499
-610:2624
-610:2625
-610:2626
-610:2627
-610:2628
-611:2629
-612:2631
-613:2632
-*E
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.java b/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.java
deleted file mode 100644
index e4b12fa..0000000
--- a/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.java
+++ /dev/null
@@ -1,130 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "define.g" -> "DefineGrammarItemsWalker.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-	package org.antlr.tool;
-	import java.util.*;
-	import org.antlr.misc.*;
-
-public interface DefineGrammarItemsWalkerTokenTypes {
-	int EOF = 1;
-	int NULL_TREE_LOOKAHEAD = 3;
-	int OPTIONS = 4;
-	int TOKENS = 5;
-	int PARSER = 6;
-	int LEXER = 7;
-	int RULE = 8;
-	int BLOCK = 9;
-	int OPTIONAL = 10;
-	int CLOSURE = 11;
-	int POSITIVE_CLOSURE = 12;
-	int SYNPRED = 13;
-	int RANGE = 14;
-	int CHAR_RANGE = 15;
-	int EPSILON = 16;
-	int ALT = 17;
-	int EOR = 18;
-	int EOB = 19;
-	int EOA = 20;
-	int ID = 21;
-	int ARG = 22;
-	int ARGLIST = 23;
-	int RET = 24;
-	int LEXER_GRAMMAR = 25;
-	int PARSER_GRAMMAR = 26;
-	int TREE_GRAMMAR = 27;
-	int COMBINED_GRAMMAR = 28;
-	int INITACTION = 29;
-	int LABEL = 30;
-	int TEMPLATE = 31;
-	int SCOPE = 32;
-	int GATED_SEMPRED = 33;
-	int SYN_SEMPRED = 34;
-	int BACKTRACK_SEMPRED = 35;
-	int FRAGMENT = 36;
-	int ACTION = 37;
-	int DOC_COMMENT = 38;
-	int SEMI = 39;
-	int LITERAL_lexer = 40;
-	int LITERAL_tree = 41;
-	int LITERAL_grammar = 42;
-	int AMPERSAND = 43;
-	int COLON = 44;
-	int RCURLY = 45;
-	int ASSIGN = 46;
-	int STRING_LITERAL = 47;
-	int CHAR_LITERAL = 48;
-	int INT = 49;
-	int STAR = 50;
-	int TOKEN_REF = 51;
-	int LITERAL_protected = 52;
-	int LITERAL_public = 53;
-	int LITERAL_private = 54;
-	int BANG = 55;
-	int ARG_ACTION = 56;
-	int LITERAL_returns = 57;
-	int LITERAL_throws = 58;
-	int COMMA = 59;
-	int LPAREN = 60;
-	int OR = 61;
-	int RPAREN = 62;
-	int LITERAL_catch = 63;
-	int LITERAL_finally = 64;
-	int PLUS_ASSIGN = 65;
-	int SEMPRED = 66;
-	int IMPLIES = 67;
-	int ROOT = 68;
-	int RULE_REF = 69;
-	int NOT = 70;
-	int TREE_BEGIN = 71;
-	int QUESTION = 72;
-	int PLUS = 73;
-	int WILDCARD = 74;
-	int REWRITE = 75;
-	int DOLLAR = 76;
-	int DOUBLE_QUOTE_STRING_LITERAL = 77;
-	int DOUBLE_ANGLE_STRING_LITERAL = 78;
-	int WS = 79;
-	int COMMENT = 80;
-	int SL_COMMENT = 81;
-	int ML_COMMENT = 82;
-	int OPEN_ELEMENT_OPTION = 83;
-	int CLOSE_ELEMENT_OPTION = 84;
-	int ESC = 85;
-	int DIGIT = 86;
-	int XDIGIT = 87;
-	int NESTED_ARG_ACTION = 88;
-	int NESTED_ACTION = 89;
-	int ACTION_CHAR_LITERAL = 90;
-	int ACTION_STRING_LITERAL = 91;
-	int ACTION_ESC = 92;
-	int WS_LOOP = 93;
-	int INTERNAL_RULE_REF = 94;
-	int WS_OPT = 95;
-	int SRC = 96;
-}
diff --git a/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.txt b/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.txt
deleted file mode 100644
index 02b9fdc..0000000
--- a/src/org/antlr/tool/DefineGrammarItemsWalkerTokenTypes.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): define.g -> DefineGrammarItemsWalkerTokenTypes.txt$
-DefineGrammarItemsWalker    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
diff --git a/src/org/antlr/tool/RandomPhrase.java b/src/org/antlr/tool/RandomPhrase.java
deleted file mode 100644
index 80b69e6..0000000
--- a/src/org/antlr/tool/RandomPhrase.java
+++ /dev/null
@@ -1,180 +0,0 @@
-package org.antlr.tool;
-
-import org.antlr.analysis.NFAState;
-import org.antlr.analysis.RuleClosureTransition;
-import org.antlr.analysis.Transition;
-import org.antlr.analysis.Label;
-import org.antlr.misc.IntSet;
-import org.antlr.misc.Utils;
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Stack;
-import java.util.Random;
-
-/** Generate a random phrase given a grammar.
- *  Usage:
- *     java org.antlr.tool.RandomPhrase grammarFile.g startRule [seed]
- *
- *  For example:
- *     java org.antlr.tool.RandomPhrase simple.g program 342
- *
- *  The seed acts like a unique identifier so you can get the same random
- *  phrase back during unit testing, for example.
- *
- *  If you do not specify a seed then the current time in milliseconds is used
- *  guaranteeing that you'll never see that seed again.
- */
-public class RandomPhrase {
-	protected static Random random;
-
-	/** an experimental method to generate random phrases for a given
-	 *  grammar given a start rule.  Return a list of token types.
-	 */
-	protected static void randomPhrase(Grammar g, List tokenTypes, String startRule) {
-		NFAState state = g.getRuleStartState(startRule);
-		NFAState stopState = g.getRuleStopState(startRule);
-
-		Stack ruleInvocationStack = new Stack();
-		while ( true ) {
-			if ( state==stopState && ruleInvocationStack.size()==0 ) {
-				break;
-			}
-			//System.out.println("state "+state);
-			if ( state.getNumberOfTransitions()==0 ) {
-				//System.out.println("dangling state: "+state);
-				return;
-			}
-			// end of rule node
-			if ( state.isAcceptState() ) {
-				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
-				// System.out.println("pop invoking state "+invokingState);
-				RuleClosureTransition invokingTransition =
-					(RuleClosureTransition)invokingState.transition(0);
-				// move to node after state that invoked this rule
-				state = invokingTransition.getFollowState();
-				continue;
-			}
-			if ( state.getNumberOfTransitions()==1 ) {
-				// no branching, just take this path
-				Transition t0 = state.transition(0);
-				if ( t0 instanceof RuleClosureTransition ) {
-					ruleInvocationStack.push(state);
-					// System.out.println("push state "+state);
-					int ruleIndex = ((RuleClosureTransition)t0).getRuleIndex();
-					//System.out.println("invoke "+g.getRuleName(ruleIndex));
-				}
-				else if ( !t0.label.isEpsilon() ) {
-					tokenTypes.add( getTokenType(t0.label) );
-					//System.out.println(t0.label.toString(g));
-				}
-				state = (NFAState)t0.target;
-				continue;
-			}
-
-			int decisionNumber = state.getDecisionNumber();
-			if ( decisionNumber==0 ) {
-				System.out.println("weird: no decision number but a choice node");
-				continue;
-			}
-			// decision point, pick ith alternative randomly
-			int n = g.getNumberOfAltsForDecisionNFA(state);
-			int randomAlt = random.nextInt(n) + 1;
-			//System.out.println("randomAlt="+randomAlt);
-			NFAState altStartState =
-				g.getNFAStateForAltOfDecision(state, randomAlt);
-			Transition t = altStartState.transition(0);
-			/*
-			start of a decision could never be a labeled transition
-			if ( !t.label.isEpsilon() ) {
-				tokenTypes.add( getTokenType(t.label) );
-			}
-			*/
-			state = (NFAState)t.target;
-		}
-	}
-
-	protected static Integer getTokenType(Label label) {
-		if ( label.isSet() ) {
-			// pick random element of set
-			IntSet typeSet = label.getSet();
-			List typeList = typeSet.toList();
-			int randomIndex = random.nextInt(typeList.size());
-			return (Integer)typeList.get(randomIndex);
-		}
-		else {
-			return Utils.integer(label.getAtom());
-		}
-		//System.out.println(t0.label.toString(g));
-	}
-
-	/** Used to generate random strings */
-	public static void main(String[] args) throws Exception {
-		String grammarFileName = args[0];
-		String startRule = args[1];
-		long seed = System.currentTimeMillis(); // use random seed unless spec.
-		if ( args.length==3 ) {
-			String seedStr = args[2];
-			seed = Integer.parseInt(seedStr);
-		}
-		random = new Random(seed);
-
-		Grammar parser =
-			new Grammar(null,
-						grammarFileName,
-						new BufferedReader(new FileReader(grammarFileName)));
-		parser.createNFAs();
-
-		List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
-		if ( leftRecursiveRules.size()>0 ) {
-			return;
-		}
-
-		if ( parser.getRule(startRule)==null ) {
-			System.out.println("undefined start rule "+startRule);
-			return;
-		}
-
-		String lexerGrammarText = parser.getLexerGrammar();
-		Grammar lexer = new Grammar();
-		lexer.importTokenVocabulary(parser);
-		if ( lexerGrammarText!=null ) {
-			lexer.setGrammarContent(lexerGrammarText);
-		}
-		else {
-			System.err.println("no lexer grammar found in "+grammarFileName);
-		}
-		lexer.createNFAs();
-		leftRecursiveRules = lexer.checkAllRulesForLeftRecursion();
-		if ( leftRecursiveRules.size()>0 ) {
-			return;
-		}
-
-		List tokenTypes = new ArrayList(100);
-		randomPhrase(parser, tokenTypes, startRule);
-		//System.out.println("token types="+tokenTypes);
-		for (int i = 0; i < tokenTypes.size(); i++) {
-			Integer ttypeI = (Integer) tokenTypes.get(i);
-			int ttype = ttypeI.intValue();
-			String ttypeDisplayName = parser.getTokenDisplayName(ttype);
-			if ( Character.isUpperCase(ttypeDisplayName.charAt(0)) ) {
-				List charsInToken = new ArrayList(10);
-				randomPhrase(lexer, charsInToken, ttypeDisplayName);
-				System.out.print(" ");
-				for (int j = 0; j < charsInToken.size(); j++) {
-					java.lang.Integer cI = (java.lang.Integer) charsInToken.get(j);
-					System.out.print((char)cI.intValue());
-				}
-			}
-			else { // it's a literal
-				String literal =
-					ttypeDisplayName.substring(1,ttypeDisplayName.length()-1);
-				System.out.print(" "+literal);
-			}
-		}
-		System.out.println();
-	}
-
-}
diff --git a/src/org/antlr/tool/TreeToNFAConverter.java b/src/org/antlr/tool/TreeToNFAConverter.java
deleted file mode 100644
index e07e8c2..0000000
--- a/src/org/antlr/tool/TreeToNFAConverter.java
+++ /dev/null
@@ -1,2852 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "buildnfa.g" -> "TreeToNFAConverter.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.tool;
-import java.util.*;
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-
-import antlr.TreeParser;
-import antlr.Token;
-import antlr.collections.AST;
-import antlr.RecognitionException;
-import antlr.ANTLRException;
-import antlr.NoViableAltException;
-import antlr.MismatchedTokenException;
-import antlr.SemanticException;
-import antlr.collections.impl.BitSet;
-import antlr.ASTPair;
-import antlr.collections.impl.ASTArray;
-
-
-/** Build an NFA from a tree representing an ANTLR grammar. */
-public class TreeToNFAConverter extends antlr.TreeParser       implements TreeToNFAConverterTokenTypes
- {
-
-/** Factory used to create nodes and submachines */
-protected NFAFactory factory = null;
-
-/** Which NFA object are we filling in? */
-protected NFA nfa = null;
-
-/** Which grammar are we converting an NFA for? */
-protected Grammar grammar = null;
-
-protected String currentRuleName = null;
-
-protected int outerAltNum = 0;
-protected int blockLevel = 0;
-
-public TreeToNFAConverter(Grammar g, NFA nfa, NFAFactory factory) {
-	this();
-	this.grammar = g;
-	this.nfa = nfa;
-	this.factory = factory;
-}
-
-protected void init() {
-    // define all the rule begin/end NFAStates to solve forward reference issues
-    Collection rules = grammar.getRules();
-    for (Iterator itr = rules.iterator(); itr.hasNext();) {
-		Rule r = (Rule) itr.next();
-        String ruleName = r.name;
-        NFAState ruleBeginState = factory.newState();
-        ruleBeginState.setDescription("rule "+ruleName+" start");
-		ruleBeginState.setEnclosingRuleName(ruleName);
-        grammar.setRuleStartState(ruleName, ruleBeginState);
-        NFAState ruleEndState = factory.newState();
-        ruleEndState.setDescription("rule "+ruleName+" end");
-        ruleEndState.setAcceptState(true);
-		ruleEndState.setEnclosingRuleName(ruleName);
-        grammar.setRuleStopState(ruleName, ruleEndState);
-    }
-}
-
-protected void addFollowTransition(String ruleName, NFAState following) {
-     //System.out.println("adding follow link to rule "+ruleName);
-     // find last link in FOLLOW chain emanating from rule
-     NFAState end = grammar.getRuleStopState(ruleName);
-     while ( end.transition(1)!=null ) {
-         end = (NFAState)end.transition(1).target;
-     }
-     if ( end.transition(0)!=null ) {
-         // already points to a following node
-         // gotta add another node to keep edges to a max of 2
-         NFAState n = factory.newState();
-         Transition e = new Transition(Label.EPSILON, n);
-         end.addTransition(e);
-         end = n;
-     }
-     Transition followEdge = new Transition(Label.EPSILON, following);
-     end.addTransition(followEdge);
-}
-
-protected void finish() {
-    List rules = new LinkedList();
-    rules.addAll(grammar.getRules());
-    int numEntryPoints = factory.build_EOFStates(rules);
-    if ( numEntryPoints==0 ) {
-        ErrorManager.grammarWarning(ErrorManager.MSG_NO_GRAMMAR_START_RULE,
-                                   grammar,
-                                   null,
-                                   grammar.name);
-    }
-}
-
-    public void reportError(RecognitionException ex) {
-		Token token = null;
-		if ( ex instanceof MismatchedTokenException ) {
-			token = ((MismatchedTokenException)ex).token;
-		}
-		else if ( ex instanceof NoViableAltException ) {
-			token = ((NoViableAltException)ex).token;
-		}
-        ErrorManager.syntaxError(
-            ErrorManager.MSG_SYNTAX_ERROR,
-            grammar,
-            token,
-            "buildnfa: "+ex.toString(),
-            ex);
-    }
-public TreeToNFAConverter() {
-	tokenNames = _tokenNames;
-}
-
-	public final void grammar(AST _t) throws RecognitionException {
-		
-		GrammarAST grammar_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			init();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LEXER_GRAMMAR:
-			{
-				AST __t3 = _t;
-				GrammarAST tmp1_AST_in = (GrammarAST)_t;
-				match(_t,LEXER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t3;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PARSER_GRAMMAR:
-			{
-				AST __t4 = _t;
-				GrammarAST tmp2_AST_in = (GrammarAST)_t;
-				match(_t,PARSER_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t4;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TREE_GRAMMAR:
-			{
-				AST __t5 = _t;
-				GrammarAST tmp3_AST_in = (GrammarAST)_t;
-				match(_t,TREE_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t5;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case COMBINED_GRAMMAR:
-			{
-				AST __t6 = _t;
-				GrammarAST tmp4_AST_in = (GrammarAST)_t;
-				match(_t,COMBINED_GRAMMAR);
-				_t = _t.getFirstChild();
-				grammarSpec(_t);
-				_t = _retTree;
-				_t = __t6;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			finish();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void grammarSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST grammarSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST cmt = null;
-		
-		try {      // for error handling
-			GrammarAST tmp5_AST_in = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case DOC_COMMENT:
-			{
-				cmt = (GrammarAST)_t;
-				match(_t,DOC_COMMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case OPTIONS:
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				AST __t12 = _t;
-				GrammarAST tmp6_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getFirstChild();
-				GrammarAST tmp7_AST_in = (GrammarAST)_t;
-				if ( _t==null ) throw new MismatchedTokenException();
-				_t = _t.getNextSibling();
-				_t = __t12;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TOKENS:
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case TOKENS:
-			{
-				AST __t14 = _t;
-				GrammarAST tmp8_AST_in = (GrammarAST)_t;
-				match(_t,TOKENS);
-				_t = _t.getFirstChild();
-				GrammarAST tmp9_AST_in = (GrammarAST)_t;
-				if ( _t==null ) throw new MismatchedTokenException();
-				_t = _t.getNextSibling();
-				_t = __t14;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RULE:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop16:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==SCOPE)) {
-					attrScope(_t);
-					_t = _retTree;
-				}
-				else {
-					break _loop16;
-				}
-				
-			} while (true);
-			}
-			{
-			_loop18:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp10_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop18;
-				}
-				
-			} while (true);
-			}
-			rules(_t);
-			_t = _retTree;
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void attrScope(AST _t) throws RecognitionException {
-		
-		GrammarAST attrScope_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t8 = _t;
-			GrammarAST tmp11_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			GrammarAST tmp12_AST_in = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			GrammarAST tmp13_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t8;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rules(AST _t) throws RecognitionException {
-		
-		GrammarAST rules_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			int _cnt21=0;
-			_loop21:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==RULE)) {
-					rule(_t);
-					_t = _retTree;
-				}
-				else {
-					if ( _cnt21>=1 ) { break _loop21; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt21++;
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void rule(AST _t) throws RecognitionException {
-		
-		GrammarAST rule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		
-		StateCluster g=null;
-		StateCluster b = null;
-		String r=null;
-		
-		
-		try {      // for error handling
-			AST __t23 = _t;
-			GrammarAST tmp14_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			r=id.getText();
-			currentRuleName = r; factory.currentRuleName = r;
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			GrammarAST tmp15_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				GrammarAST tmp16_AST_in = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RET:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			}
-			{
-			GrammarAST tmp17_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ARG_ACTION:
-			{
-				GrammarAST tmp18_AST_in = (GrammarAST)_t;
-				match(_t,ARG_ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case OPTIONS:
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp19_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop32:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp20_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop32;
-				}
-				
-			} while (true);
-			}
-			GrammarAST blk = (GrammarAST)_t;
-			b=block(_t);
-			_t = _retTree;
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp21_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			
-			if ( blk.setValue!=null ) {
-			// if block comes back as a set not BLOCK, make it
-			// a single ALT block
-			b = factory.build_AlternativeBlockFromSet(b);
-			}
-							if ( Character.isLowerCase(r.charAt(0)) ||
-								 grammar.type==Grammar.LEXER )
-							{
-								// attach start node to block for this rule
-								NFAState start = grammar.getRuleStartState(r);
-								start.setAssociatedASTNode(id);
-								start.addTransition(new Transition(Label.EPSILON, b.left));
-			
-								// track decision if > 1 alts
-								if ( grammar.getNumberOfAltsForDecisionNFA(b.left)>1 ) {
-									b.left.setDescription(grammar.grammarTreeToString(rule_AST_in,false));
-									b.left.setDecisionASTNode(blk);
-									int d = grammar.assignDecisionNumber( b.left );
-									grammar.setDecisionNFA( d, b.left );
-				grammar.setDecisionBlockAST(d, blk);
-								}
-			
-								// hook to end of rule node
-								NFAState end = grammar.getRuleStopState(r);
-								b.right.addTransition(new Transition(Label.EPSILON,end));
-							}
-			
-			_t = __t23;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void modifier(AST _t) throws RecognitionException {
-		
-		GrammarAST modifier_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_protected:
-			{
-				GrammarAST tmp22_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_protected);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_public:
-			{
-				GrammarAST tmp23_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_public);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case LITERAL_private:
-			{
-				GrammarAST tmp24_AST_in = (GrammarAST)_t;
-				match(_t,LITERAL_private);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case FRAGMENT:
-			{
-				GrammarAST tmp25_AST_in = (GrammarAST)_t;
-				match(_t,FRAGMENT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void ruleScopeSpec(AST _t) throws RecognitionException {
-		
-		GrammarAST ruleScopeSpec_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t36 = _t;
-			GrammarAST tmp26_AST_in = (GrammarAST)_t;
-			match(_t,SCOPE);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ACTION:
-			{
-				GrammarAST tmp27_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case 3:
-			case ID:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop39:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ID)) {
-					GrammarAST tmp28_AST_in = (GrammarAST)_t;
-					match(_t,ID);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop39;
-				}
-				
-			} while (true);
-			}
-			_t = __t36;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StateCluster  block(AST _t) throws RecognitionException {
-		StateCluster g = null;
-		
-		GrammarAST block_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StateCluster a = null;
-		List alts = new LinkedList();
-		this.blockLevel++;
-		if ( this.blockLevel==1 ) {this.outerAltNum=1;}
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			if (((_t.getType()==BLOCK))&&(grammar.isValidSet(this,block_AST_in) &&
-		 !currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME))) {
-				g=set(_t);
-				_t = _retTree;
-				this.blockLevel--;
-			}
-			else if ((_t.getType()==BLOCK)) {
-				AST __t41 = _t;
-				GrammarAST tmp29_AST_in = (GrammarAST)_t;
-				match(_t,BLOCK);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case OPTIONS:
-				{
-					GrammarAST tmp30_AST_in = (GrammarAST)_t;
-					match(_t,OPTIONS);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case ALT:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				{
-				int _cnt44=0;
-				_loop44:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==ALT)) {
-						a=alternative(_t);
-						_t = _retTree;
-						rewrite(_t);
-						_t = _retTree;
-						
-						alts.add(a);
-						if ( this.blockLevel==1 ) {this.outerAltNum++;}
-						
-					}
-					else {
-						if ( _cnt44>=1 ) { break _loop44; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt44++;
-				} while (true);
-				}
-				GrammarAST tmp31_AST_in = (GrammarAST)_t;
-				match(_t,EOB);
-				_t = _t.getNextSibling();
-				_t = __t41;
-				_t = _t.getNextSibling();
-				g = factory.build_AlternativeBlock(alts);
-				this.blockLevel--;
-			}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final void exceptionGroup(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionGroup_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			{
-				{
-				int _cnt51=0;
-				_loop51:
-				do {
-					if (_t==null) _t=ASTNULL;
-					if ((_t.getType()==LITERAL_catch)) {
-						exceptionHandler(_t);
-						_t = _retTree;
-					}
-					else {
-						if ( _cnt51>=1 ) { break _loop51; } else {throw new NoViableAltException(_t);}
-					}
-					
-					_cnt51++;
-				} while (true);
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case LITERAL_finally:
-				{
-					finallyClause(_t);
-					_t = _retTree;
-					break;
-				}
-				case EOR:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				break;
-			}
-			case LITERAL_finally:
-			{
-				finallyClause(_t);
-				_t = _retTree;
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StateCluster  set(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST set_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST b = null;
-		
-		IntSet elements=new IntervalSet();
-		set_AST_in.setSetValue(elements); // track set for use by code gen
-		
-		
-		try {      // for error handling
-			AST __t99 = _t;
-			b = _t==ASTNULL ? null :(GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			int _cnt103=0;
-			_loop103:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					AST __t101 = _t;
-					GrammarAST tmp32_AST_in = (GrammarAST)_t;
-					match(_t,ALT);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BACKTRACK_SEMPRED:
-					{
-						GrammarAST tmp33_AST_in = (GrammarAST)_t;
-						match(_t,BACKTRACK_SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case BLOCK:
-					case CHAR_RANGE:
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case TOKEN_REF:
-					case NOT:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					setElement(_t,elements);
-					_t = _retTree;
-					GrammarAST tmp34_AST_in = (GrammarAST)_t;
-					match(_t,EOA);
-					_t = _t.getNextSibling();
-					_t = __t101;
-					_t = _t.getNextSibling();
-				}
-				else {
-					if ( _cnt103>=1 ) { break _loop103; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt103++;
-			} while (true);
-			}
-			GrammarAST tmp35_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t99;
-			_t = _t.getNextSibling();
-			
-			g = factory.build_Set(elements);
-			b.followingNFAState = g.right;
-			b.setValue = elements; // track set value of this block
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final StateCluster  alternative(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST alternative_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StateCluster e = null;
-		
-		
-		try {      // for error handling
-			AST __t46 = _t;
-			GrammarAST tmp36_AST_in = (GrammarAST)_t;
-			match(_t,ALT);
-			_t = _t.getFirstChild();
-			{
-			int _cnt48=0;
-			_loop48:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_tokenSet_0.member(_t.getType()))) {
-					e=element(_t);
-					_t = _retTree;
-					g = factory.build_AB(g,e);
-				}
-				else {
-					if ( _cnt48>=1 ) { break _loop48; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt48++;
-			} while (true);
-			}
-			_t = __t46;
-			_t = _t.getNextSibling();
-			
-			if (g==null) { // if alt was a list of actions or whatever
-			g = factory.build_Epsilon();
-			}
-			else {
-				factory.optimizeAlternative(g);
-			}
-			
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final void rewrite(AST _t) throws RecognitionException {
-		
-		GrammarAST rewrite_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			{
-			_loop62:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==REWRITE)) {
-					
-								if ( grammar.getOption("output")==null ) {
-									ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
-															  grammar, rewrite_AST_in.token, currentRuleName);
-								}
-								
-					AST __t59 = _t;
-					GrammarAST tmp37_AST_in = (GrammarAST)_t;
-					match(_t,REWRITE);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case SEMPRED:
-					{
-						GrammarAST tmp38_AST_in = (GrammarAST)_t;
-						match(_t,SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case ALT:
-					case TEMPLATE:
-					case ACTION:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case ALT:
-					{
-						GrammarAST tmp39_AST_in = (GrammarAST)_t;
-						match(_t,ALT);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case TEMPLATE:
-					{
-						GrammarAST tmp40_AST_in = (GrammarAST)_t;
-						match(_t,TEMPLATE);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case ACTION:
-					{
-						GrammarAST tmp41_AST_in = (GrammarAST)_t;
-						match(_t,ACTION);
-						_t = _t.getNextSibling();
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					_t = __t59;
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop62;
-				}
-				
-			} while (true);
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StateCluster  element(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST element_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST a = null;
-		GrammarAST b = null;
-		GrammarAST c1 = null;
-		GrammarAST c2 = null;
-		GrammarAST pred = null;
-		GrammarAST spred = null;
-		GrammarAST bpred = null;
-		GrammarAST gpred = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				AST __t64 = _t;
-				GrammarAST tmp42_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getFirstChild();
-				g=element(_t);
-				_t = _retTree;
-				_t = __t64;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				AST __t65 = _t;
-				GrammarAST tmp43_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getFirstChild();
-				g=element(_t);
-				_t = _retTree;
-				_t = __t65;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ASSIGN:
-			{
-				AST __t66 = _t;
-				GrammarAST tmp44_AST_in = (GrammarAST)_t;
-				match(_t,ASSIGN);
-				_t = _t.getFirstChild();
-				GrammarAST tmp45_AST_in = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				g=element(_t);
-				_t = _retTree;
-				_t = __t66;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case PLUS_ASSIGN:
-			{
-				AST __t67 = _t;
-				GrammarAST tmp46_AST_in = (GrammarAST)_t;
-				match(_t,PLUS_ASSIGN);
-				_t = _t.getFirstChild();
-				GrammarAST tmp47_AST_in = (GrammarAST)_t;
-				match(_t,ID);
-				_t = _t.getNextSibling();
-				g=element(_t);
-				_t = _retTree;
-				_t = __t67;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case RANGE:
-			{
-				AST __t68 = _t;
-				GrammarAST tmp48_AST_in = (GrammarAST)_t;
-				match(_t,RANGE);
-				_t = _t.getFirstChild();
-				a = _t==ASTNULL ? null : (GrammarAST)_t;
-				atom(_t);
-				_t = _retTree;
-				b = _t==ASTNULL ? null : (GrammarAST)_t;
-				atom(_t);
-				_t = _retTree;
-				_t = __t68;
-				_t = _t.getNextSibling();
-				g = factory.build_Range(grammar.getTokenType(a.getText()),
-				grammar.getTokenType(b.getText()));
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t69 = _t;
-				GrammarAST tmp49_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				c1 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				c2 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t69;
-				_t = _t.getNextSibling();
-				
-				if ( grammar.type==Grammar.LEXER ) {
-					g = factory.build_CharRange(c1.getText(), c2.getText());
-				}
-				
-				break;
-			}
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case NOT:
-			case WILDCARD:
-			{
-				g=atom_or_notatom(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case OPTIONAL:
-			case CLOSURE:
-			case POSITIVE_CLOSURE:
-			{
-				g=ebnf(_t);
-				_t = _retTree;
-				break;
-			}
-			case TREE_BEGIN:
-			{
-				g=tree(_t);
-				_t = _retTree;
-				break;
-			}
-			case SYNPRED:
-			{
-				AST __t70 = _t;
-				GrammarAST tmp50_AST_in = (GrammarAST)_t;
-				match(_t,SYNPRED);
-				_t = _t.getFirstChild();
-				block(_t);
-				_t = _retTree;
-				_t = __t70;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ACTION:
-			{
-				GrammarAST tmp51_AST_in = (GrammarAST)_t;
-				match(_t,ACTION);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case SEMPRED:
-			{
-				pred = (GrammarAST)_t;
-				match(_t,SEMPRED);
-				_t = _t.getNextSibling();
-				g = factory.build_SemanticPredicate(pred);
-				break;
-			}
-			case SYN_SEMPRED:
-			{
-				spred = (GrammarAST)_t;
-				match(_t,SYN_SEMPRED);
-				_t = _t.getNextSibling();
-				g = factory.build_SemanticPredicate(spred);
-				break;
-			}
-			case BACKTRACK_SEMPRED:
-			{
-				bpred = (GrammarAST)_t;
-				match(_t,BACKTRACK_SEMPRED);
-				_t = _t.getNextSibling();
-				g = factory.build_SemanticPredicate(bpred);
-				break;
-			}
-			case GATED_SEMPRED:
-			{
-				gpred = (GrammarAST)_t;
-				match(_t,GATED_SEMPRED);
-				_t = _t.getNextSibling();
-				g = factory.build_SemanticPredicate(gpred);
-				break;
-			}
-			case EPSILON:
-			{
-				GrammarAST tmp52_AST_in = (GrammarAST)_t;
-				match(_t,EPSILON);
-				_t = _t.getNextSibling();
-				g = factory.build_Epsilon();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final void exceptionHandler(AST _t) throws RecognitionException {
-		
-		GrammarAST exceptionHandler_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t54 = _t;
-			GrammarAST tmp53_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_catch);
-			_t = _t.getFirstChild();
-			GrammarAST tmp54_AST_in = (GrammarAST)_t;
-			match(_t,ARG_ACTION);
-			_t = _t.getNextSibling();
-			GrammarAST tmp55_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t54;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void finallyClause(AST _t) throws RecognitionException {
-		
-		GrammarAST finallyClause_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		try {      // for error handling
-			AST __t56 = _t;
-			GrammarAST tmp56_AST_in = (GrammarAST)_t;
-			match(_t,LITERAL_finally);
-			_t = _t.getFirstChild();
-			GrammarAST tmp57_AST_in = (GrammarAST)_t;
-			match(_t,ACTION);
-			_t = _t.getNextSibling();
-			_t = __t56;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final StateCluster  atom(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST atom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST r = null;
-		GrammarAST rarg = null;
-		GrammarAST as1 = null;
-		GrammarAST t = null;
-		GrammarAST targ = null;
-		GrammarAST as2 = null;
-		GrammarAST c = null;
-		GrammarAST as3 = null;
-		GrammarAST s = null;
-		GrammarAST as4 = null;
-		GrammarAST w = null;
-		GrammarAST as5 = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case RULE_REF:
-			{
-				AST __t85 = _t;
-				r = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,RULE_REF);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ARG_ACTION:
-				{
-					rarg = (GrammarAST)_t;
-					match(_t,ARG_ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				case BANG:
-				case ROOT:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case BANG:
-				case ROOT:
-				{
-					as1 = _t==ASTNULL ? null : (GrammarAST)_t;
-					ast_suffix(_t);
-					_t = _retTree;
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t85;
-				_t = _t.getNextSibling();
-				
-				NFAState start = grammar.getRuleStartState(r.getText());
-				if ( start!=null ) {
-				int ruleIndex = grammar.getRuleIndex(r.getText());
-				g = factory.build_RuleRef(ruleIndex, start);
-				r.followingNFAState = g.right;
-				if ( g.left.transition(0) instanceof RuleClosureTransition
-					 && grammar.type!=Grammar.LEXER )
-				{
-				addFollowTransition(r.getText(), g.right);
-				}
-				// else rule ref got inlined to a set
-				}
-				
-				break;
-			}
-			case TOKEN_REF:
-			{
-				AST __t88 = _t;
-				t = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case ARG_ACTION:
-				{
-					targ = (GrammarAST)_t;
-					match(_t,ARG_ACTION);
-					_t = _t.getNextSibling();
-					break;
-				}
-				case 3:
-				case BANG:
-				case ROOT:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case BANG:
-				case ROOT:
-				{
-					as2 = _t==ASTNULL ? null : (GrammarAST)_t;
-					ast_suffix(_t);
-					_t = _retTree;
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t88;
-				_t = _t.getNextSibling();
-				
-				if ( grammar.type==Grammar.LEXER ) {
-				NFAState start = grammar.getRuleStartState(t.getText());
-				if ( start!=null ) {
-				int ruleIndex = grammar.getRuleIndex(t.getText());
-				g = factory.build_RuleRef(ruleIndex, start);
-				// don't add FOLLOW transitions in the lexer;
-				// only exact context should be used.
-				}
-				}
-				else {
-				int tokenType = grammar.getTokenType(t.getText());
-				g = factory.build_Atom(tokenType);
-				t.followingNFAState = g.right;
-				}
-				
-				break;
-			}
-			case CHAR_LITERAL:
-			{
-				AST __t91 = _t;
-				c = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case BANG:
-				case ROOT:
-				{
-					as3 = _t==ASTNULL ? null : (GrammarAST)_t;
-					ast_suffix(_t);
-					_t = _retTree;
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t91;
-				_t = _t.getNextSibling();
-				
-					if ( grammar.type==Grammar.LEXER ) {
-						g = factory.build_CharLiteralAtom(c.getText());
-					}
-					else {
-				int tokenType = grammar.getTokenType(c.getText());
-				g = factory.build_Atom(tokenType);
-				c.followingNFAState = g.right;
-					}
-					
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				AST __t93 = _t;
-				s = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case BANG:
-				case ROOT:
-				{
-					as4 = _t==ASTNULL ? null : (GrammarAST)_t;
-					ast_suffix(_t);
-					_t = _retTree;
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t93;
-				_t = _t.getNextSibling();
-				
-					if ( grammar.type==Grammar.LEXER ) {
-						g = factory.build_StringLiteralAtom(s.getText());
-					}
-					else {
-				int tokenType = grammar.getTokenType(s.getText());
-				g = factory.build_Atom(tokenType);
-				s.followingNFAState = g.right;
-					}
-					
-				break;
-			}
-			case WILDCARD:
-			{
-				AST __t95 = _t;
-				w = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,WILDCARD);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case BANG:
-				case ROOT:
-				{
-					as5 = _t==ASTNULL ? null : (GrammarAST)_t;
-					ast_suffix(_t);
-					_t = _retTree;
-					break;
-				}
-				case 3:
-				{
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				_t = __t95;
-				_t = _t.getNextSibling();
-				g = factory.build_Wildcard();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final StateCluster  atom_or_notatom(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST atom_or_notatom_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST n = null;
-		GrammarAST c = null;
-		GrammarAST ast1 = null;
-		GrammarAST t = null;
-		GrammarAST ast3 = null;
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case STRING_LITERAL:
-			case CHAR_LITERAL:
-			case TOKEN_REF:
-			case RULE_REF:
-			case WILDCARD:
-			{
-				g=atom(_t);
-				_t = _retTree;
-				break;
-			}
-			case NOT:
-			{
-				AST __t80 = _t;
-				n = _t==ASTNULL ? null :(GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				{
-				if (_t==null) _t=ASTNULL;
-				switch ( _t.getType()) {
-				case CHAR_LITERAL:
-				{
-					c = (GrammarAST)_t;
-					match(_t,CHAR_LITERAL);
-					_t = _t.getNextSibling();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast1 = _t==ASTNULL ? null : (GrammarAST)_t;
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					
-						            int ttype=0;
-								if ( grammar.type==Grammar.LEXER ) {
-								ttype = Grammar.getCharValueFromGrammarCharLiteral(c.getText());
-								}
-								else {
-								ttype = grammar.getTokenType(c.getText());
-							}
-					IntSet notAtom = grammar.complement(ttype);
-					if ( notAtom.isNil() ) {
-					ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
-										  			              grammar,
-													              c.token,
-														          c.getText());
-					}
-						            g=factory.build_Set(notAtom);
-						
-					break;
-				}
-				case TOKEN_REF:
-				{
-					t = (GrammarAST)_t;
-					match(_t,TOKEN_REF);
-					_t = _t.getNextSibling();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BANG:
-					case ROOT:
-					{
-						ast3 = _t==ASTNULL ? null : (GrammarAST)_t;
-						ast_suffix(_t);
-						_t = _retTree;
-						break;
-					}
-					case 3:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					
-						            int ttype=0;
-					IntSet notAtom = null;
-								if ( grammar.type==Grammar.LEXER ) {
-								notAtom = grammar.getSetFromRule(this,t.getText());
-						   		if ( notAtom==null ) {
-							ErrorManager.grammarError(ErrorManager.MSG_RULE_INVALID_SET,
-									  			              grammar,
-												              t.token,
-													          t.getText());
-						   		}
-						   		else {
-						            		notAtom = grammar.complement(notAtom);
-						            	}
-								}
-								else {
-								ttype = grammar.getTokenType(t.getText());
-						            	notAtom = grammar.complement(ttype);
-							}
-					if ( notAtom==null || notAtom.isNil() ) {
-					ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
-									  			              grammar,
-												              t.token,
-													          t.getText());
-					}
-						           g=factory.build_Set(notAtom);
-						
-					break;
-				}
-				case BLOCK:
-				{
-					g=set(_t);
-					_t = _retTree;
-					
-						           GrammarAST stNode = (GrammarAST)n.getFirstChild();
-					//IntSet notSet = grammar.complement(stNode.getSetValue());
-					// let code generator complement the sets
-					IntSet s = stNode.getSetValue();
-					stNode.setSetValue(s);
-					// let code gen do the complement again; here we compute
-					// for NFA construction
-					s = grammar.complement(s);
-					if ( s.isNil() ) {
-					ErrorManager.grammarError(ErrorManager.MSG_EMPTY_COMPLEMENT,
-									  			              grammar,
-												              n.token);
-					}
-						           g=factory.build_Set(s);
-						
-					break;
-				}
-				default:
-				{
-					throw new NoViableAltException(_t);
-				}
-				}
-				}
-				n.followingNFAState = g.right;
-				_t = __t80;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final StateCluster  ebnf(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST ebnf_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StateCluster b = null;
-		GrammarAST blk = ebnf_AST_in;
-		if ( blk.getType()!=BLOCK ) {
-			blk = (GrammarAST)blk.getFirstChild();
-		}
-		GrammarAST eob = blk.getLastChild();
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONAL:
-			{
-				AST __t72 = _t;
-				GrammarAST tmp58_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONAL);
-				_t = _t.getFirstChild();
-				b=block(_t);
-				_t = _retTree;
-				_t = __t72;
-				_t = _t.getNextSibling();
-				
-				if ( blk.setValue!=null ) {
-				// if block comes back SET not BLOCK, make it
-				// a single ALT block
-				b = factory.build_AlternativeBlockFromSet(b);
-				}
-				g = factory.build_Aoptional(b);
-					g.left.setDescription(grammar.grammarTreeToString(ebnf_AST_in,false));
-				// there is always at least one alt even if block has just 1 alt
-				int d = grammar.assignDecisionNumber( g.left );
-						grammar.setDecisionNFA(d, g.left);
-				grammar.setDecisionBlockAST(d, blk);
-				g.left.setDecisionASTNode(ebnf_AST_in);
-					
-				break;
-			}
-			case CLOSURE:
-			{
-				AST __t73 = _t;
-				GrammarAST tmp59_AST_in = (GrammarAST)_t;
-				match(_t,CLOSURE);
-				_t = _t.getFirstChild();
-				b=block(_t);
-				_t = _retTree;
-				_t = __t73;
-				_t = _t.getNextSibling();
-				
-				if ( blk.setValue!=null ) {
-				b = factory.build_AlternativeBlockFromSet(b);
-				}
-				g = factory.build_Astar(b);
-						// track the loop back / exit decision point
-					b.right.setDescription("()* loopback of "+grammar.grammarTreeToString(ebnf_AST_in,false));
-				int d = grammar.assignDecisionNumber( b.right );
-						grammar.setDecisionNFA(d, b.right);
-				grammar.setDecisionBlockAST(d, blk);
-				b.right.setDecisionASTNode(eob);
-				// make block entry state also have same decision for interpreting grammar
-				NFAState altBlockState = (NFAState)g.left.transition(0).target;
-				altBlockState.setDecisionASTNode(ebnf_AST_in);
-				altBlockState.setDecisionNumber(d);
-				g.left.setDecisionNumber(d); // this is the bypass decision (2 alts)
-				g.left.setDecisionASTNode(ebnf_AST_in);
-					
-				break;
-			}
-			case POSITIVE_CLOSURE:
-			{
-				AST __t74 = _t;
-				GrammarAST tmp60_AST_in = (GrammarAST)_t;
-				match(_t,POSITIVE_CLOSURE);
-				_t = _t.getFirstChild();
-				b=block(_t);
-				_t = _retTree;
-				_t = __t74;
-				_t = _t.getNextSibling();
-				
-				if ( blk.setValue!=null ) {
-				b = factory.build_AlternativeBlockFromSet(b);
-				}
-				g = factory.build_Aplus(b);
-				// don't make a decision on left edge, can reuse loop end decision
-						// track the loop back / exit decision point
-					b.right.setDescription("()+ loopback of "+grammar.grammarTreeToString(ebnf_AST_in,false));
-				int d = grammar.assignDecisionNumber( b.right );
-						grammar.setDecisionNFA(d, b.right);
-				grammar.setDecisionBlockAST(d, blk);
-				b.right.setDecisionASTNode(eob);
-				// make block entry state also have same decision for interpreting grammar
-				NFAState altBlockState = (NFAState)g.left.transition(0).target;
-				altBlockState.setDecisionASTNode(ebnf_AST_in);
-				altBlockState.setDecisionNumber(d);
-				
-				break;
-			}
-			default:
-				if (_t==null) _t=ASTNULL;
-				if (((_t.getType()==BLOCK))&&(grammar.isValidSet(this,ebnf_AST_in))) {
-					g=set(_t);
-					_t = _retTree;
-				}
-				else if ((_t.getType()==BLOCK)) {
-					b=block(_t);
-					_t = _retTree;
-					
-					// track decision if > 1 alts
-					if ( grammar.getNumberOfAltsForDecisionNFA(b.left)>1 ) {
-					b.left.setDescription(grammar.grammarTreeToString(blk,false));
-					b.left.setDecisionASTNode(blk);
-					int d = grammar.assignDecisionNumber( b.left );
-					grammar.setDecisionNFA( d, b.left );
-					grammar.setDecisionBlockAST(d, blk);
-					}
-					g = b;
-					
-				}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final StateCluster  tree(AST _t) throws RecognitionException {
-		StateCluster g=null;
-		
-		GrammarAST tree_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		StateCluster e=null;
-		GrammarAST el=null;
-		StateCluster down=null, up=null;
-		
-		
-		try {      // for error handling
-			AST __t76 = _t;
-			GrammarAST tmp61_AST_in = (GrammarAST)_t;
-			match(_t,TREE_BEGIN);
-			_t = _t.getFirstChild();
-			el=(GrammarAST)_t;
-			g=element(_t);
-			_t = _retTree;
-			
-			down = factory.build_Atom(Label.DOWN);
-			// TODO set following states for imaginary nodes?
-			//el.followingNFAState = down.right;
-					   g = factory.build_AB(g,down);
-					
-			{
-			_loop78:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_tokenSet_0.member(_t.getType()))) {
-					el=(GrammarAST)_t;
-					e=element(_t);
-					_t = _retTree;
-					g = factory.build_AB(g,e);
-				}
-				else {
-					break _loop78;
-				}
-				
-			} while (true);
-			}
-			
-			up = factory.build_Atom(Label.UP);
-			//el.followingNFAState = up.right;
-					   g = factory.build_AB(g,up);
-					   // tree roots point at right edge of DOWN for LOOK computation later
-					   tree_AST_in.NFATreeDownState = down.left;
-					
-			_t = __t76;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-		return g;
-	}
-	
-	public final void ast_suffix(AST _t) throws RecognitionException {
-		
-		GrammarAST ast_suffix_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		if ( grammar.getOption("output")==null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
-									  grammar, ast_suffix_AST_in.token, currentRuleName);
-		}
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case ROOT:
-			{
-				GrammarAST tmp62_AST_in = (GrammarAST)_t;
-				match(_t,ROOT);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BANG:
-			{
-				GrammarAST tmp63_AST_in = (GrammarAST)_t;
-				match(_t,BANG);
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final void setElement(AST _t,
-		IntSet elements
-	) throws RecognitionException {
-		
-		GrammarAST setElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST c = null;
-		GrammarAST t = null;
-		GrammarAST s = null;
-		GrammarAST c1 = null;
-		GrammarAST c2 = null;
-		
-		int ttype;
-		IntSet ns=null;
-		StateCluster gset;
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				
-					if ( grammar.type==Grammar.LEXER ) {
-					ttype = Grammar.getCharValueFromGrammarCharLiteral(c.getText());
-					}
-					else {
-					ttype = grammar.getTokenType(c.getText());
-				}
-				if ( elements.member(ttype) ) {
-							ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
-													  grammar,
-													  c.token,
-													  c.getText());
-				}
-				elements.add(ttype);
-				
-				break;
-			}
-			case TOKEN_REF:
-			{
-				t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				
-						if ( grammar.type==Grammar.LEXER ) {
-							// recursively will invoke this rule to match elements in target rule ref
-							IntSet ruleSet = grammar.getSetFromRule(this,t.getText());
-							if ( ruleSet==null ) {
-								ErrorManager.grammarError(ErrorManager.MSG_RULE_INVALID_SET,
-												  grammar,
-												  t.token,
-												  t.getText());
-							}
-							else {
-								elements.addAll(ruleSet);
-							}
-						}
-						else {
-							ttype = grammar.getTokenType(t.getText());
-							if ( elements.member(ttype) ) {
-								ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
-														  grammar,
-														  t.token,
-														  t.getText());
-							}
-							elements.add(ttype);
-							}
-				
-				break;
-			}
-			case STRING_LITERAL:
-			{
-				s = (GrammarAST)_t;
-				match(_t,STRING_LITERAL);
-				_t = _t.getNextSibling();
-				
-				ttype = grammar.getTokenType(s.getText());
-				if ( elements.member(ttype) ) {
-							ErrorManager.grammarError(ErrorManager.MSG_DUPLICATE_SET_ENTRY,
-													  grammar,
-													  s.token,
-													  s.getText());
-				}
-				elements.add(ttype);
-				
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t118 = _t;
-				GrammarAST tmp64_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				c1 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				c2 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t118;
-				_t = _t.getNextSibling();
-				
-					if ( grammar.type==Grammar.LEXER ) {
-					        int a = Grammar.getCharValueFromGrammarCharLiteral(c1.getText());
-					    int b = Grammar.getCharValueFromGrammarCharLiteral(c2.getText());
-						elements.addAll(IntervalSet.of(a,b));
-					}
-					
-				break;
-			}
-			case BLOCK:
-			{
-				gset=set(_t);
-				_t = _retTree;
-				
-						Transition setTrans = gset.left.transition(0);
-				elements.addAll(setTrans.label.getSet());
-				
-				break;
-			}
-			case NOT:
-			{
-				AST __t119 = _t;
-				GrammarAST tmp65_AST_in = (GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				ns=new IntervalSet();
-				setElement(_t,ns);
-				_t = _retTree;
-				
-				IntSet not = grammar.complement(ns);
-				elements.addAll(not);
-				
-				_t = __t119;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException ex) {
-			reportError(ex);
-			if (_t!=null) {_t = _t.getNextSibling();}
-		}
-		_retTree = _t;
-	}
-	
-	public final IntSet  setRule(AST _t) throws RecognitionException {
-		IntSet elements=new IntervalSet();
-		
-		GrammarAST setRule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		IntSet s=null;
-		
-		try {      // for error handling
-			AST __t105 = _t;
-			GrammarAST tmp66_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp67_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getNextSibling();
-			GrammarAST tmp68_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp69_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop110:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp70_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop110;
-				}
-				
-			} while (true);
-			}
-			AST __t111 = _t;
-			GrammarAST tmp71_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp72_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case ALT:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			int _cnt115=0;
-			_loop115:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					AST __t114 = _t;
-					GrammarAST tmp73_AST_in = (GrammarAST)_t;
-					match(_t,ALT);
-					_t = _t.getFirstChild();
-					setElement(_t,elements);
-					_t = _retTree;
-					GrammarAST tmp74_AST_in = (GrammarAST)_t;
-					match(_t,EOA);
-					_t = _t.getNextSibling();
-					_t = __t114;
-					_t = _t.getNextSibling();
-				}
-				else {
-					if ( _cnt115>=1 ) { break _loop115; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt115++;
-			} while (true);
-			}
-			GrammarAST tmp75_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t111;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp76_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			_t = __t105;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException re) {
-			throw re;
-		}
-		_retTree = _t;
-		return elements;
-	}
-	
-/** Check to see if this block can be a set.  Can't have actions
- *  etc...  Also can't be in a rule with a rewrite as we need
- *  to track what's inside set for use in rewrite.
- */
-	public final void testBlockAsSet(AST _t) throws RecognitionException {
-		
-		GrammarAST testBlockAsSet_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		
-		int nAlts=0;
-		Rule r = grammar.getRule(currentRuleName);
-		
-		
-		try {      // for error handling
-			AST __t121 = _t;
-			GrammarAST tmp77_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			int _cnt125=0;
-			_loop125:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					AST __t123 = _t;
-					GrammarAST tmp78_AST_in = (GrammarAST)_t;
-					match(_t,ALT);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BACKTRACK_SEMPRED:
-					{
-						GrammarAST tmp79_AST_in = (GrammarAST)_t;
-						match(_t,BACKTRACK_SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case BLOCK:
-					case CHAR_RANGE:
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case TOKEN_REF:
-					case NOT:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					testSetElement(_t);
-					_t = _retTree;
-					nAlts++;
-					GrammarAST tmp80_AST_in = (GrammarAST)_t;
-					match(_t,EOA);
-					_t = _t.getNextSibling();
-					_t = __t123;
-					_t = _t.getNextSibling();
-					if (!(!r.hasRewrite(outerAltNum)))
-					  throw new SemanticException("!r.hasRewrite(outerAltNum)");
-				}
-				else {
-					if ( _cnt125>=1 ) { break _loop125; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt125++;
-			} while (true);
-			}
-			GrammarAST tmp81_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t121;
-			_t = _t.getNextSibling();
-			if (!(nAlts>1))
-			  throw new SemanticException("nAlts>1");
-		}
-		catch (RecognitionException re) {
-			throw re;
-		}
-		_retTree = _t;
-	}
-	
-/** Match just an element; no ast suffix etc.. */
-	public final void testSetElement(AST _t) throws RecognitionException {
-		
-		GrammarAST testSetElement_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST c = null;
-		GrammarAST t = null;
-		GrammarAST s = null;
-		GrammarAST c1 = null;
-		GrammarAST c2 = null;
-		
-		AST r = _t;
-		
-		
-		try {      // for error handling
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case CHAR_LITERAL:
-			{
-				c = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case TOKEN_REF:
-			{
-				t = (GrammarAST)_t;
-				match(_t,TOKEN_REF);
-				_t = _t.getNextSibling();
-				
-						if ( grammar.type==Grammar.LEXER ) {
-					        Rule rule = grammar.getRule(t.getText());
-					        if ( rule==null ) {
-					        	throw new RecognitionException("invalid rule");
-					        }
-							// recursively will invoke this rule to match elements in target rule ref
-					        testSetRule(rule.tree);
-						}
-				
-				break;
-			}
-			case CHAR_RANGE:
-			{
-				AST __t140 = _t;
-				GrammarAST tmp82_AST_in = (GrammarAST)_t;
-				match(_t,CHAR_RANGE);
-				_t = _t.getFirstChild();
-				c1 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				c2 = (GrammarAST)_t;
-				match(_t,CHAR_LITERAL);
-				_t = _t.getNextSibling();
-				_t = __t140;
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			{
-				testBlockAsSet(_t);
-				_t = _retTree;
-				break;
-			}
-			case NOT:
-			{
-				AST __t141 = _t;
-				GrammarAST tmp83_AST_in = (GrammarAST)_t;
-				match(_t,NOT);
-				_t = _t.getFirstChild();
-				testSetElement(_t);
-				_t = _retTree;
-				_t = __t141;
-				_t = _t.getNextSibling();
-				break;
-			}
-			default:
-				if (_t==null) _t=ASTNULL;
-				if (((_t.getType()==STRING_LITERAL))&&(grammar.type!=Grammar.LEXER)) {
-					s = (GrammarAST)_t;
-					match(_t,STRING_LITERAL);
-					_t = _t.getNextSibling();
-				}
-			else {
-				throw new NoViableAltException(_t);
-			}
-			}
-		}
-		catch (RecognitionException re) {
-			throw re;
-		}
-		_retTree = _t;
-	}
-	
-	public final void testSetRule(AST _t) throws RecognitionException {
-		
-		GrammarAST testSetRule_AST_in = (_t == ASTNULL) ? null : (GrammarAST)_t;
-		GrammarAST id = null;
-		
-		try {      // for error handling
-			AST __t127 = _t;
-			GrammarAST tmp84_AST_in = (GrammarAST)_t;
-			match(_t,RULE);
-			_t = _t.getFirstChild();
-			id = (GrammarAST)_t;
-			match(_t,ID);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case FRAGMENT:
-			case LITERAL_protected:
-			case LITERAL_public:
-			case LITERAL_private:
-			{
-				modifier(_t);
-				_t = _retTree;
-				break;
-			}
-			case ARG:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp85_AST_in = (GrammarAST)_t;
-			match(_t,ARG);
-			_t = _t.getNextSibling();
-			GrammarAST tmp86_AST_in = (GrammarAST)_t;
-			match(_t,RET);
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case OPTIONS:
-			{
-				GrammarAST tmp87_AST_in = (GrammarAST)_t;
-				match(_t,OPTIONS);
-				_t = _t.getNextSibling();
-				break;
-			}
-			case BLOCK:
-			case SCOPE:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case SCOPE:
-			{
-				ruleScopeSpec(_t);
-				_t = _retTree;
-				break;
-			}
-			case BLOCK:
-			case AMPERSAND:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			{
-			_loop132:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==AMPERSAND)) {
-					GrammarAST tmp88_AST_in = (GrammarAST)_t;
-					match(_t,AMPERSAND);
-					_t = _t.getNextSibling();
-				}
-				else {
-					break _loop132;
-				}
-				
-			} while (true);
-			}
-			AST __t133 = _t;
-			GrammarAST tmp89_AST_in = (GrammarAST)_t;
-			match(_t,BLOCK);
-			_t = _t.getFirstChild();
-			{
-			int _cnt137=0;
-			_loop137:
-			do {
-				if (_t==null) _t=ASTNULL;
-				if ((_t.getType()==ALT)) {
-					AST __t135 = _t;
-					GrammarAST tmp90_AST_in = (GrammarAST)_t;
-					match(_t,ALT);
-					_t = _t.getFirstChild();
-					{
-					if (_t==null) _t=ASTNULL;
-					switch ( _t.getType()) {
-					case BACKTRACK_SEMPRED:
-					{
-						GrammarAST tmp91_AST_in = (GrammarAST)_t;
-						match(_t,BACKTRACK_SEMPRED);
-						_t = _t.getNextSibling();
-						break;
-					}
-					case BLOCK:
-					case CHAR_RANGE:
-					case STRING_LITERAL:
-					case CHAR_LITERAL:
-					case TOKEN_REF:
-					case NOT:
-					{
-						break;
-					}
-					default:
-					{
-						throw new NoViableAltException(_t);
-					}
-					}
-					}
-					testSetElement(_t);
-					_t = _retTree;
-					GrammarAST tmp92_AST_in = (GrammarAST)_t;
-					match(_t,EOA);
-					_t = _t.getNextSibling();
-					_t = __t135;
-					_t = _t.getNextSibling();
-				}
-				else {
-					if ( _cnt137>=1 ) { break _loop137; } else {throw new NoViableAltException(_t);}
-				}
-				
-				_cnt137++;
-			} while (true);
-			}
-			GrammarAST tmp93_AST_in = (GrammarAST)_t;
-			match(_t,EOB);
-			_t = _t.getNextSibling();
-			_t = __t133;
-			_t = _t.getNextSibling();
-			{
-			if (_t==null) _t=ASTNULL;
-			switch ( _t.getType()) {
-			case LITERAL_catch:
-			case LITERAL_finally:
-			{
-				exceptionGroup(_t);
-				_t = _retTree;
-				break;
-			}
-			case EOR:
-			{
-				break;
-			}
-			default:
-			{
-				throw new NoViableAltException(_t);
-			}
-			}
-			}
-			GrammarAST tmp94_AST_in = (GrammarAST)_t;
-			match(_t,EOR);
-			_t = _t.getNextSibling();
-			_t = __t127;
-			_t = _t.getNextSibling();
-		}
-		catch (RecognitionException re) {
-			throw re;
-		}
-		_retTree = _t;
-	}
-	
-	
-	public static final String[] _tokenNames = {
-		"<0>",
-		"EOF",
-		"<2>",
-		"NULL_TREE_LOOKAHEAD",
-		"\"options\"",
-		"\"tokens\"",
-		"\"parser\"",
-		"LEXER",
-		"RULE",
-		"BLOCK",
-		"OPTIONAL",
-		"CLOSURE",
-		"POSITIVE_CLOSURE",
-		"SYNPRED",
-		"RANGE",
-		"CHAR_RANGE",
-		"EPSILON",
-		"ALT",
-		"EOR",
-		"EOB",
-		"EOA",
-		"ID",
-		"ARG",
-		"ARGLIST",
-		"RET",
-		"LEXER_GRAMMAR",
-		"PARSER_GRAMMAR",
-		"TREE_GRAMMAR",
-		"COMBINED_GRAMMAR",
-		"INITACTION",
-		"LABEL",
-		"TEMPLATE",
-		"\"scope\"",
-		"GATED_SEMPRED",
-		"SYN_SEMPRED",
-		"BACKTRACK_SEMPRED",
-		"\"fragment\"",
-		"ACTION",
-		"DOC_COMMENT",
-		"SEMI",
-		"\"lexer\"",
-		"\"tree\"",
-		"\"grammar\"",
-		"AMPERSAND",
-		"COLON",
-		"RCURLY",
-		"ASSIGN",
-		"STRING_LITERAL",
-		"CHAR_LITERAL",
-		"INT",
-		"STAR",
-		"TOKEN_REF",
-		"\"protected\"",
-		"\"public\"",
-		"\"private\"",
-		"BANG",
-		"ARG_ACTION",
-		"\"returns\"",
-		"\"throws\"",
-		"COMMA",
-		"LPAREN",
-		"OR",
-		"RPAREN",
-		"\"catch\"",
-		"\"finally\"",
-		"PLUS_ASSIGN",
-		"SEMPRED",
-		"IMPLIES",
-		"ROOT",
-		"RULE_REF",
-		"NOT",
-		"TREE_BEGIN",
-		"QUESTION",
-		"PLUS",
-		"WILDCARD",
-		"REWRITE",
-		"DOLLAR",
-		"DOUBLE_QUOTE_STRING_LITERAL",
-		"DOUBLE_ANGLE_STRING_LITERAL",
-		"WS",
-		"COMMENT",
-		"SL_COMMENT",
-		"ML_COMMENT",
-		"OPEN_ELEMENT_OPTION",
-		"CLOSE_ELEMENT_OPTION",
-		"ESC",
-		"DIGIT",
-		"XDIGIT",
-		"NESTED_ARG_ACTION",
-		"NESTED_ACTION",
-		"ACTION_CHAR_LITERAL",
-		"ACTION_STRING_LITERAL",
-		"ACTION_ESC",
-		"WS_LOOP",
-		"INTERNAL_RULE_REF",
-		"WS_OPT",
-		"SRC"
-	};
-	
-	private static final long[] mk_tokenSet_0() {
-		long[] data = { 38773375610519040L, 1270L, 0L, 0L};
-		return data;
-	}
-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
-	}
-	
diff --git a/src/org/antlr/tool/TreeToNFAConverter.smap b/src/org/antlr/tool/TreeToNFAConverter.smap
deleted file mode 100644
index 2274d2e..0000000
--- a/src/org/antlr/tool/TreeToNFAConverter.smap
+++ /dev/null
@@ -1,2084 +0,0 @@
-SMAP
-TreeToNFAConverter.java
-G
-*S G
-*F
-+ 0 buildnfa.g
-buildnfa.g
-*L
-1:3
-1:4
-1:5
-1:6
-1:8
-1:9
-1:10
-1:11
-1:12
-1:13
-1:14
-1:15
-1:16
-1:17
-1:19
-1:20
-1:21
-1:22
-1:23
-1:24
-1:25
-1:26
-1:27
-1:28
-1:29
-1:30
-1:31
-1:32
-1:33
-44:52
-45:53
-47:55
-48:56
-50:58
-51:59
-53:61
-55:63
-56:64
-58:66
-59:67
-60:68
-61:69
-62:70
-63:71
-65:73
-66:74
-67:75
-68:76
-69:77
-70:78
-71:79
-72:80
-73:81
-74:82
-75:83
-76:84
-77:85
-78:86
-79:87
-80:88
-81:89
-83:91
-84:92
-85:93
-86:94
-87:95
-88:96
-89:97
-90:98
-91:99
-92:100
-93:101
-94:102
-95:103
-96:104
-97:105
-98:106
-99:107
-100:108
-102:110
-103:111
-104:112
-105:113
-106:114
-107:115
-108:116
-109:117
-110:118
-111:119
-112:120
-114:122
-115:123
-116:124
-117:125
-118:126
-119:127
-120:128
-121:129
-122:130
-123:131
-124:132
-125:133
-126:134
-127:135
-128:136
-131:141
-131:145
-131:205
-131:206
-131:207
-131:208
-131:209
-131:210
-131:211
-132:146
-133:148
-133:149
-133:150
-133:151
-133:152
-133:153
-133:154
-133:155
-133:156
-133:157
-133:158
-133:159
-133:198
-133:199
-133:200
-133:201
-133:202
-134:162
-134:163
-134:164
-134:165
-134:166
-134:167
-134:168
-134:169
-134:170
-134:171
-135:174
-135:175
-135:176
-135:177
-135:178
-135:179
-135:180
-135:181
-135:182
-135:183
-136:186
-136:187
-136:188
-136:189
-136:190
-136:191
-136:192
-136:193
-136:194
-136:195
-138:204
-141:342
-141:346
-141:359
-141:360
-141:361
-141:362
-141:363
-141:364
-141:365
-142:347
-142:348
-142:349
-142:350
-142:351
-142:352
-142:353
-142:354
-142:355
-142:356
-142:357
-142:358
-145:213
-145:218
-145:334
-145:335
-145:336
-145:337
-145:338
-145:339
-145:340
-146:219
-146:220
-146:221
-147:216
-147:223
-147:224
-147:225
-147:226
-147:227
-147:228
-147:229
-147:240
-147:241
-147:242
-147:243
-147:244
-148:247
-148:248
-148:249
-148:250
-148:251
-148:252
-148:253
-148:254
-148:255
-148:256
-148:257
-148:258
-148:259
-148:269
-148:270
-148:271
-148:272
-148:273
-149:276
-149:277
-149:278
-149:279
-149:280
-149:281
-149:282
-149:283
-149:284
-149:285
-149:286
-149:287
-149:288
-149:297
-149:298
-149:299
-149:300
-149:301
-150:303
-150:304
-150:305
-150:306
-150:307
-150:308
-150:309
-150:310
-150:311
-150:312
-150:313
-150:315
-150:316
-151:317
-151:318
-151:319
-151:320
-151:321
-151:322
-151:323
-151:324
-151:325
-151:326
-151:327
-151:328
-151:330
-151:331
-152:332
-152:333
-155:367
-155:371
-155:388
-155:389
-155:390
-155:391
-155:392
-155:393
-155:394
-156:373
-156:374
-156:375
-156:376
-156:377
-156:378
-156:379
-156:380
-156:381
-156:382
-156:383
-156:385
-156:386
-156:387
-159:396
-159:406
-159:604
-159:605
-159:606
-159:607
-159:608
-159:609
-159:610
-160:401
-161:402
-162:403
-165:399
-165:407
-165:408
-165:409
-165:410
-165:411
-165:412
-165:413
-165:414
-165:602
-165:603
-166:415
-167:417
-167:418
-167:419
-167:420
-167:421
-167:422
-167:423
-167:424
-167:425
-167:432
-167:433
-167:434
-167:435
-167:436
-168:439
-168:440
-168:441
-168:443
-168:444
-168:445
-168:446
-168:447
-168:448
-168:449
-168:456
-168:457
-168:458
-168:459
-168:460
-169:464
-169:465
-169:466
-169:468
-169:469
-169:470
-169:471
-169:472
-169:473
-169:474
-169:484
-169:485
-169:486
-169:487
-169:488
-170:492
-170:493
-170:494
-170:495
-170:496
-170:497
-170:498
-170:507
-170:508
-170:509
-170:510
-170:511
-171:514
-171:515
-171:516
-171:517
-171:518
-171:519
-171:527
-171:528
-171:529
-171:530
-171:531
-172:533
-172:534
-172:535
-172:536
-172:537
-172:538
-172:539
-172:540
-172:541
-172:542
-172:543
-172:544
-172:546
-172:547
-173:548
-174:549
-174:550
-175:552
-175:553
-175:554
-175:555
-175:556
-175:557
-175:558
-175:565
-175:566
-175:567
-175:568
-175:569
-176:571
-176:572
-176:573
-178:575
-179:576
-180:577
-181:578
-182:579
-183:580
-184:581
-185:582
-186:583
-187:584
-188:585
-189:586
-191:588
-192:589
-193:590
-194:591
-195:592
-196:593
-197:594
-198:595
-200:597
-201:598
-202:599
-203:600
-208:612
-208:616
-208:617
-208:618
-208:647
-208:648
-208:649
-208:650
-208:651
-208:652
-208:653
-208:654
-208:655
-208:656
-208:657
-208:658
-209:619
-209:620
-209:621
-209:622
-209:623
-210:626
-210:627
-210:628
-210:629
-210:630
-211:633
-211:634
-211:635
-211:636
-211:637
-212:640
-212:641
-212:642
-212:643
-212:644
-215:660
-215:664
-215:707
-215:708
-215:709
-215:710
-215:711
-215:712
-215:713
-216:665
-216:666
-216:667
-216:668
-216:670
-216:671
-216:672
-216:673
-216:674
-216:675
-216:676
-216:684
-216:685
-216:686
-216:687
-216:688
-216:690
-216:691
-216:692
-216:693
-216:694
-216:695
-216:696
-216:697
-216:698
-216:699
-216:700
-216:701
-216:703
-216:704
-216:705
-216:706
-219:715
-219:716
-219:726
-219:733
-219:788
-219:789
-219:790
-219:791
-219:793
-219:794
-219:795
-219:796
-219:797
-219:798
-219:799
-219:800
-220:720
-221:721
-222:722
-223:723
-228:727
-228:728
-228:729
-228:730
-228:731
-229:732
-231:734
-231:735
-231:736
-231:737
-231:738
-231:740
-231:741
-231:742
-231:743
-231:744
-231:745
-231:746
-231:753
-231:754
-231:755
-231:756
-231:757
-231:784
-231:785
-232:760
-232:761
-232:762
-232:763
-232:764
-232:765
-232:766
-232:767
-232:768
-232:773
-232:774
-232:775
-232:776
-232:778
-232:779
-232:780
-234:770
-235:771
-238:781
-238:782
-238:783
-240:786
-241:787
-244:951
-244:952
-244:959
-244:991
-244:992
-244:993
-244:994
-244:995
-244:996
-244:997
-244:998
-245:956
-248:960
-248:961
-248:962
-248:963
-248:965
-248:966
-248:967
-248:968
-248:969
-248:970
-248:971
-248:972
-248:973
-248:974
-248:975
-248:976
-248:978
-248:979
-248:980
-248:981
-248:982
-250:984
-251:985
-252:986
-253:987
-254:988
-255:989
-259:802
-259:806
-259:807
-259:808
-259:854
-259:855
-259:856
-259:857
-259:858
-259:859
-259:860
-259:861
-259:862
-259:863
-259:864
-259:865
-260:809
-260:810
-260:812
-260:813
-260:814
-260:815
-260:816
-260:817
-260:818
-260:819
-260:820
-260:821
-260:822
-260:824
-260:825
-260:826
-260:828
-260:829
-260:830
-260:831
-260:832
-260:833
-260:840
-260:841
-260:842
-260:843
-260:844
-261:848
-261:849
-261:850
-261:851
-264:1297
-264:1301
-264:1314
-264:1315
-264:1316
-264:1317
-264:1318
-264:1319
-264:1320
-265:1302
-265:1303
-265:1304
-265:1305
-265:1306
-265:1307
-265:1308
-265:1309
-265:1310
-265:1311
-265:1312
-265:1313
-268:1322
-268:1326
-268:1336
-268:1337
-268:1338
-268:1339
-268:1340
-268:1341
-268:1342
-269:1327
-269:1328
-269:1329
-269:1330
-269:1331
-269:1332
-269:1333
-269:1334
-269:1335
-272:1000
-272:1004
-272:1081
-272:1082
-272:1083
-272:1084
-272:1085
-272:1086
-272:1087
-273:1005
-273:1006
-273:1007
-273:1074
-273:1075
-273:1076
-273:1077
-273:1079
-273:1080
-274:1008
-274:1009
-275:1011
-276:1012
-277:1013
-278:1014
-280:1016
-280:1017
-280:1018
-280:1019
-280:1021
-280:1022
-280:1023
-280:1024
-280:1025
-280:1026
-280:1027
-280:1036
-280:1037
-280:1038
-280:1039
-280:1040
-280:1043
-280:1044
-280:1045
-280:1046
-280:1047
-280:1048
-280:1049
-280:1052
-280:1053
-280:1054
-280:1055
-280:1056
-280:1059
-280:1060
-280:1061
-280:1062
-280:1063
-280:1066
-280:1067
-280:1068
-280:1069
-280:1070
-280:1072
-280:1073
-284:1089
-284:1090
-284:1102
-284:1103
-284:1104
-284:1283
-284:1284
-284:1285
-284:1286
-284:1287
-284:1288
-284:1289
-284:1290
-284:1291
-284:1292
-284:1293
-284:1294
-284:1295
-285:1105
-285:1106
-285:1107
-285:1108
-285:1109
-285:1110
-285:1111
-285:1112
-285:1113
-285:1114
-286:1117
-286:1118
-286:1119
-286:1120
-286:1121
-286:1122
-286:1123
-286:1124
-286:1125
-286:1126
-287:1129
-287:1130
-287:1131
-287:1132
-287:1133
-287:1134
-287:1135
-287:1136
-287:1137
-287:1138
-287:1139
-287:1140
-287:1141
-288:1144
-288:1145
-288:1146
-288:1147
-288:1148
-288:1149
-288:1150
-288:1151
-288:1152
-288:1153
-288:1154
-288:1155
-288:1156
-289:1093
-289:1094
-289:1159
-289:1160
-289:1161
-289:1162
-289:1163
-289:1164
-289:1165
-289:1166
-289:1167
-289:1168
-289:1169
-289:1170
-289:1171
-289:1172
-290:1173
-291:1174
-292:1095
-292:1096
-292:1177
-292:1178
-292:1179
-292:1180
-292:1181
-292:1182
-292:1183
-292:1184
-292:1185
-292:1186
-292:1187
-292:1188
-292:1189
-292:1190
-294:1192
-295:1193
-296:1194
-298:1198
-298:1199
-298:1200
-298:1201
-298:1202
-298:1203
-298:1204
-298:1205
-298:1206
-299:1209
-299:1210
-299:1211
-299:1212
-299:1213
-299:1214
-299:1215
-300:1218
-300:1219
-300:1220
-300:1221
-301:1224
-301:1225
-301:1226
-301:1227
-301:1228
-301:1229
-301:1230
-301:1231
-301:1232
-301:1233
-302:1236
-302:1237
-302:1238
-302:1239
-302:1240
-303:1097
-303:1243
-303:1244
-303:1245
-303:1246
-303:1247
-303:1248
-304:1098
-304:1251
-304:1252
-304:1253
-304:1254
-304:1255
-304:1256
-305:1099
-305:1259
-305:1260
-305:1261
-305:1262
-305:1263
-305:1264
-306:1100
-306:1267
-306:1268
-306:1269
-306:1270
-306:1271
-306:1272
-307:1275
-307:1276
-307:1277
-307:1278
-307:1279
-307:1280
-310:1807
-310:1808
-310:1820
-310:1821
-310:1822
-310:1908
-310:1913
-310:1928
-310:1929
-310:1930
-310:1931
-310:1932
-310:1933
-310:1934
-310:1935
-310:1936
-310:1937
-310:1938
-310:1939
-310:1940
-311:1812
-312:1813
-313:1814
-314:1815
-315:1816
-316:1817
-319:1909
-319:1910
-319:1911
-319:1912
-321:1914
-321:1915
-321:1916
-323:1918
-324:1919
-325:1920
-326:1921
-327:1922
-328:1923
-329:1924
-330:1925
-331:1926
-333:1823
-333:1824
-333:1825
-333:1826
-333:1827
-333:1828
-333:1829
-333:1830
-333:1831
-333:1832
-335:1834
-336:1835
-337:1836
-338:1837
-339:1838
-340:1839
-341:1840
-342:1841
-343:1842
-344:1843
-345:1844
-346:1845
-348:1849
-348:1850
-348:1851
-348:1852
-348:1853
-348:1854
-348:1855
-348:1856
-348:1857
-348:1858
-350:1860
-351:1861
-352:1862
-353:1863
-354:1864
-355:1865
-356:1866
-357:1867
-358:1868
-359:1869
-360:1870
-361:1871
-362:1872
-363:1873
-364:1874
-365:1875
-367:1879
-367:1880
-367:1881
-367:1882
-367:1883
-367:1884
-367:1885
-367:1886
-367:1887
-367:1888
-369:1890
-370:1891
-371:1892
-372:1893
-373:1894
-374:1895
-375:1896
-376:1897
-377:1898
-378:1899
-379:1900
-380:1901
-381:1902
-382:1903
-383:1904
-387:1942
-387:1943
-387:1952
-387:1991
-387:1992
-387:1993
-387:1994
-387:1995
-387:1996
-387:1997
-387:1998
-388:1947
-389:1948
-390:1949
-393:1953
-393:1954
-393:1955
-393:1956
-393:1989
-393:1990
-394:1957
-395:1958
-395:1959
-397:1961
-398:1962
-399:1963
-400:1964
-402:1966
-402:1967
-402:1968
-402:1969
-402:1970
-402:1971
-402:1972
-402:1973
-402:1974
-402:1975
-402:1976
-402:1977
-402:1978
-402:1980
-402:1981
-404:1983
-405:1984
-406:1985
-407:1986
-408:1987
-413:1628
-413:1629
-413:1638
-413:1639
-413:1640
-413:1793
-413:1794
-413:1795
-413:1796
-413:1797
-413:1798
-413:1799
-413:1800
-413:1801
-413:1802
-413:1803
-413:1804
-413:1805
-414:1641
-414:1642
-414:1643
-414:1644
-414:1645
-414:1646
-414:1647
-414:1648
-415:1632
-415:1651
-415:1652
-415:1653
-415:1654
-415:1655
-415:1656
-415:1789
-415:1790
-416:1633
-416:1634
-416:1658
-416:1659
-416:1660
-416:1661
-416:1662
-416:1663
-416:1664
-416:1666
-416:1667
-416:1668
-416:1669
-416:1670
-416:1671
-416:1672
-416:1673
-416:1680
-416:1681
-416:1682
-416:1683
-416:1684
-416:1782
-416:1783
-416:1784
-416:1785
-416:1786
-418:1687
-419:1688
-420:1689
-421:1690
-422:1691
-423:1692
-424:1693
-425:1694
-426:1695
-427:1696
-428:1697
-429:1698
-430:1699
-431:1700
-432:1701
-434:1635
-434:1636
-434:1705
-434:1706
-434:1707
-434:1708
-434:1709
-434:1711
-434:1712
-434:1713
-434:1714
-434:1715
-434:1716
-434:1717
-434:1718
-434:1725
-434:1726
-434:1727
-434:1728
-434:1729
-436:1732
-437:1733
-438:1734
-439:1735
-440:1736
-441:1737
-442:1738
-443:1739
-444:1740
-445:1741
-446:1742
-447:1743
-448:1744
-449:1745
-450:1746
-451:1747
-452:1748
-453:1749
-454:1750
-455:1751
-456:1752
-457:1753
-458:1754
-459:1755
-460:1756
-462:1760
-462:1761
-462:1762
-462:1763
-464:1765
-465:1766
-466:1767
-467:1768
-468:1769
-469:1770
-470:1771
-471:1772
-472:1773
-473:1774
-474:1775
-475:1776
-476:1777
-477:1778
-480:1788
-484:1344
-484:1345
-484:1361
-484:1362
-484:1363
-484:1614
-484:1615
-484:1616
-484:1617
-484:1618
-484:1619
-484:1620
-484:1621
-484:1622
-484:1623
-484:1624
-484:1625
-484:1626
-485:1348
-485:1349
-485:1350
-485:1364
-485:1365
-485:1366
-485:1367
-485:1368
-485:1369
-485:1371
-485:1372
-485:1373
-485:1374
-485:1375
-485:1376
-485:1377
-485:1386
-485:1387
-485:1388
-485:1389
-485:1390
-485:1393
-485:1394
-485:1395
-485:1396
-485:1397
-485:1398
-485:1399
-485:1400
-485:1407
-485:1408
-485:1409
-485:1410
-485:1411
-485:1413
-485:1414
-487:1416
-488:1417
-489:1418
-490:1419
-491:1420
-492:1421
-493:1422
-494:1423
-495:1424
-496:1425
-497:1426
-498:1427
-501:1351
-501:1352
-501:1353
-501:1431
-501:1432
-501:1433
-501:1434
-501:1435
-501:1436
-501:1438
-501:1439
-501:1440
-501:1441
-501:1442
-501:1443
-501:1444
-501:1453
-501:1454
-501:1455
-501:1456
-501:1457
-501:1460
-501:1461
-501:1462
-501:1463
-501:1464
-501:1465
-501:1466
-501:1467
-501:1474
-501:1475
-501:1476
-501:1477
-501:1478
-501:1480
-501:1481
-503:1483
-504:1484
-505:1485
-506:1486
-507:1487
-508:1488
-509:1489
-510:1490
-511:1491
-512:1492
-513:1493
-514:1494
-515:1495
-516:1496
-519:1354
-519:1355
-519:1500
-519:1501
-519:1502
-519:1503
-519:1504
-519:1505
-519:1507
-519:1508
-519:1509
-519:1510
-519:1511
-519:1512
-519:1513
-519:1514
-519:1521
-519:1522
-519:1523
-519:1524
-519:1525
-519:1527
-519:1528
-521:1530
-522:1531
-523:1532
-524:1533
-525:1534
-526:1535
-527:1536
-528:1537
-531:1356
-531:1357
-531:1541
-531:1542
-531:1543
-531:1544
-531:1545
-531:1546
-531:1548
-531:1549
-531:1550
-531:1551
-531:1552
-531:1553
-531:1554
-531:1555
-531:1562
-531:1563
-531:1564
-531:1565
-531:1566
-531:1568
-531:1569
-533:1571
-534:1572
-535:1573
-536:1574
-537:1575
-538:1576
-539:1577
-540:1578
-543:1358
-543:1359
-543:1582
-543:1583
-543:1584
-543:1585
-543:1586
-543:1587
-543:1589
-543:1590
-543:1591
-543:1592
-543:1593
-543:1594
-543:1595
-543:1596
-543:1603
-543:1604
-543:1605
-543:1606
-543:1607
-543:1609
-543:1610
-543:1611
-548:2000
-548:2010
-548:2011
-548:2012
-548:2027
-548:2028
-548:2029
-548:2030
-548:2031
-548:2032
-548:2033
-548:2034
-548:2035
-548:2036
-548:2037
-548:2038
-549:2004
-550:2005
-551:2006
-552:2007
-555:2013
-555:2014
-555:2015
-555:2016
-555:2017
-556:2020
-556:2021
-556:2022
-556:2023
-556:2024
-559:867
-559:868
-559:877
-559:942
-559:943
-559:944
-559:945
-559:946
-559:947
-559:948
-559:949
-560:873
-561:874
-564:871
-564:878
-564:879
-564:880
-564:881
-564:935
-564:936
-565:883
-565:884
-565:885
-565:886
-565:887
-565:888
-565:889
-565:890
-565:891
-565:893
-565:894
-565:895
-565:896
-565:897
-565:898
-565:899
-565:911
-565:912
-565:913
-565:914
-565:915
-565:917
-565:918
-565:919
-565:920
-565:921
-565:922
-565:923
-565:924
-565:925
-565:926
-565:927
-565:929
-565:930
-565:931
-566:932
-566:933
-566:934
-569:938
-570:939
-571:940
-576:2193
-576:2194
-576:2198
-576:2200
-576:2372
-576:2376
-576:2377
-576:2378
-578:2197
-578:2201
-578:2202
-578:2203
-578:2204
-578:2205
-578:2206
-578:2207
-578:2209
-578:2210
-578:2211
-578:2212
-578:2213
-578:2214
-578:2215
-578:2216
-578:2217
-578:2224
-578:2225
-578:2226
-578:2227
-578:2228
-578:2230
-578:2231
-578:2232
-578:2233
-578:2234
-578:2235
-578:2237
-578:2238
-578:2239
-578:2240
-578:2241
-578:2242
-578:2243
-578:2252
-578:2253
-578:2254
-578:2255
-578:2256
-578:2259
-578:2260
-578:2261
-578:2262
-578:2263
-578:2264
-578:2272
-578:2273
-578:2274
-578:2275
-578:2276
-578:2370
-578:2371
-579:2278
-579:2279
-579:2280
-579:2281
-579:2282
-579:2283
-579:2284
-579:2285
-579:2286
-579:2287
-579:2288
-579:2289
-579:2291
-579:2292
-580:2293
-580:2294
-580:2295
-580:2296
-580:2298
-580:2299
-580:2300
-580:2301
-580:2302
-580:2303
-580:2304
-580:2311
-580:2312
-580:2313
-580:2314
-580:2315
-580:2345
-580:2346
-581:2318
-581:2319
-581:2320
-581:2321
-581:2322
-581:2323
-581:2324
-581:2325
-581:2326
-581:2327
-581:2328
-581:2329
-581:2330
-581:2331
-581:2332
-581:2333
-581:2334
-581:2335
-581:2336
-581:2337
-581:2339
-581:2340
-581:2341
-582:2342
-582:2343
-582:2344
-584:2348
-584:2349
-584:2350
-584:2351
-584:2352
-584:2353
-584:2354
-584:2361
-584:2362
-584:2363
-584:2364
-584:2365
-585:2367
-585:2368
-585:2369
-589:2373
-589:2374
-589:2375
-591:2040
-591:2041
-591:2042
-591:2056
-591:2057
-591:2058
-591:2180
-591:2181
-591:2182
-591:2183
-591:2184
-591:2185
-591:2186
-591:2187
-591:2188
-591:2189
-591:2190
-591:2191
-592:2051
-593:2052
-594:2053
-597:2045
-597:2059
-597:2060
-597:2061
-597:2062
-597:2063
-599:2065
-600:2066
-601:2067
-602:2068
-603:2069
-604:2070
-605:2071
-606:2072
-607:2073
-608:2074
-609:2075
-610:2076
-611:2077
-613:2046
-613:2081
-613:2082
-613:2083
-613:2084
-613:2085
-615:2087
-616:2088
-617:2089
-618:2090
-619:2091
-620:2092
-621:2093
-622:2094
-623:2095
-624:2096
-625:2097
-626:2098
-627:2099
-628:2100
-629:2101
-630:2102
-631:2103
-632:2104
-633:2105
-634:2106
-635:2107
-636:2108
-637:2109
-640:2047
-640:2113
-640:2114
-640:2115
-640:2116
-640:2117
-642:2119
-643:2120
-644:2121
-645:2122
-646:2123
-647:2124
-648:2125
-649:2126
-651:2048
-651:2049
-651:2130
-651:2131
-651:2132
-651:2133
-651:2134
-651:2135
-651:2136
-651:2137
-651:2138
-651:2139
-651:2140
-651:2141
-651:2142
-651:2143
-653:2145
-654:2146
-655:2147
-656:2148
-657:2149
-660:2153
-660:2154
-660:2155
-660:2156
-662:2158
-663:2159
-666:2163
-666:2164
-666:2165
-666:2166
-666:2167
-666:2168
-666:2169
-666:2176
-666:2177
-667:2170
-667:2171
-669:2173
-670:2174
-679:2380
-679:2384
-679:2392
-679:2457
-679:2461
-679:2462
-680:2381
-680:2388
-681:2382
-681:2389
-682:2383
-684:2393
-684:2394
-684:2395
-684:2396
-684:2453
-684:2454
-685:2398
-685:2399
-685:2400
-685:2401
-685:2402
-685:2403
-685:2404
-685:2405
-685:2406
-685:2408
-685:2409
-685:2410
-685:2411
-685:2412
-685:2413
-685:2414
-685:2426
-685:2427
-685:2428
-685:2429
-685:2430
-685:2432
-685:2433
-685:2434
-685:2435
-685:2436
-685:2437
-685:2438
-685:2439
-685:2442
-685:2443
-685:2444
-685:2445
-685:2447
-685:2448
-685:2449
-686:2440
-686:2441
-688:2450
-688:2451
-688:2452
-690:2455
-690:2456
-693:2458
-693:2459
-693:2460
-695:2556
-695:2561
-695:2738
-695:2742
-695:2743
-696:2559
-696:2562
-696:2563
-696:2564
-696:2565
-696:2566
-696:2567
-696:2568
-696:2570
-696:2571
-696:2572
-696:2573
-696:2574
-696:2575
-696:2576
-696:2577
-696:2578
-696:2585
-696:2586
-696:2587
-696:2588
-696:2589
-696:2591
-696:2592
-696:2593
-696:2594
-696:2595
-696:2596
-696:2598
-696:2599
-696:2600
-696:2601
-696:2602
-696:2603
-696:2604
-696:2613
-696:2614
-696:2615
-696:2616
-696:2617
-696:2620
-696:2621
-696:2622
-696:2623
-696:2624
-696:2625
-696:2633
-696:2634
-696:2635
-696:2636
-696:2637
-696:2736
-696:2737
-697:2639
-697:2640
-697:2641
-697:2642
-697:2643
-697:2644
-697:2645
-697:2646
-697:2647
-697:2648
-697:2649
-697:2650
-697:2652
-697:2653
-698:2654
-698:2655
-698:2656
-698:2657
-698:2711
-698:2712
-699:2659
-699:2660
-699:2661
-699:2662
-699:2663
-699:2664
-699:2665
-699:2666
-699:2667
-699:2669
-699:2670
-699:2671
-699:2672
-699:2673
-699:2674
-699:2675
-699:2687
-699:2688
-699:2689
-699:2690
-699:2691
-699:2693
-699:2694
-699:2695
-699:2696
-699:2697
-699:2698
-699:2699
-699:2700
-699:2701
-699:2702
-699:2703
-699:2705
-699:2706
-699:2707
-700:2708
-700:2709
-700:2710
-702:2714
-702:2715
-702:2716
-702:2717
-702:2718
-702:2719
-702:2720
-702:2727
-702:2728
-702:2729
-702:2730
-702:2731
-703:2733
-703:2734
-703:2735
-707:2739
-707:2740
-707:2741
-710:2464
-710:2465
-710:2477
-710:2478
-710:2479
-710:2538
-710:2544
-710:2545
-710:2546
-710:2547
-710:2548
-710:2549
-710:2553
-710:2554
-711:2474
-714:2468
-714:2480
-714:2481
-714:2482
-714:2483
-714:2484
-715:2469
-715:2487
-715:2488
-715:2489
-715:2490
-715:2491
-717:2493
-718:2494
-719:2495
-720:2496
-721:2497
-722:2498
-723:2499
-724:2500
-726:2470
-726:2539
-726:2540
-726:2541
-726:2542
-726:2543
-727:2471
-727:2472
-727:2504
-727:2505
-727:2506
-727:2507
-727:2508
-727:2509
-727:2510
-727:2511
-727:2512
-727:2513
-727:2514
-727:2515
-727:2516
-727:2517
-728:2520
-728:2521
-728:2522
-728:2523
-729:2526
-729:2527
-729:2528
-729:2529
-729:2530
-729:2531
-729:2532
-729:2533
-729:2534
-729:2535
-732:2550
-732:2551
-732:2552
-*E
diff --git a/src/org/antlr/tool/TreeToNFAConverterTokenTypes.java b/src/org/antlr/tool/TreeToNFAConverterTokenTypes.java
deleted file mode 100644
index 893f235..0000000
--- a/src/org/antlr/tool/TreeToNFAConverterTokenTypes.java
+++ /dev/null
@@ -1,131 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): "buildnfa.g" -> "TreeToNFAConverter.java"$
-
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.tool;
-import java.util.*;
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-
-public interface TreeToNFAConverterTokenTypes {
-	int EOF = 1;
-	int NULL_TREE_LOOKAHEAD = 3;
-	int OPTIONS = 4;
-	int TOKENS = 5;
-	int PARSER = 6;
-	int LEXER = 7;
-	int RULE = 8;
-	int BLOCK = 9;
-	int OPTIONAL = 10;
-	int CLOSURE = 11;
-	int POSITIVE_CLOSURE = 12;
-	int SYNPRED = 13;
-	int RANGE = 14;
-	int CHAR_RANGE = 15;
-	int EPSILON = 16;
-	int ALT = 17;
-	int EOR = 18;
-	int EOB = 19;
-	int EOA = 20;
-	int ID = 21;
-	int ARG = 22;
-	int ARGLIST = 23;
-	int RET = 24;
-	int LEXER_GRAMMAR = 25;
-	int PARSER_GRAMMAR = 26;
-	int TREE_GRAMMAR = 27;
-	int COMBINED_GRAMMAR = 28;
-	int INITACTION = 29;
-	int LABEL = 30;
-	int TEMPLATE = 31;
-	int SCOPE = 32;
-	int GATED_SEMPRED = 33;
-	int SYN_SEMPRED = 34;
-	int BACKTRACK_SEMPRED = 35;
-	int FRAGMENT = 36;
-	int ACTION = 37;
-	int DOC_COMMENT = 38;
-	int SEMI = 39;
-	int LITERAL_lexer = 40;
-	int LITERAL_tree = 41;
-	int LITERAL_grammar = 42;
-	int AMPERSAND = 43;
-	int COLON = 44;
-	int RCURLY = 45;
-	int ASSIGN = 46;
-	int STRING_LITERAL = 47;
-	int CHAR_LITERAL = 48;
-	int INT = 49;
-	int STAR = 50;
-	int TOKEN_REF = 51;
-	int LITERAL_protected = 52;
-	int LITERAL_public = 53;
-	int LITERAL_private = 54;
-	int BANG = 55;
-	int ARG_ACTION = 56;
-	int LITERAL_returns = 57;
-	int LITERAL_throws = 58;
-	int COMMA = 59;
-	int LPAREN = 60;
-	int OR = 61;
-	int RPAREN = 62;
-	int LITERAL_catch = 63;
-	int LITERAL_finally = 64;
-	int PLUS_ASSIGN = 65;
-	int SEMPRED = 66;
-	int IMPLIES = 67;
-	int ROOT = 68;
-	int RULE_REF = 69;
-	int NOT = 70;
-	int TREE_BEGIN = 71;
-	int QUESTION = 72;
-	int PLUS = 73;
-	int WILDCARD = 74;
-	int REWRITE = 75;
-	int DOLLAR = 76;
-	int DOUBLE_QUOTE_STRING_LITERAL = 77;
-	int DOUBLE_ANGLE_STRING_LITERAL = 78;
-	int WS = 79;
-	int COMMENT = 80;
-	int SL_COMMENT = 81;
-	int ML_COMMENT = 82;
-	int OPEN_ELEMENT_OPTION = 83;
-	int CLOSE_ELEMENT_OPTION = 84;
-	int ESC = 85;
-	int DIGIT = 86;
-	int XDIGIT = 87;
-	int NESTED_ARG_ACTION = 88;
-	int NESTED_ACTION = 89;
-	int ACTION_CHAR_LITERAL = 90;
-	int ACTION_STRING_LITERAL = 91;
-	int ACTION_ESC = 92;
-	int WS_LOOP = 93;
-	int INTERNAL_RULE_REF = 94;
-	int WS_OPT = 95;
-	int SRC = 96;
-}
diff --git a/src/org/antlr/tool/TreeToNFAConverterTokenTypes.txt b/src/org/antlr/tool/TreeToNFAConverterTokenTypes.txt
deleted file mode 100644
index f799624..0000000
--- a/src/org/antlr/tool/TreeToNFAConverterTokenTypes.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-// $ANTLR 2.7.7 (2006-01-29): buildnfa.g -> TreeToNFAConverterTokenTypes.txt$
-TreeToNFAConverter    // output token vocab name
-OPTIONS="options"=4
-TOKENS="tokens"=5
-PARSER="parser"=6
-LEXER=7
-RULE=8
-BLOCK=9
-OPTIONAL=10
-CLOSURE=11
-POSITIVE_CLOSURE=12
-SYNPRED=13
-RANGE=14
-CHAR_RANGE=15
-EPSILON=16
-ALT=17
-EOR=18
-EOB=19
-EOA=20
-ID=21
-ARG=22
-ARGLIST=23
-RET=24
-LEXER_GRAMMAR=25
-PARSER_GRAMMAR=26
-TREE_GRAMMAR=27
-COMBINED_GRAMMAR=28
-INITACTION=29
-LABEL=30
-TEMPLATE=31
-SCOPE="scope"=32
-GATED_SEMPRED=33
-SYN_SEMPRED=34
-BACKTRACK_SEMPRED=35
-FRAGMENT="fragment"=36
-ACTION=37
-DOC_COMMENT=38
-SEMI=39
-LITERAL_lexer="lexer"=40
-LITERAL_tree="tree"=41
-LITERAL_grammar="grammar"=42
-AMPERSAND=43
-COLON=44
-RCURLY=45
-ASSIGN=46
-STRING_LITERAL=47
-CHAR_LITERAL=48
-INT=49
-STAR=50
-TOKEN_REF=51
-LITERAL_protected="protected"=52
-LITERAL_public="public"=53
-LITERAL_private="private"=54
-BANG=55
-ARG_ACTION=56
-LITERAL_returns="returns"=57
-LITERAL_throws="throws"=58
-COMMA=59
-LPAREN=60
-OR=61
-RPAREN=62
-LITERAL_catch="catch"=63
-LITERAL_finally="finally"=64
-PLUS_ASSIGN=65
-SEMPRED=66
-IMPLIES=67
-ROOT=68
-RULE_REF=69
-NOT=70
-TREE_BEGIN=71
-QUESTION=72
-PLUS=73
-WILDCARD=74
-REWRITE=75
-DOLLAR=76
-DOUBLE_QUOTE_STRING_LITERAL=77
-DOUBLE_ANGLE_STRING_LITERAL=78
-WS=79
-COMMENT=80
-SL_COMMENT=81
-ML_COMMENT=82
-OPEN_ELEMENT_OPTION=83
-CLOSE_ELEMENT_OPTION=84
-ESC=85
-DIGIT=86
-XDIGIT=87
-NESTED_ARG_ACTION=88
-NESTED_ACTION=89
-ACTION_CHAR_LITERAL=90
-ACTION_STRING_LITERAL=91
-ACTION_ESC=92
-WS_LOOP=93
-INTERNAL_RULE_REF=94
-WS_OPT=95
-SRC=96
diff --git a/README.txt b/tool/CHANGES.txt
similarity index 55%
rename from README.txt
rename to tool/CHANGES.txt
index a88c0f9..928b96c 100644
--- a/README.txt
+++ b/tool/CHANGES.txt
@@ -1,133 +1,1144 @@
-Early Access ANTLR v3
-ANTLR 3.0.1
-August 13, 2007
+ANTLR 3.2 Release
+Sep 21, 2009
 
 Terence Parr, parrt at cs usfca edu
 ANTLR project lead and supreme dictator for life
 University of San Francisco
 
-INTRODUCTION 
+CHANGES
 
-[Java, C, Python, C# targets are available; others coming soon]
+September 22, 2009 -- ANTLR v3.2
+
+September 21, 2009 [Jim Idle]
+
+* Added new options for tool invocation to control the points at which the code
+  generator tells the target code to use its equivalent of switch() instead of
+  inline ifs.
+      -Xmaxswitchcaselabels m don't generate switch() statements for dfas
+                              bigger  than m [300]
+      -Xminswitchalts m       don't generate switch() statements for dfas smaller
+                              than m [3]
+* Upgraded -X help output to include new optins and provide the default
+  settings, as well as provide units for those settings that need them.
+
+* Change the C Target to overide the deafults for the new settings to 
+  generate the most optimizable C code from the modern C compiler point of
+  view. This is essentially to always use swtich statements unless there
+  is absolutely no other option. C defaults are to use 1 for minimum and
+  3000 for maximum number of alts that trigger switch(). This results in
+  object code that is 30% smaller and up to 20% faster.
+ 
+April 23, 2009
 
-Welcome to ANTLR v3!  I've been working on this for nearly 4 years and it's
-finally ready!  I have lots of features to add later, but this will be
-the first set.
+* Added reset to TreeNodeStream interface.
 
-You should use v3 in conjunction with ANTLRWorks:
+April 22, 2009
 
-    http://www.antlr.org/works/index.html 
+* Fixed ANTLR-374.  Was caused by moved of grammars. %foo() stuff didn't work
 
-The book will also help you a great deal (printed May 15, 2007); you
-can also buy the PDF:
+April 9, 2009
 
-http://www.pragmaticprogrammer.com/titles/tpantlr/index.html
+* .g and .g3 file extensions work again.
+* introduced bug in 3.1.3: gives exception not error msg upon
+  missing .g file
 
-See the getting started document:
+March 26, 2009
 
-http://www.antlr.org/wiki/display/ANTLR3/FAQ+-+Getting+Started
+* Made ctor in TreeRewriter and TreeFilter call this not super.
 
-You also have the examples plus the source to guide you.
+March 21, 2009
 
-See the new wiki FAQ:
+* Added ctor to RecognizerSharedState to allow cloning it.
 
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+March 17, 2009 -- ANTLR v3.1.3
 
-and general doc root:
+* improved ANTLRv3.g to handle <...> element options
 
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+3+Wiki+Home
+March 15, 2009
 
-Please help add/update FAQ entries.
+* Fixed ANTLR-389. Strip didn't ignore options in subrules; also seemed
+  to demand stdin.
 
-If all else fails, you can buy support or ask the antlr-interest list:
+March 15, 2009
 
-    http://www.antlr.org/support.html
+* ANTLR always sorts incoming grammar list by dependency.  For example,
+  If W.g depends on tokens from P.g then P.g is done first even if
+  W.g mentioned first on command line.  It does not ignore any files you
+  specify the commandline.  If you do *.g and that includes some
+  imported grammars, it will run antlr on them.
 
-I have made very little effort at this point to deal well with
-erroneous input (e.g., bad syntax might make ANTLR crash).  I will clean
-this up after I've rewritten v3 in v3.  v3 is written in v2 at the moment.
+* -make option prevents ANTLR from running on P.g if P older than
+  generated files.
 
-Per the license in LICENSE.txt, this software is not guaranteed to
-work and might even destroy all life on this planet:
+* Added org.antlr.tool.GrammarSpelunker to build a faster dependency
+  checker (what grammars depend on etc...).  Totally independent of any
+  ANTLR code; easy to pull into other tools.
 
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
+* Added org.antlr.misc.Graph, a general graph with nodes
+  containing an Object payload. It knows how to do a topological sort
+  on the nodes.
 
-----------------------------------------------------------------------
+March 10, 2009
 
-EXAMPLES
+* Added associativity token option to support upcoming special expression
+  parsing. Added rule option strategy=precedence also
 
-ANTLR v3 sample grammars (currently for C, C#, Java targets):
+March 1, 2009
 
-    http://www.antlr.org/download/examples-v3.tar.gz
+* Changed ANTLRWorks debug port from 49153 to 49100.  Apparently we change the port in
+  ANTLRWorks to 49100 in 1.2 but forgot to do so in the ANTLR targets.
 
-contains the following examples: LL-star, cminus, dynamic-scope,
-fuzzy, hoistedPredicates, island-grammar, java, python, scopes,
-simplecTreeParser, treeparser, tweak, xmlLexer.
+START CHANGES FOR TREE FILTER MODE (pulled from dev branch)
 
-Also check out Mantra Programming Language for a prototype (work in
-progress) using v3:
+This feature will be announced in 3.2, but I am integrating from my development branch now into the mainline so target developers have a chance to implement. We might release 3.1.3 bug fix release before 3.2.
 
-    http://www.linguamantra.org/
+* CommonTreeNodeStream -> BufferedTreeNodeStream.  Now,
+  CommonTreeNodeStream is completely unbuffered unless you are
+  backtracking.  No longer making a list of all nodes before tree parsing.
 
-----------------------------------------------------------------------
+* Added tree grammar filter=true mode.  
 
-What is ANTLR?
+  Altered templates:
+	Java.stg: added filterMode to genericParser and treeParser.
+	This required a change to ANTLRCore.sti
+	Defined a default parameter in treeParser to set the superclass
+	to TreeFilter for tree grammar with filter=true. It sets
+	superclass to TreeRewriter if filter=true and output=AST.
+  Other them that, I only had to change ANTLR itself a little bit.
+  Made filter mode valid for tree grammars and have it automatically set
+  the necessary elements: @synpredgate, backtrack=true, rewrite=true
+  (if output=AST).  Added error message for detecting conflicting
+  options.
 
-ANTLR stands for (AN)other (T)ool for (L)anguage (R)ecognition and was
-originally known as PCCTS.  ANTLR is a language tool that provides a
-framework for constructing recognizers, compilers, and translators
-from grammatical descriptions containing actions.  Target language list:
+* Added misc.FastQueue and TestFastQueue:
+  A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
+  A linked list is fast at dequeue but slow at get(i).  An array is
+  the reverse.  This is O(1) for both operations.
 
-http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
+* Added tree.TreeIterator, a generator that walks a doubly linked tree.
+  The nodes must know what index they are. It's an Iterator but
+  remove() is not supported. Returns navigation nodes always:
+  UP, DOWN, EOF.
 
-----------------------------------------------------------------------
+* Added misc.LookaheadStream: A lookahead queue that knows how
+  to mark/release locations in the buffer for backtracking purposes.
+  I hope to use for both tree nodes and tokens.  Just implement
+  nextElement() to say how to get next node or token.
 
-How is ANTLR v3 different than ANTLR v2?
+END CHANGES FOR TREE FILTER MODE
 
-See "What is the difference between ANTLR v2 and v3?"
+February 23, 2009 -- ANTLR v3.1.2
 
-    http://www.antlr.org/wiki/pages/viewpage.action?pageId=719
+February 18, 2009
 
-See migration guide:
+* Added org.antlr.tool.Strip (reads from file arg or stdin, emits to stdout)
+  to strip actions from a grammar.
 
-    http://www.antlr.org/wiki/display/ANTLR3/Migrating+from+ANTLR+2+to+ANTLR+3
+February 4, 2009
 
-----------------------------------------------------------------------
+* Added CommonTree.setUnknownTokenBoundaries().  Sometimes we build trees
+  in a grammar and some of the token boundaries are not set properly.
+  This only matters if you want to print out the original text associated
+  with a subtree.  Check this out rule:
 
-How do I install this damn thing?
+	postfixExpression
+	    :   primary ('.'^ ID)*
+	    ;
 
-Just untar and you'll get:
+  For a.b.c, we get a '.' that does not have the token boundaries set.
+  ANTLR only sets token boundaries for subtrees returned from a rule.
+  SO, the overall '.' operator has the token boundaries set from 'a'
+  to 'c' tokens, but the lower '.' subtree does not get the boundaries
+  set (they are -1,-1).  Calling setUnknownTokenBoundaries() on
+  the returned tree sets the boundaries appropriately according to the
+  children's token boundaries.
 
-antlr-3.0.1/README.txt (this file)
-antlr-3.0.1/LICENSE.txt
-antlr-3.0.1/src/org/antlr/...
-antlr-3.0.1/lib/stringtemplate-3.0.jar (3.0.1 needs 3.0)
-antlr-3.0.1/lib/antlr-2.7.7.jar
-antlr-3.0.1/lib/antlr-3.0.1.jar
+January 22, 2009
 
-Then you need to add all the jars in lib to your CLASSPATH.
+* fixed to be listeners.add(listener); in addListener() of DebugEventHub.java
 
-Please see the FAQ
+January 20, 2009
 
-http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+* Removed runtime method: mismatch in BaseRecognizer and TreeParser.  Seems
+  to be unused.  Had to override method recoverFromMismatchedToken() in
+  TreeParser to get rid of single token insertion and deletion for
+  tree parsing because it makes no sense with all of the up-and-down nodes.
 
-----------------------------------------------------------------------
+* Changed JIRA port number from 8888 to no port spec (aka port 80) and all
+  refs to it in this file.
 
-CHANGES
+* Changed BaseTree to Tree typecase in getChild and toStringTree() and
+  deleteChild() to make more generic.
 
-INCOMPATIBILITY WARNING -- templates have changed; must regen output from
-                           grammars.  Runtime libraries have also changed.
-                           Debug event listener interface has changed also.
+December 16, 2008
+
+* Added -verbose cmd-line option and turned off standard header
+  and list of read files.  Silent now without -verbose.
+
+November 24, 2008
+
+* null-ptr protected getParent and a few others.
+
+* Added new ctor to CommonTreeNodeStream for walking subtrees.  Avoids
+  having to make new serialized stream as it can reuse overall node stream
+  buffer.
+
+November 20, 2008
+
+* Updated BaseTest to isolate tests better.
+
+November 17, 2008
+
+* BaseTreeAdaptor.getType() was hosed; always gave 0.  Thanks to Sam Harwell.
+
+November 8, 2008
+
+* Added methods to BaseRecognizer:
+  public void setBacktrackingLevel(int n) { state.backtracking = n; }
+  /** Return whether or not a backtracking attempt failed. */
+  public boolean failed() { return state.failed; }
+
+November 5, 2008
+
+* Tweaked traceIn/Out to say "fail/succeeded"
+
+* Bug in code gen for tree grammar wildcard list label x+=.
+
+* Use of backtrack=true anywhere in grammar causes backtracking sensitive
+  code to be generated.  Actions are gated etc...  Previously, that only
+  happened when a syntactic predicate appeared in a DFA.  But, we need
+  to gate actions when backtracking option is set even if no decision
+  is generated to support filtering of trees.
+
+October 25, 2008
+
+* Fixed debug event socket protocol to allow spaces in filenames.
+
+* Added TreeVisitor and TreeVisitorAction to org.antlr.runtime.tree.
+
+October 22, 2008
+
+* Added inContext() to TreeParser.  Very useful for predicating
+  tree grammar productions according to context (their parent list).
+  Added new TestTreeContext unit tests (15).
+
+    /** Check if current node in input has a context.  Context means sequence
+     *  of nodes towards root of tree.  For example, you might say context
+     *  is "MULT" which means my parent must be MULT.  "CLASS VARDEF" says
+     *  current node must be child of a VARDEF and whose parent is a CLASS node.
+     *  You can use "..." to mean zero-or-more nodes.  "METHOD ... VARDEF"
+     *  means my parent is VARDEF and somewhere above that is a METHOD node.
+     *  The first node in the context is not necessarily the root.  The context
+     *  matcher stops matching and returns true when it runs out of context.
+     *  There is no way to force the first node to be the root. 
+     */
+    public boolean inContext(String context) {...}
+
+* Added 3 methods to Tree interface [BREAKS BACKWARD COMPATIBILITY]
+
+    /** Is there is a node above with token type ttype? */
+    public boolean hasAncestor(int ttype);
+
+    /** Walk upwards and get first ancestor with this token type. */
+    public Tree getAncestor(int ttype);
+
+    /** Return a list of all ancestors of this node.  The first node of
+     *  list is the root and the last is the parent of this node.
+     */
+    public List getAncestors();
+
+October 21, 2008
+
+* Updated unit tests to be correct for \uFFFE->\uFFFF change
+
+* Made . in tree grammar look like ^(. .*) to analysis, though ^(. foo)
+  is illegal (can't have . at root). Wildcard is subtree or node.
+  Fixed bugs:
+    http://www.antlr.org/browse/ANTLR-248
+    http://www.antlr.org/browse/ANTLR-344
+
+October 1, 2008 -- ANTLR v3.1.1
+
+September 8, 2008
+
+* Labels on tokens, rules carry into synpreds now so semantic predicates work.
+  This didn't work since labels were stripped in the synpred and they weren't
+  defined in the generated method.
+
+  a : x=A z=a {$x.text.equals($z.text)}? A
+    | y=A a A A 
+    ;
+
+September 3, 2008
+
+* Made a REV static variable in Tool so that we can change the rev for
+  daily builds.
+
+* Made \uFFFF a valid character. Token types are 32-bit clean using -1
+  not 0x0000FFFF as -1 so it should be okay.  Label.java:
+    public static final int MIN_CHAR_VALUE = '\u0000';
+    public static final int MAX_CHAR_VALUE = '\uFFFF';
+
+August 30, 2008
+
+* Changed messages in en.stg so that TOKEN_NONDETERMINISM correctly
+  indicates when actions hid semantic predicates.
+
+August 15, 2008
+
+* Tweaked build properties and build.xml
+
+August 13, 2008
+
+* Fixed ANTLR-314; 3.1 introduced a problem with list labels +=
+
+August 12, 2008 -- ANTLR v3.1
+
+* Added JavaScript target
+
+August 7, 2008
+
+* an NFA target of EOF predicate transition in DFA cause an exception in
+  getPredicatesPerNonDeterministicAlt().
+
+* Kay Roepke found a nasty bug when debugging AST-constructing
+  composite recognizers.  If the input state was null to the constructor,
+  super class constructor created a new parser state object.
+  Later, though we passed the argument state not this.state
+  to the delegate constructors, forcing them to share a different
+  state objects!  Changed state to this.state in Dbg.stg constructors.
+
+* Ack. messed up debug/AST.  Have to set proxy's tree adaptor; it's
+  a circular ref.  Just an ASTDbg.stg change.
+
+August 4, 2008
+
+* superClass works now for lexers
+
+* Made Grammar.defineNamedAction propogate header actions down to all
+  delegates if root grammar; regardless of lexer/parser scope.
+
+* Rejiggered AST templates to propogate changes to tree adaptor
+  for delegate grammars. Fixes ANTLR-302
+
+August 4, 2008
+
+* FOLLOW set computations altered constant FOLLOW bit sets.
+
+* Added (...) are all predicate evaluations.
+
+* Extra init code for tree parser nonrewrite mode removed.
+
+* Added empty child list check in becomeRoot
+
+August 3, 2008
+
+* Was using RuleReturnScope not Rulename_return for list labels in tree
+  parser.
+
+* Didn't set _last in tree parser for rule ref track stuff (rewrite=true)
+
+August 2, 2008
+ 
+* Benjamin found another rewrite engine bug.
+
+July 30, 2008
+
+* CommonTreeNodeStream / CommonTokenStream did not reset properly.
+
+July 29, 2008
+
+* Fixed another bug in TokenRewriteStream; didn't like inserts after end.
+
+July 28, 2008
+
+* Fixed bug in TokenRewriteStream.toString(start,stop); it ignored
+  parameters. ;)
+
+July 17, 2008
+
+* allow qualified type names in hetero <...> options like T<a.b.c.Node>
+
+July 5, 2008
+
+* treeLevel not set for setBlock alts; added unit test
+
+July 3, 2008
+
+* Fixed ANTLR-267. parse tree added nodes during backtracking and
+  cyclic DFAs.  tracks hidden tokens too now. Added toInputString() to
+  get text back including hidden tokens.  Shows <epsilon> for rules
+  that match nothing.
+
+June 26, 2008
+
+* Added gParent ptr that points to immediate parent grammar. E.g.,
+    // delegators
+    public MParser gM; 
+    public M_S gS;
+    public M_S gParent = gS; // NEW
+
+* Grammar imports didn't set all the delegate pointers...lots of imported
+  grammars would cause a null ptr exception.  Fixes ANTLR-292.
+
+June 25, 2008
+
+* List labels in tree construction didn't always track the tree; sometimes
+  had a rule result structure.
+
+June 4, 2008
+
+* Improved unit testing so that each test suite executes and builds grammars
+  in a separate temporary directory. This means they can execute concurrently.
+  Also seem to be a problem with my class path during execution. Moved
+  tmpdir for ahead of standard CLASSPATH.
+
+* By virtue of an improvement to StringTemplate, output newlines 
+  in generated files should be normalized to whatever your host uses.
+
+June 3, 2008
+
+* Restrict legality of grammar options; for example you cannot use output option
+  in lexer anymore.
+
+June 2, 2008
+
+* Throw illegal arg exception upon invalid TokenRewriteStream ops. Rewrote
+  core of engine.  Slightly different operation.  Added many more unit tests.
+
+3.1b1 - May 20, 2008
+
+May 11, 2008
+
+* rewrite=true, output=AST for tree grammar was not working.  Altered trees were not
+  propagated back up the rule reference chain.  Required a number of mods to
+  ASTTreeParser.stg.  Added unit tests.
+
+May 10, 2008
+
+* [BACKWARD INCOMPATIBLE if you override match()]
+  I had turned off single token insertion and deletion because I could not figure
+  out how to work with trees and actions. Figure that out and so I turned it back on.
+  match() returns Object matched now (parser, tree parser) so we can set labels
+  on token refs properly after single token ins/del error recovery.  Allows actions
+  and tree construction to proceed normally even though we recover in the middle of
+  an alternative.  Added methods for conjuring up missing symbols: getMissingSymbol().
+
+* refactored BaseRecognizer error handling routines
+
+* Single token error recovery was not properly taking into consideration EOF.
+
+* ANTLR no longer tries to recover in tree parsers inline using single node deletion or insertion; throw exception.  Trees should be well formed as they are not created by users.
+
+* Added empty constructors to the exception classes that did not have them so that ANTLRWorks can create the exceptions.
+
+* Made debug tree adaptor deal with tokens conjured up during error recovery.
+
+* Removed extra location() debug element that was emitted.
+
+May 8, 2008
+
+* ANTLR didn't update line/col to DFA map for AW.
+
+May 6-7, 2008
+
+* Insufficiently covered (with semantic predicates) alt warnings are now emitted before
+  nondeterminisms so it's clear the nondeterminism is a result of insufficient preds.
+
+* Improved insufficiently covered alt warnings from:
+    warning(203): T.g:2:3: The following alternatives are insufficiently covered with predicates: 1
+  to:
+    warning(203): T.g:2:3: Input B is insufficiently covered with predicates at loca
+tions: alt 1: line 3:15, alt 2: line 2:9
+
+* Improved nondeterminism warning to have:
+  Semantic predicates were present but were hidden by actions.
+parser grammar U;
+a : (A B)? ;
+b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ;
+
+To create the prediction DFA for the optional sub rule in 'a', ANTLR must find all references to 'a' to determine what can follow. A B can follow 'a' in the first two alts rule 'b'.   To resolve the conflict between matching A B immediately in the sub rule and exiting rule 'a' to match it in 'b', ANTLR looks for predicates. In this case, there are two predicates that indicate the semantic context in which the surrounding alternatives are valid. The problem is that one of the predicates i [...]
+
+ANTLR Parser Generator  Version 3.1b1 (??)  1989-2007
+warning(203): U.g:2:5: Input such as "A B" is insufficiently covered with predicates at locations: alt 2: line 3:38 at B
+Semantic predicates were present but were hidden by actions.
+warning(200): U.g:2:5: Decision can match input such as "A B" using multiple alternatives: 1, 2
+As a result, alternative(s) 2 were disabled for that input
+Semantic predicates were present but were hidden by actions.
+
+* Fixed issue where 
+r41
+   : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';'
+   ;
+still warned about $r41 being ambig.
+
+* actions are now added to the NFA.
+
+* Fixed ANTLR-222.  ANTLR now ignores preds after actions.
+
+May 5, 2008
+
+* Fixed ANTLR-235 by backing out a change from 12-31-07.
+
+* Fixed ANTLR-249; I include semantic context again in closure busy signal.
+
+May 3, 2008
+
+* Fixed ANTLR-208.  Looks in library or in -o output path.  antlr -o foo T.g U.g where U needs T.tokens won't work unless we look in foo too.  fixed.
+
+* Refactored assign.types.g to move methods to a class called AssignTokenTypesBehavior.
+
+* Fixed ANTLR-207.  Lexers importing vocabs didn't see ';'=4 type aliases in .tokens.
+
+* Fixed ANTLR-228.  Couldn't use wildcard in alts with AST rewrites.
+
+May 2, 2008
+
+* Fixed ANTLR-230; can use \' now in action.
+
+* Scope attributes no longer have a stack depth check on front.  If you ref $r::a when r has not invoked you, then you get an exception not a default value.  Back to the way 3.0.1 worked.
+
+* $channel was a global variable in 3.0.1 unlike $type which did not affect an invoking lexer rule.  Now it's local too.  Only $type and $channel are ever set with regularity.  Setting those should not affect an invoking lexer rule as in the following should work:
+
+  X : ID WS? '=' ID ;  // result is X on normal channel
+  WS : ' '+ {$channel = HIDDEN; } ;
+
+  STRING : '"' (ESC|.)* '"' ;  // result is STRING not ESC
+
+  FLOAT : INT '.' INT? ; // should be FLOAT
+  INT : Digit+ ;
+  fragment
+  Digit : '0'..'9' ;
+
+* Fixed bug in interpreter regarding (...)* loops
+
+May 1, 2008
+
+* Fixed ANTLR-202.  These now give warnings about ambig ref to $a.
+    a : ID a -> $a | INT ;
+  and
+    a : A a {$a.text} | B ;
+
+April 30, 2008
+
+* Fixed ANTLR-237. updated -depend to know about imported grammars.
+$ java org.antlr.Tool -depend -lib foo T.g
+  ANTLR Parser Generator  Version 3.1b1 (??)  1989-2007
+  T.g: foo/Java.g
+  TParser.java : T.g
+  T.tokens : T.g
+  TLexer.java : T.g
+  T_Java : T.g
+
+April 29, 2008
+
+* Fixed ANTLR-217; scope A,B,C; didn't work
+
+* Fixed ANTLR-224; ! or ^ on item in alt with rewrite gave exception
+
+* Added token options to terminals: ID<node=V; foo="Big bob"> etc...
+  node is default so you can do ID<V> for hetero tree types. most common.
+
+April 17, 2008
+
+* Use default msg if unknown recog type in getErrorMessage():
+	String msg = e.getMessage();
+
+April 14, 2008
+
+* %x.y = foo; was not working in @members section
+
+March 29, 2008
+
+* Import couldn't handle A imports B imports C.
+
+March 27, 2008
+
+* Added get/setInputStream to Token interface and affected classes.
+
+February 26, 2008
+
+* made fillBuffer public in CommonTreeNodeStream so we can add trees
+  to stream for interactive interpreters.
+
+February 14, 2008
+
+* Fixed a bug in the code generation where tree level 0 was used
+  no matter what to rewrite trees in tree grammars. added unit test
+
+* Fixed ANTLR-221. exceptions were generated when using
+  AST construction operators and no output=AST option.
+
+February 13, 2008
+
+* Improved error msgs for unreachable alts and tokens.
+
+February 11-12, 2008
+
+* Fixed ANTLR-219.
+  It looks like the AST construction code for sets was totally messed up.
+  This was for not only the new tree parser AST construction, but also
+  the regular tree construction for parsers. I had to introduce templates
+  in the ASTTreeParser.stg file to deal with this. added unit tests:
+  TestTreeGrammarRewriteAST.testSetMatchNoRewrite(),
+  testSetMatchNoRewriteLevel2(), testSetMatchNoRewriteLevel2Root().
+  Had to add template matchRuleBlockSet()
+  to differentiate between a regular set in one that is an entire rule.
+  If it is an entire rule, it has to set the return value, retval.tree.
+
+* Fixed ANTLR-220.
+  Made TreeAdaptor dupNode and dupTree events emit debugging events
+  so AW could see tree node duplications.
+
+February 4, 2008
+
+* BACKWARD INCOMPATIBILITY
+  Added getSourceName to IntStream and TokenSource interfaces and also the
+  BaseRecognizer.  Have to know where char come from for error messages.
+  Widespread change, but a trivial one. 
+
+January 17, 2008
+
+* Interpreter throws FailedPredicateException now when it sees a predicate;
+  before it was silently failing.  I'll make it work one of these days. ;)
+
+January 12, 2008
+
+* Copy ctor not copying start and stop for common token. Fixes ANTLR-212
+
+* Removed single token insertion and deletion for tokens, sets.
+  Required a change to the code generation for matchSet() template
+  and a tweak inside the BaseRecognizer.  To engage this again is easy,
+  just override mismatch() to call mismatchRecover(). I changed it to simply
+  throw an exception.
+
+* Added syntaxError recognizer state var so you can easily tell if
+  a recognizer failed.  Added getNumberOfSyntaxErrors() to recognizers.
+
+* Added doc for the error node stuff:
+  http://www.antlr.org/wiki/display/ANTLR3/Tree+construction
+
+* Fixed ANTLR-193
+
+* Added recognizer methods to answer questions about current mismatched
+  token error.  Useful now since i don't automatically recover inline
+  to such errors (I throw exception):
+	mismatchIsUnwantedToken(IntStream input, int ttype) 
+	mismatchIsMissingToken(IntStream input, BitSet follow)
+
+* Added UnwantedTokenException and MissingTokenException to make
+  match() problems more precise in case you want to catch differently.
+  Updated getErrorMessage() to be more precise.  Says:
+
+	line 2:9 missing EQ at '0'
+
+  now instead of
+
+	line 2:9 mismatched input '0' expecting EQ
+
+  Input "x=9 9;" gives
+
+	line 3:8 extraneous input '9' expecting ';'
+
+  When very confused, "x=9 for;", you still get old mismatched message:
+
+	line 3:8 extraneous input 'for' expecting ';'
+	line 3:11 mismatched input ';' expecting '('
+
+* Added unit tests to TestAutoAST and copied to TestRewriteAST with
+  suitable rewrites to ensure basic error node insertion works.
+
+January 11, 2008
+
+* Adding errorNode to TreeAdaptor and various debug
+  events/listeners.  Had to add new class runtime.tree.CommonErrorNode
+  to hold all the goodies: input stream, start/stop objects.
+
+* Tweaked CommonTree.getType() to return INVALID_TOKEN_TYPE
+  instead of literal 0 (same thing at moment though).
+
+* Updated ANTLRWorks to show error nodes in tree as much as I could; Jean
+  will get to rest of it.
+
+January 9-10, 2008
+
+* Continued work on debugging/profiling composite grammars.
+
+* Updated debug protocol for debugging composite grammars.  enter/exit
+  rule needs grammar to know when to flip display in AW.
+
+* Fixed ANTLR-209.  ANTLR consumed 2 not 1 char to recover in lexer.
+
+* Added two faqs instead of making changes to antlr runtime about
+  lexer error handling:
+  http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341230
+  http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341217
+
+January 1-8, 2008
+
+* Making debugging/profiling work with composite grammars.
+
+* Updated ANTLRWorks so it works still for noncomposite grammars.
+
+* two new examples: import and composite-java (the java example grammar
+  broken up into multiple pieces using import).
+
+* Worked on composite grammars.  Had to refactor a lot of code to make
+  ANTLR deal with one meta grammar made up of multiple grammars.  I 
+  thought I had it sort of working back in August.  Yes, but barely. Lots
+  of work to do it seemed.  Lots of clean up work.  Many new unit tests
+  in TestCompositeGrammars.  Had to add new error messages warning about
+  conflicting tokens inherited from multiple grammars etc...
+
+    TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
+      "cannot alias <arg>; string already assigned to <arg2>"
+    TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
+      "cannot alias <arg>; token name already assigned to <arg2>"
+    TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
+      "tokenVocab option ignored in imported grammar <arg>"
+    INVALID_IMPORT(arg,arg2) ::=
+      "<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
+    IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
+      "no lexer rules contributed to <arg> from imported grammar <arg2>"
+    IMPORT_NAME_CLASH(arg,arg2) ::=
+      "combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
+
+  This stuff got really really complicated.  Syntactic predicate names even
+  had to be scoped per grammar so they don't conflict.
+
+* When using subrules like (atom->atom) to set result tree, it was not
+  properly setting result (early enough).  Future code got null for
+  $rule.tree.
+
+December 31, 2007
+
+* Added the start of a semantic predicate computation for LL(1) to
+  solve a problem with slow grammar analysis even with k=1 due to
+  predicates.  Then I realized the problem with that grammar was
+  elsewhere.  Semantic context really shouldn't be used when
+  preventing closure recomputation (May 2008 I discovered I was
+  wrong--you do need it).  The predicates became huge even though the
+  reduced value would be no different.  The analyzer seems faster now
+  that I am not testing predicate values all the time.  Further it may
+  terminate sooner just due to reduced closure recursion.
+
+* Moved FIRST/FOLLOW computations to a separate class LL1Analyzer to
+  tidy up.
+
+* ANTLR lexer allowed octal escapes, but they didn't work. ;)  Rather than
+  fix, I'm removing.  Use '\uxxxx' to get even 8 bit char values: \u00xx.
+
+December 29, 2007
+
+* Fixed ANTLR-206. I wasn't avoiding analyzing decisions in
+  left-recursive rules.
+
+* Had to add hetero arg to all tokenRef*() templates.  Added _last
+  local var to track last child so we can do replaceChildren() during
+  AST rewrite mode for tree grammars.  Should be useful later for .text
+  property.  Ack, hetero arg is on lots of templates. :(  Moved
+  ruleCleanUp() template into ASTTreeParser and ASTParser groups.
+
+* added noRewrite() template (to Java.stg) so we can insert code during
+  rewrite mode to return original tree if no rewrite.  Might be useful
+  for token rewrites later.  For templates too?
+
+* Had to add if !rewriteMode around tree construction in tree parser
+  templates.
+
+* Harald Muller pointed out that we need to use <initValue(attr.type)>
+  in our tests for null token/rule property references. For int types
+  we need 0 not null. (p!=null?p.line:0).  Changed scopeAttributeRef,
+  ruleLabelRef.  Also changed the known typed attributes like 
+  lexerRuleLabelPropertyRef_line to yield 0 upon null rule ref to
+  be consistent with case when we don't know the type.  Fixes ANTLR-195.
+  Added testTypeOfGuardedAttributeRefIsCorrect test and reset expected
+  output for 13 tests that now "fail".
+
+December 28, 2007
+
+* added polydiff example (Java target)
+
+* added "int" property for token and lexer rule refs.  super convenient. E.g.,
+  a : b=INT {int x = $b.int;} ;
+
+December 27, 2007
+
+* Changed -Xnoinlinedfa to -Xmaxinlinedfastates m where m is
+  maximum number of states a DFA can have before ANTLR avoids
+  inlining it.  Instead, you get a table-based DFA.  This
+  effectively avoids some acyclic DFA that still have many states
+  with multiple incident edges.  The combinatorial explosion smacks
+  of infinite loop.  Fixes ANTLR-130.
+
+* [...] are allowed in args now but ] must be escaped as \]. E.g.,
+  a[String[\] ick, int i] : ... ;
+  And calling a rule: foo[x[i\], 34]
+  Fixes ANTLR-140.
+
+* Fixed ANTLR-105.  Target.getTargetStringLiteralFromANTLRStringLiteral()
+  escaped " that were already escaped.
+
+* targets can now specify how to encode int as char escape.  Moved
+  DFA.encodeIntAsCharEscape to Target.
+
+* Bug in runtime.DFA.  If a special state (one with predicate) failed, it
+  tried to continue (causing out of range exception due to state = -1)
+  instead of reporting error.
+
+* If -dfa with combined grammar T.g, builds T.dec-*.dot and TLexer.dec-*.dot
+
+* Fix ANTLR-165.
+  Generate TParser.java and TLexer.java from T.g if combined, else
+  use T.java as output regardless of type.
+  BACKWARD INCOMPATIBILITY since file names change.
+  I changed the examples-v3/java to be consistent.  Required XML.g ->
+  XMLLexer.java and fuzzy/Main.java change.
+
+* Fix ANTLR-169.  Deletes tmp lexer grammar file.
+
+December 25, 2007
+
+* Fixed ANTLR-111.  More unit tests in TestAttributes.
+
+December 25, 2007
+
+* Dangling states ("decision cannot distinguish between alternatives
+  for at least one input sequence") is now an error not a warning.
+
+* Added sample input sequence that leads to dangling DFA state, one
+  that cannot reach an accept state.  ANTLR ran into a case where
+  the same input sequence reaches multiple locations in the NFA
+  (and so not nondeterministic), but analysis ran out of further
+  NFA states to look for more input.  Commonly at EOF target states.
+  Now says:
+
+  error(202): CS.g:248:95: the decision cannot distinguish between alternative(s) 1,2 for input such as "DOT IDENTIFIER EOF"
+
+  Also fixed bug where dangling states did not resolve to stop states.
+
+* Fixed ANTLR-123
+
+December 17-21, 2007
+
+* k=1 doesn't prevent backtracking anymore as in
+  (options {k=1;}:'else' statement)?
+  if backtrack=true for overall grammar.  Set to false in subrule.
+
+* Optimized the analysis engine for LL(1).  Doesn't attempt LL(*) unless
+  LL(1) fails.  If not LL(1) but autobacktracking but no other kind of
+  predicate, it also avoids LL(*).  This is only important for really
+  big 4000 line grammars etc...
+
+* Lots of code clean up
+
+December 16, 2007
+
+* Yet more Kay pair programming.  Saved yet more RAM; 15% by
+  whacking NFA configurations etc in each DFA state after DFA construction.
+
+* Overall we drop from 2m49s to 1m11s for a huge 4000 line TSQL grammar
+  with k=*.  Only needs -Xconversiontimeout 2000 now not
+  -Xconversiontimeout 5000 too.  With k=1, it's 1m30s down to 40s.
+
+December 15, 2007
+
+* Working with Kay Roepke, we got about 15% speed improvement in
+  overall ANTLR exec time.  Memory footprint seems to be about 50%
+  smaller.
+
+December 13-14, 2007
+
+* I abort entire DFA construction now when I see recursion in > 1 alt.
+  Decision is non-LL(*) even if some pieces are LL(*).  Safer to bail
+  out and try with fixed k.  If user set fixed k then it continues because
+  analysis will eventually terminate for sure.  If a pred is encountered
+  and k=* and it's non-LL(*), it aborts and retries at k=1 but does NOT
+  emit an error.
+
+* Decided that recursion overflow while computing a lookahead DFA is
+  serious enough that I should bail out of entire DFA computation.
+  Previously analysis tried to keep going and made the rules about
+  how analysis worked more complicated.  Better to simply abort when
+  decision can't be computed with current max stack (-Xm option).
+  User can adjust or add predicate etc...  This is now an error
+  not a warning.
+
+* Recursion overflow and unreachable alt is now a fatal error; no code gen.
+  The decision will literally not work.
+
+* Cleaned up how DFA construction/analysis aborts due to non-LL(*) and
+  overflow etc...  Throws exceptions now, which cleans up a bunch of IF
+  checks etc...  Very nice now. Exceptions:
+	analysis/AnalysisRecursionOverflowException.java
+	analysis/AnalysisTimeoutException.java
+	analysis/NonLLStarDecisionException.java
+
+* ErrorManager.grammarWarning() counted them as errors not warnings.
+
+* Unreachable alt warnings are now errors.
+
+* The upshot of these changes is that I fixed ANTLR-178 and did
+  lots of refactoring of code handling analysis failure.
+
+December 11, 2007
+
+* Could not deal with spaces, oddly enough in arg lists:
+	grammar Bad;
+	a : A b["foo", $A.text] ;
+	b[String x, String y] : C ;
+
+October 28, 2007
+
+* Made ANTLR emit a better error message when it cannot write the
+  implicit lexer file from a combined grammar. Used to say "cannot open
+  file", now says "cannot write file" and gives backtrace.
+
+September 15, 2007
+
+* Added getCharStream to Lexer.
+
+September 10, 2007
+
+* Added {{...}} forced action executed even during backtracking.
+
+September 9, 2007
+
+* r='string' in lexer got a duplicate label definition.
+
+August 21, 2007
+
+* $scope::variable refs now check for empty stack so that expr == null if
+  $scope has an empty stack. Works for $scope[...]::variable too.  Nice!
+
+August 20, 2007
+
+* Added reset() to CommonTreeNodeStream, token stream too
+
+* Made refs to rule/token properties use ?: to avoid null ptr exception.
+  $label.st now is label!=null?label.st:null.  Updated TestAttributes.
+  This is useful not only for optional rule/token refs, but also during
+  error recovery.  If ID is not matched, $ID.text won't cause a null ptr.
+
+August 20, 2007
+*	Fixed ANTLR-177: hashCode/equals not consistent for label
+	Fixed bug where Rule was compared to string; introduced from dev branch
+
+August 15, 2007 -- Got rough draft of the grammar import working.
+                   Submit to dev and then integrate into mainline.
+
+	All file changes/additions:
+	
+	README.txt	# edit
+	CHANGES.txt	# add
+	  Factored out the changes from the readme.
+
+	runtime/Java/src/org/antlr/runtime/BaseRecognizer.java	# edit
+	runtime/Java/src/org/antlr/runtime/DFA.java	# edit
+	runtime/Java/src/org/antlr/runtime/Lexer.java	# edit
+	runtime/Java/src/org/antlr/runtime/Parser.java	# edit
+	runtime/Java/src/org/antlr/runtime/debug/DebugParser.java	# edit
+	runtime/Java/src/org/antlr/runtime/tree/TreeParser.java	# edit
+	  Factored state fields into RecognizerSharedState
+	  object. You will see a lot of things like
+            state.errorRecovery = false;
+	runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java	# add
+          Shares all recognizer state variables including lexer even though
+	  these are superfluous to parsers and tree parsers.  There
+	  was a casting issue that I could not resolve.
+
+	src/org/antlr/Tool.java	# edit
+	  Broke apart Grammar.setGrammarContent() into
+	  parseAndBuildAST() and analyzeGrammar() to make the grammar
+	  import work. I needed to be able to look at the trees for
+	  imported grammars before analyzing them and building DFA. Added
+	  use of the CompositeGrammar object and handling of multiple
+	  delegate grammars. Changed decision DFA DOT file names to
+	  include the grammar name.
+
+	src/org/antlr/analysis/DFA.java	# edit
+	  Just tweaked to use generics, updated a comment.
+
+	src/org/antlr/analysis/DecisionProbe.java	# edit
+	  Just tweaked to use generics.
+
+	src/org/antlr/analysis/NFA.java	# edit
+	  NFA now span multiple grammars and so I moved the NFAs state
+	  tracking to the composite grammar object.
+
+	src/org/antlr/analysis/NFAState.java	# edit
+	  Added some null checking and made a field public.
+
+	src/org/antlr/analysis/NFAToDFAConverter.java	# edit
+	  Changed a method call to directly access a field.
+
+	src/org/antlr/analysis/RuleClosureTransition.java	# edit
+	  Instead of using a rule index, which does not span multiple
+	  grammars, the transition object now tracks a pointer to
+	  the actual Rule definition object.
+
+	src/org/antlr/analysis/SemanticContext.java	# edit
+	  Tweaked to use a field instead of a method
+
+	src/org/antlr/codegen/ActionTranslator.g	# edit
+	src/org/antlr/codegen/ActionTranslatorLexer.java	# edit
+	  Tweaked to use new runtime and they changed method name.
+
+	src/org/antlr/codegen/CodeGenerator.java	# edit
+	  Tweaked comments.
+
+	src/org/antlr/codegen/codegen.g	# edit
+	  Added import grammar syntax and altered rule atom to pass a
+	  scope around so that grammar.rule works.  Caution this
+	  feature is used internally by ANTLR and is not meant to be
+	  used by users at this point.
+
+	src/org/antlr/codegen/templates/ANTLRCore.sti	# edit
+	  Added scope to all ruleref template interfaces.
+
+	src/org/antlr/codegen/templates/Java/Java.stg	# edit
+	  Grammars can now import other grammars, which I implemented
+	  using a delegation pointer to the other grammar(s). So if
+	  grammar A imports grammars B and C, then the generated
+	  recognizer for A must have delegation pointers to BParser
+	  and CParser objects. These are now fields:
+
+	    // delegates
+	    <grammar.delegates:
+	     {g|public <g.name>Lexer <g:delegateName()>;}; separator="\n">
+
+          Also, B and C must have back pointers to the delegator so
+          that they can refer to rules that have been overridden.
+          This is a mechanism akin to static inheritance:
+
+	    // delegators
+	    <grammar.delegators:
+	     {g|public <g.name>Lexer <g:delegateName()>;}; separator="\n">
+
+	  This file also has a lot of changes so that state variables
+	  now are state.backtracking instead of the implied
+	  this.backtracking.
+
+	  The file also refers to grammar.delegatedRules attribute
+	  which is the list of Rule objects for which you must
+	  generate manual delegation.  This amounts to a stub whereby
+	  rule foo's method foo() simply calls X.foo() if foo is not
+	  defined inside the delegator.
+
+	  You will notice that the ruleref templates now take a scope
+	  so that I can have implicit rule Tokens referred to
+	  delegate.Tokens rule in a delegate grammar.  This is the way
+	  I do lexer grammar imports.
+
+	  I added a template called delegateName which uses the
+	  grammar name to compute a delegate name if the user does not
+	  specify a label in the import statement such as:
+
+	  import x=X;
+
+	  Oh, note that rule reference templates all receive a Rule
+	  object now instead of the simple rule name as the 'rule'
+	  attribute.  You will see me doing <rule.name> instead of
+	  <name> now.
+
+	src/org/antlr/codegen/templates/Java/Dbg.stg	# edit
+	  Changes mirroring the constructor and field stuff from
+	  Java.stg. Part of this is a cut and paste because of a bug
+	  in ST.
+
+	src/org/antlr/codegen/templates/Java/AST.stg	# edit
+	src/org/antlr/codegen/templates/Java/ASTParser.stg	# edit
+	src/org/antlr/codegen/templates/Java/ASTTreeParser.stg	# edit
+	  Just added the scope attribute.
+
+	src/org/antlr/test/BaseTest.java	# edit
+	  Added functionality to support testing composite grammars.
+	    execLexer()
+
+	src/org/antlr/test/TestAttributes.java	# edit
+	  Tweak to deal with shared recognizer state.
+
+	src/org/antlr/test/TestCompositeGrammars.java	# add
+	  Start of my unit tests.
+
+	src/org/antlr/tool/CompositeGrammar.java	# add
+	src/org/antlr/tool/CompositeGrammarTree.java	# add
+	  Tracks main grammar and all delegate grammars. Tracks unique
+	  NFA state numbers and unique token types. This keeps a tree
+	  of grammars computed from the import/delegation chain. When
+	  you want to look up a rule, it starts at the root of the
+	  tree and does a pre-order search to find the rule.
+
+	src/org/antlr/tool/ActionAnalysis.g	# edit
+	src/org/antlr/tool/ActionAnalysisLexer.java	# edit
+
+	src/org/antlr/tool/AttributeScope.java	# edit
+	  Updated to use generics in one place.
+
+	src/org/antlr/tool/DOTGenerator.java	# edit
+	  Updated to indicate when nonlocal rules are referenced.
+
+	src/org/antlr/tool/ErrorManager.java	# edit
+	  Added some error messages for import grammars; I need more.
+
+	src/org/antlr/tool/FASerializer.java	# edit
+	  Tweaked to use a field not method.
+
+	src/org/antlr/tool/Grammar.java	# edit
+	  This is where most of the meat is for the grammar import
+	  stuff as you can imagine.  I factored out the token type
+	  tracking into the CompositeGrammar object. I added code to
+	  the addArtificialMatchTokensRule method so that it includes
+	  references to all delegate lexer Tokens rules. Altered the
+	  rule lookup stuff so that it knows about delegate grammars.
+
+	src/org/antlr/tool/GrammarAST.java	# edit
+	src/org/antlr/tool/GrammarAnalysisAbortedMessage.java	# edit
+	src/org/antlr/tool/GrammarReport.java	# edit
+	src/org/antlr/tool/NonRegularDecisionMessage.java	# edit
+	  Made enclosing rule visible as field.
+
+	src/org/antlr/tool/GrammarSanity.java	# edit
+	  General cleanup and addition of generics.
+
+	src/org/antlr/tool/Interpreter.java	# edit
+	  Reference fields instead of methods.
+
+	src/org/antlr/tool/NFAFactory.java	# edit
+	  General cleanup and use of Rule object instead of rule
+	  index.
+
+	src/org/antlr/tool/NameSpaceChecker.java	# edit
+	  A little bit of cleanup and changes to use either the local
+	  or globally visible rule. Added code to check that scopes
+	  are valid on scoped rule references. again this is an
+	  internal feature, not to be used by users.
+
+	src/org/antlr/tool/RandomPhrase.java	# edit
+	  Tweaked.
+
+	src/org/antlr/tool/Rule.java	# edit
+	  Added field imported. Removed some unused methods by
+	  commenting them out. Made toString() more expressive.
+
+	src/org/antlr/tool/antlr.g	# edit
+	src/org/antlr/tool/antlr.print.g	# edit
+	src/org/antlr/tool/assign.types.g	# edit
+	src/org/antlr/tool/buildnfa.g	# edit
+	src/org/antlr/tool/define.g	# edit
+	  Added syntax for import statement.  assign.types.g is the
+	  grammar that invokes Grammar.importGrammar().
+
+	src/org/antlr/tool/templates/messages/languages/en.stg	# edit
+	  Added error messages.
+
+	Added
+
+	CHANGES.txt
+	runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java
+	src/org/antlr/test/TestCompositeGrammars.java
+	src/org/antlr/tool/CompositeGrammar.java
+	src/org/antlr/tool/CompositeGrammarTree.java
 
 3.0.1 - August 13, 2007
 
@@ -142,11 +1153,197 @@ July 22, 2007
 * fixed dynamic scope implementation in lexers. They were not creating new scope
   entries on the stack.  Unsupported feature!
 
+July 30, 2007
+
+* float return values were initialized to 0.0 not 0.0f in java.
+
+July 28, 2007
+
+* Sam Ellis points out an init var bug in ANTLRReaderStream.
+
+July 27, 2007 (done in dev branch)
+
+* Moved token type index stuff from CommonTreeNodeStream to TreeWizard
+
+* Added getChildren to BaseTree.
+
+* Added heterogeneous tree functionality; rewrite for parser/tree parser
+  and auto AST constr. for parser.
+
+	org/antlr/runtime/tree/RewriteRuleElementStream.java
+	org/antlr/runtime/tree/RewriteRuleNodeStream.java
+	org/antlr/runtime/tree/RewriteRuleTokenStream.java
+		Renamed method next() and re-factor things to have more
+		specific methods: nextToken, nextNode, nextTree.
+
+	codegen/codegen.g
+		Updated to include new <NodeType> AST structure for
+		token references.  Pushed hetero attribute into
+		all tokenRef* templates.
+	codegen/templates/Java/AST.stg
+		Factored out a few templates:
+			createImaginaryNode(tokenType,hetero,args)
+			createRewriteNodeFromElement(token,hetero,args)
+		Converted a lot of stream next() calls to more specific
+			nextToken, nextNode, nextTree per above.
+	codegen/templates/Java/ASTParser.stg
+		Added createNodeFromToken template and re-factored creation
+		sites to use that template.  Added hetero attribute.
+	codegen/templates/Java/ASTTreeParser.stg
+		Added createRewriteNodeFromElement template and re-factored.
+
+	test/TestHeteroAST.java
+		New file. Unit tests to test new hetero tree construction.
+	test/TestRewriteAST.java
+		Fixed test.  Nil single-node trees no longer return nil;
+		They return null.
+
+	tool/ErrorManager.java
+	tool/templates/messages/languages/en.stg
+		Added error message:
+		HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
+		  "alts with rewrites can't use heterogeneous types left of ->"
+
+	tool/antlr.g
+	tool/antlr.print.g
+	tool/assign.types.g
+	tool/buildnfa.g
+	tool/define.g
+		Added syntax for <NodeType> to token references.
+		Altered AST structure rippled through different phases.
+
+July 24, 2007
+
+* Deleted DoubleLinkTree.java; CommonTree does that now.
+
+July 23, 2007
+
+* template group outputFile; changed rewrite arg to rewriteMode.
+
+* added rewrite mode for tree parser build AST.
+
+July 22, 2007
+
+* Kay fixed dynamic scope implementation in lexers. They were not
+  creating new scope entries on the stack.  This is an UNSUPPORTED feature.
+
+* added getParent and getChildIndex to TreeAdaptor.  Added
+  implementation to CommonTree.  It's just too useful having those
+  parent and child indexes available for rewriting etc...  I tried 2x
+  to make an implementation of tree rewriting w/o this and the
+  constraints just made it too expensive and complicated.  Have to
+  update adaptors to set parent, child index values.  Updated Tree
+  interface and BaseTree also.  Should only affect target developers
+  not users.  Well, unless they impl Tree.
+
+* dupNode (via ctor) of CommonTree didn't copy start/stop token indexes.
+
+TARGET DEVELOPERS WARNING -- AST.stg split with some functionality
+                             going into ASTParser.stg then I added
+                             ASTTreeParser.stg.  CodeGenerator
+                             assumes new subgroups exist.
+
+July 20, 2007
+
+* Added AST construction for tree parsers including -> rewrite rules.
+  Rewrite mode (rewrite=true) alters the tree in place rather than
+  constructing a whole new tree.  Implementation notes:
+
+  org/antlr/runtime/tree/Tree.java
+	Add methods for parent and child index functionality.
+	Also added freshenParentAndChildIndexes() which you can use
+	to ensure that all double linking is set up right after you
+	manipulate the tree manually.  The setChild methods etc. do
+	the proper thing so you shouldn't need this.
+	Added replaceChildren() to support tree rewrite mode in tree parsers
+  org/antlr/runtime/tree/BaseTree.java
+	Updated to set parent and child index stuff.  Added replaceChildren
+	method etc...  It still only has a list of children as sole field
+     	but calls methods that subclasses can choose to implement such as
+	CommonTree.
+  org/antlr/runtime/tree/CommonTree.java
+	Added parent and childIndex fields to doubly link.
+  org/antlr/runtime/tree/TreeAdaptor.java
+	Added methods for new parent and child index functionality.
+	Also added method for rewrite mode in tree parsers:
+	replaceChildren(Object parent, int startChildIndex,
+                        int stopChildIndex, Object t);
+	Added setChild and deleteChild methods
+  org/antlr/runtime/tree/BaseTreeAdaptor.java
+	Moved dupTree here from BaseTree.
+	Updated rulePostProcessing to deal with parent and child index.
+	Added setChild and deleteChild implementations
+  org/antlr/runtime/tree/CommonTreeAdaptor.java
+	Added methods to deal with the parent and child index for a node.
+
+  org/antlr/runtime/tree/CommonTreeNodeStream.java
+	Removed token type index and method fillReverseIndex etc...
+	Probably will move into the tree wizard in the future.
+	Changed call/seek stack implementation to use IntArray
+	Added replaceChildren interface.
+  org/antlr/runtime/tree/TreeNodeStream.java
+	Added replaceChildren.
+  org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
+	Added replaceChildren method but no implementation
+
+  codegen/templates/ANTLRCore.sti
+	Changed rewrite to a better name: rewriteMode
+	Added tree level argument to alt, tree so that auto AST
+        construction can occur while recognizing in tree parsers.
+
+  codegen/templates/Java/AST.stg
+	Split template group: added two subclasses to handle different
+	functionality for normal parsing and tree parsing + AST
+	construction.  Tree parsers default behavior is to dup tree
+	not construct another.  Added ASTParser.stg and
+	ASTTreeParser.stg to handle auto AST construction during
+	recognition for the two different parser types.  I just copied
+	the token, rule, set, wildcard templates to the subclasses.
+	The rewrite templates are still in AST.stg. I factored out the
+	node creation so that the same rewrite templates can be used
+	for both parsing and tree parsing.
+
+  codegen/templates/Java/ASTParser.stg
+	The templates needed to build trees with auto construction
+	during parsing.
+  codegen/templates/Java/ASTTreeParser.stg
+	The templates needed to build trees with auto construction
+	during tree parsing.
+  codegen/templates/Java/Java.stg
+	genericParser now has rewriteElementType (Node or Token) so
+	that the rewrite streams know what kind of elements are inside
+	during rewrite rule tree construction.
+  codegen/templates/Java/ST.stg
+	rewrite attribute name change to rewriteMode
+
+  org/antlr/runtime/debug/DebugTreeAdaptor.java
+  org/antlr/runtime/debug/DebugTreeNodeStream.java
+	Updated to handle new interfaces
+
+  test/BaseTest.java
+	Added test rig update to handle AST construction by tree parsers.
+	All tree construction runs automatically test sanity of parent
+	and child indexes.
+  test/TestTreeGrammarRewriteAST.java
+  test/TestTreeNodeStream.java
+  test/TestTrees.java
+	new file; tests the new parent and child index stuff in trees.
+
 July 19, 2007
 
 * implemented new unique ID; GC was causing non unique hash codes.  Debugging
   tree grammars was messing up.
 
+* got tree rewrites working in tree grammars.  It builds a completely new
+  tree from old tree; i.e., you get two trees in memory.  W/o a rewrite
+  rule, the input for that rule is duplicated and returned. -> w/o elements
+  to the right means don't return anything; i.e., delete.  Ooops...way
+  harder than I thought.  Real implementation notes above.
+
+INCOMPATIBILITY WARNING -- templates have changed; must regen output from
+                           grammars.  Runtime libraries have also changed.
+                           Debug event listener interface has changed also.
+
 July 17, 2007
 
 * Added line/charposition to node socket events and event dump so
@@ -296,18 +1493,18 @@ April 22, 2007
 April 21, 2007
 
 * Pushing a huge update that fixes:
-	http://www.antlr.org:8888/browse/ANTLR-112
-	http://www.antlr.org:8888/browse/ANTLR-110
-	http://www.antlr.org:8888/browse/ANTLR-109
-	http://www.antlr.org:8888/browse/ANTLR-103
-	http://www.antlr.org:8888/browse/ANTLR-97
-	http://www.antlr.org:8888/browse/ANTLR-113
-	http://www.antlr.org:8888/browse/ANTLR-66
-	http://www.antlr.org:8888/browse/ANTLR-98
-	http://www.antlr.org:8888/browse/ANTLR-24
-	http://www.antlr.org:8888/browse/ANTLR-114
-	http://www.antlr.org:8888/browse/ANTLR-5
-	http://www.antlr.org:8888/browse/ANTLR-6
+	http://www.antlr.org/browse/ANTLR-112
+	http://www.antlr.org/browse/ANTLR-110
+	http://www.antlr.org/browse/ANTLR-109
+	http://www.antlr.org/browse/ANTLR-103
+	http://www.antlr.org/browse/ANTLR-97
+	http://www.antlr.org/browse/ANTLR-113
+	http://www.antlr.org/browse/ANTLR-66
+	http://www.antlr.org/browse/ANTLR-98
+	http://www.antlr.org/browse/ANTLR-24
+	http://www.antlr.org/browse/ANTLR-114
+	http://www.antlr.org/browse/ANTLR-5
+	http://www.antlr.org/browse/ANTLR-6
 
   Basically, I gutted the way AST rewrites work.  MUCH better.
 
@@ -424,10 +1621,10 @@ March 14, 2007
 
 * Added -Xnoinlinedfa make all DFA with tables; no inline prediction with IFs
 
-* Fixed http://www.antlr.org:8888/browse/ANTLR-80.
+* Fixed http://www.antlr.org/browse/ANTLR-80.
   Sem pred states didn't define lookahead vars.
 
-* Fixed http://www.antlr.org:8888/browse/ANTLR-91.  
+* Fixed http://www.antlr.org/browse/ANTLR-91.
   When forcing some acyclic DFA to be state tables, they broke.
   Forcing all DFA to be state tables should give same results.
 
@@ -595,7 +1792,7 @@ December 27, 2006
   $slist.text will get only first stat.  I need to add a warning about
   this...
 
-* Fixed http://www.antlr.org:8888/browse/ANTLR-76 for Java.
+* Fixed http://www.antlr.org/browse/ANTLR-76 for Java.
   Enhanced TokenRewriteStream so it accepts any object; converts
   to string at last second.  Allows you to rewrite with StringTemplate
   templates now :)
diff --git a/LICENSE.txt b/tool/LICENSE.txt
similarity index 97%
rename from LICENSE.txt
rename to tool/LICENSE.txt
index 1d1d5d6..67e047c 100644
--- a/LICENSE.txt
+++ b/tool/LICENSE.txt
@@ -1,5 +1,5 @@
 [The "BSD licence"]
-Copyright (c) 2003-2006 Terence Parr
+Copyright (c) 2003-2008 Terence Parr
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/tool/README.txt b/tool/README.txt
new file mode 100644
index 0000000..f11633a
--- /dev/null
+++ b/tool/README.txt
@@ -0,0 +1,123 @@
+ANTLR v3.1.3
+March 17, 2009
+
+Terence Parr, parrt at cs usfca edu
+ANTLR project lead and supreme dictator for life
+University of San Francisco
+
+INTRODUCTION 
+
+Welcome to ANTLR v3!  ANTLR (ANother Tool for Language Recognition) is
+a language tool that provides a framework for constructing
+recognizers, interpreters, compilers, and translators from grammatical
+descriptions containing actions in a variety of target
+languages. ANTLR provides excellent support for tree construction,
+tree walking, translation, error recovery, and error reporting. I've
+been working on parser generators for 20 years and on this particular
+version of ANTLR for 5 years.
+
+You should use v3 in conjunction with ANTLRWorks:
+
+    http://www.antlr.org/works/index.html 
+
+and gUnit (grammar unit testing tool included in distribution):
+
+    http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing
+
+The book will also help you a great deal (printed May 15, 2007); you
+can also buy the PDF:
+
+    http://www.pragmaticprogrammer.com/titles/tpantlr/index.html
+
+(New book coming out in beta Summer 2009: "Language Design Patterns")
+
+See the getting started document:
+
+    http://www.antlr.org/wiki/display/ANTLR3/FAQ+-+Getting+Started
+
+You also have the examples plus the source to guide you.
+
+See the wiki FAQ:
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+
+and general doc root:
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+3+Wiki+Home
+
+Please help add/update FAQ entries.
+
+If all else fails, you can buy support or ask the antlr-interest list:
+
+    http://www.antlr.org/support.html
+
+Per the license in LICENSE.txt, this software is not guaranteed to
+work and might even destroy all life on this planet:
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+EXAMPLES
+
+ANTLR v3 sample grammars:
+
+    http://www.antlr.org/download/examples-v3.tar.gz
+
+Also check out Mantra Programming Language for a prototype (work in
+progress) using v3:
+
+    http://www.linguamantra.org/
+
+----------------------------------------------------------------------
+
+What is ANTLR?
+
+ANTLR stands for (AN)other (T)ool for (L)anguage (R)ecognition and was
+originally known as PCCTS.  ANTLR is a language tool that provides a
+framework for constructing recognizers, compilers, and translators
+from grammatical descriptions containing actions.  Target language list:
+
+http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
+
+----------------------------------------------------------------------
+
+How is ANTLR v3 different than ANTLR v2?
+
+See "What is the difference between ANTLR v2 and v3?"
+
+    http://www.antlr.org/wiki/pages/viewpage.action?pageId=719
+
+See migration guide:
+
+    http://www.antlr.org/wiki/display/ANTLR3/Migrating+from+ANTLR+2+to+ANTLR+3
+
+----------------------------------------------------------------------
+
+How do I install this damn thing?
+
+Just untar and you'll get:
+
+antlr-3.1.3/README.txt (this file)
+antlr-3.1.3/LICENSE.txt
+antlr-3.1.3/src/main/java/org/antlr/...
+antlr-3.1.3/lib/stringtemplate-3.2.jar
+antlr-3.1.3/lib/antlr-2.7.7.jar (ANTLR v3 currently written in v2)
+antlr-3.1.3/lib/antlr-3.1.3.jar (all jars combined, runtime and tools)
+antlr-3.1.3/lib/antlr-runtime-3.1.3.jar (only what is needed to use ANTLR parsers)
+
+Then you need to add all the jars in lib to your CLASSPATH.
+
+Please see the FAQ
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
diff --git a/tool/antlr.config b/tool/antlr.config
new file mode 100644
index 0000000..e69de29
diff --git a/tool/pom.xml b/tool/pom.xml
new file mode 100644
index 0000000..fc3124b
--- /dev/null
+++ b/tool/pom.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr</artifactId>
+    <packaging>jar</packaging>
+    <name>ANTLR Grammar Tool</name>
+    <url>http://antlr.org</url>
+
+
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.2</version>
+    </parent>
+    <dependencies>
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr-runtime</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.5</version>
+            <scope>test</scope>
+        </dependency>
+
+    </dependencies>
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build, run and test the ANTLR Tool. The ANTLR Tool uses earlier versions
+    of ANTLR at runtime (for the moment), uses the current
+    released version of ANTLR String template, but obviously is
+    reliant on the latest snapshot of the runtime, which will either be
+    taken from the antlr-snapshot repository, or your local .m2
+    repository if you built and installed that locally.
+
+    -->
+
+    
+    <build>
+      
+        <defaultGoal>install</defaultGoal>
+         
+        <plugins>
+            
+            <plugin>
+
+                <groupId>org.antlr</groupId>
+                <artifactId>antlr3-maven-plugin</artifactId>
+                <version>3.1.3-1</version>
+                <configuration>
+                    <libDirectory>target/generated-sources/antlr/org/antlr/grammar/v3</libDirectory>
+                </configuration>
+                <executions>
+                    <execution>
+                        
+                        <goals>
+                            <goal>antlr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                
+            </plugin>
+            
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>antlr-maven-plugin</artifactId>
+                <configuration>
+                    <sourceDirectory>src/main/antlr2/org/antlr/grammar/v2</sourceDirectory>
+                    <grammars>antlr.g codegen.g, antlr.print.g, assign.types.g, buildnfa.g, define.g</grammars>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>generate</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>1.5</source>
+                    <target>1.5</target>
+                    <sourceDirectory>src</sourceDirectory>
+                </configuration>
+            </plugin>
+
+        </plugins>
+
+
+    </build>
+</project>
diff --git a/src/org/antlr/tool/antlr.g b/tool/src/main/antlr2/org/antlr/grammar/v2/antlr.g
similarity index 79%
rename from src/org/antlr/tool/antlr.g
rename to tool/src/main/antlr2/org/antlr/grammar/v2/antlr.g
index 7180759..eade736 100644
--- a/src/org/antlr/tool/antlr.g
+++ b/tool/src/main/antlr2/org/antlr/grammar/v2/antlr.g
@@ -1,7 +1,7 @@
 header {
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -26,12 +26,26 @@ header {
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.tool;
+package org.antlr.grammar.v2;
 import java.util.*;
 import java.io.*;
 import org.antlr.analysis.*;
 import org.antlr.misc.*;
-import antlr.*;
+import org.antlr.tool.*;
+
+import antlr.TokenBuffer;
+import antlr.TokenStreamException;
+import antlr.Token;
+import antlr.TokenStream;
+import antlr.RecognitionException;
+import antlr.NoViableAltException;
+import antlr.ParserSharedInputState;
+import antlr.collections.impl.BitSet;
+import antlr.collections.AST;
+import antlr.ASTFactory;
+import antlr.ASTPair;
+import antlr.TokenWithIndex;
+import antlr.collections.impl.ASTArray;
 }
 
 /** Read in an ANTLR grammar and build an AST.  Try not to do
@@ -55,7 +69,7 @@ options {
     buildAST = true;
 	exportVocab=ANTLR;
     ASTLabelType="GrammarAST";
-	k=2;
+	k=3;
 }
 
 tokens {
@@ -86,37 +100,41 @@ tokens {
     TREE_GRAMMAR;
     COMBINED_GRAMMAR;
     INITACTION;
+    FORCED_ACTION; // {{...}} always exec even during syn preds
     LABEL; // $x used in rewrite rules
     TEMPLATE;
     SCOPE="scope";
+    IMPORT="import";
     GATED_SEMPRED; // {p}? =>
     SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
     BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
     FRAGMENT="fragment";
+    DOT;
 }
 
 {
-	Grammar grammar = null;
+	protected Grammar grammar = null;
 	protected int gtype = 0;
-	protected String currentRuleName = null;
-	protected GrammarAST currentBlockAST = null;
 
-	/* this next stuff supports construction of the Tokens artificial rule.
-	   I hate having some partial functionality here, I like doing everything
-	   in future tree passes, but the Tokens rule is sensitive to filter mode.
-	   And if it adds syn preds, future tree passes will need to process the
-	   fragments defined in Tokens; a cyclic dependency.
-	   As of 1-17-06 then, Tokens is created for lexer grammars in the
-	   antlr grammar parser itself.
+    public Grammar getGrammar() {
+        return grammar;
+    }
 
-	   This grammar is also sensitive to the backtrack grammar option that
-	   tells ANTLR to automatically backtrack when it can't compute a DFA.
+    public void setGrammar(Grammar grammar) {
+        this.grammar = grammar;
+    }
+
+    public int getGtype() {
+        return gtype;
+    }
 
-	   7-2-06 I moved all option processing to antlr.g from define.g as I
-	   need backtrack option etc... for blocks.  Got messy.
-	*/
-	protected List lexerRuleNames = new ArrayList();
-	public List getLexerRuleNames() { return lexerRuleNames; }
+    public void setGtype(int gtype) {
+        this.gtype = gtype;
+        }
+	
+    protected String currentRuleName = null;
+	protected GrammarAST currentBlockAST = null;
+	protected boolean atTreeRoot; // are we matching a tree root in tree grammar?
 
 	protected GrammarAST setToBlockWithSet(GrammarAST b) {
 		GrammarAST alt = #(#[ALT,"ALT"],#b,#[EOA,"<end-of-alt>"]);
@@ -131,7 +149,6 @@ tokens {
 	 *  labels, tree operators, rewrites are removed.
 	 */
 	protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {
-		//GrammarAST nalt = (GrammarAST)astFactory.dupTree(alt);
 		GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);
 		GrammarAST blk = #(#[BLOCK,"BLOCK"],
 						   nalt,
@@ -146,7 +163,7 @@ tokens {
 	 */
 	protected void prefixWithSynPred(GrammarAST alt) {
 		// if they want backtracking and it's not a lexer rule in combined grammar
-		String autoBacktrack = (String)currentBlockAST.getOption("backtrack");
+		String autoBacktrack = (String)grammar.getBlockOption(currentBlockAST, "backtrack");
 		if ( autoBacktrack==null ) {
 			autoBacktrack = (String)grammar.getOption("backtrack");
 		}
@@ -179,7 +196,6 @@ tokens {
 		// during code gen we convert to function call with templates
 		String synpredinvoke = predName;
 		GrammarAST p = #[synpredTokenType,synpredinvoke];
-		p.setEnclosingRule(currentRuleName);
 		// track how many decisions have synpreds
 		grammar.blocksWithSynPreds.add(currentBlockAST);
 		return p;
@@ -228,7 +244,8 @@ tokens {
 			GrammarAST tokensRuleAST =
 			    grammar.addArtificialMatchTokensRule(
 			    	root,
-			    	lexerRuleNames,
+			    	grammar.lexerRuleNamesInCombined,
+                    grammar.getDelegateNames(),
 			    	filter!=null&&filter.equals("true"));
 		}
     }
@@ -240,31 +257,49 @@ grammar![Grammar g]
 	GrammarAST opt=null;
 	Token optionsStartToken = null;
 	Map opts;
+	// set to factory that sets enclosing rule
+	astFactory = new ASTFactory() {
+		{
+			setASTNodeClass(GrammarAST.class);
+			setASTNodeClass("org.antlr.tool.GrammarAST");
+		}
+		public AST create(Token token) {
+			AST t = super.create(token);
+			((GrammarAST)t).enclosingRuleName = currentRuleName;
+			return t;
+		}
+		public AST create(int i) {
+			AST t = super.create(i);
+			((GrammarAST)t).enclosingRuleName = currentRuleName;
+			return t;
+		}
+	};
 }
    :    //hdr:headerSpec
         ( ACTION )?
 	    ( cmt:DOC_COMMENT  )?
-        gr:grammarType gid:id SEMI
+        gr:grammarType gid:id {grammar.setName(#gid.getText());} SEMI
 			( {optionsStartToken=LT(1);}
 			  opts=optionsSpec {grammar.setOptions(opts, optionsStartToken);}
 			  {opt=(GrammarAST)returnAST;}
 			)?
+            (ig:delegateGrammars)?
 		    (ts:tokensSpec!)?
         	scopes:attrScopes
 		    (a:actions)?
 	        r:rules
         EOF
         {
-        #grammar = #(null, #(#gr, #gid, #cmt, opt, #ts, #scopes, #a, #r));
+        #grammar = #(null, #(#gr, #gid, #cmt, opt, #ig, #ts, #scopes, #a, #r));
         cleanup(#grammar);
         }
 	;
 
 grammarType
-    :   (	"lexer"!  {gtype=LEXER_GRAMMAR;}    // pure lexer
-    	|   "parser"! {gtype=PARSER_GRAMMAR;}   // pure parser
-    	|   "tree"!   {gtype=TREE_GRAMMAR;}     // a tree parser
-    	|			  {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
+    :   (	"lexer"!  {gtype=LEXER_GRAMMAR; grammar.type = Grammar.LEXER;}       // pure lexer
+    	|   "parser"! {gtype=PARSER_GRAMMAR; grammar.type = Grammar.PARSER;}     // pure parser
+    	|   "tree"!   {gtype=TREE_GRAMMAR; grammar.type = Grammar.TREE_PARSER;}  // a tree parser
+    	|			  {gtype=COMBINED_GRAMMAR; grammar.type = Grammar.COMBINED;} // merged parser/lexer
     	)
     	gr:"grammar" {#gr.setType(gtype);}
     ;
@@ -287,21 +322,6 @@ actionScopeName
     |   p:"parser"	{#p.setType(ID);}
 	;
 
-/*
-optionsSpec returns [Map opts=new HashMap()]
-    :   #( OPTIONS (option[opts])+ )
-    ;
-
-option[Map opts]
-{
-    String key=null;
-    Object value=null;
-}
-    :   #( ASSIGN id:ID {key=#id.getText();} value=optionValue )
-        {opts.put(key,value);}
-    ;
-*/
-
 optionsSpec returns [Map opts=new HashMap()]
 	:	OPTIONS^ (option[opts] SEMI!)+ RCURLY!
 	;
@@ -314,21 +334,6 @@ option[Map opts]
     	{
     	opts.put(#o.getText(), value);
     	}
-    	/*
-    	{
-    	if ( #o.getText().equals("filter") && #v.getText().equals("true") ) {
-    		isFilterMode = true;
-    	}
-    	else if ( #o.getText().equals("backtrack") && #v.getText().equals("true") ) {
-    		if ( currentRuleName==null ) { // must grammar level
-    			isAutoBacktrackMode = true;
-    		}
-    		else {
-    			blockAutoBacktrackMode = true;
-    		}
-    	}
-    	}
-    	*/
     ;
 
 optionValue returns [Object value=null]
@@ -342,15 +347,14 @@ optionValue returns [Object value=null]
 //  |   cs:charSet       {value = #cs;} // return set AST in this case
     ;
 
-/*
-optionValue
-	:	id
-	|   STRING_LITERAL
-	|	CHAR_LITERAL
-	|	INT
-//	|   cs:charSet       {value = #cs;} // return set AST in this case
-	;
-*/
+delegateGrammars
+    :   "import"^ delegateGrammar (COMMA! delegateGrammar)* SEMI!
+    ;
+
+delegateGrammar
+    :   lab:id ASSIGN^ g:id {grammar.importGrammar(#g, #lab.getText());}
+    |   g2:id               {grammar.importGrammar(#g2,null);}
+    ;
 
 tokensSpec
 	:	TOKENS^
@@ -401,7 +405,7 @@ Map opts = null;
 	ruleName:id
 	{currentRuleName=#ruleName.getText();
      if ( gtype==LEXER_GRAMMAR && #p4==null ) {
-         lexerRuleNames.add(currentRuleName);
+         grammar.lexerRuleNamesInCombined.add(currentRuleName);
 	 }
 	}
 	( BANG )?
@@ -414,20 +418,11 @@ Map opts = null;
 	colon:COLON
 	{
 	blkRoot = #[BLOCK,"BLOCK"];
-	blkRoot.options = opts;
+	blkRoot.setBlockOptions(opts);
 	blkRoot.setLine(colon.getLine());
 	blkRoot.setColumn(colon.getColumn());
 	eob = #[EOB,"<end-of-block>"];
     }
-    /*
-	(	{!currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME)}?
-		(setNoParens SEMI) => s:setNoParens // try to collapse sets
-		{
-		blk = #(blkRoot,#(#[ALT,"ALT"],#s,#[EOA,"<end-of-alt>"]),eob);
-		}
-	|	b:altList[opts] {blk = #b;}
-	)
-	*/
 	b:altList[opts] {blk = #b;}
 	semi:SEMI
 	( ex:exceptionGroup )?
@@ -436,14 +431,13 @@ Map opts = null;
 	eob.setLine(semi.getLine());
 	eob.setColumn(semi.getColumn());
     GrammarAST eor = #[EOR,"<end-of-rule>"];
-   	eor.setEnclosingRule(#ruleName.getText());
 	eor.setLine(semi.getLine());
 	eor.setColumn(semi.getColumn());
 	GrammarAST root = #[RULE,"rule"];
 	root.ruleStartTokenIndex = start;
 	root.ruleStopTokenIndex = stop;
 	root.setLine(startLine);
-	root.options = opts;
+	root.setBlockOptions(opts);
     #rule = #(root,
               #ruleName,modifier,#(#[ARG,"ARG"],#aa),#(#[RET,"RET"],#rt),
               opt,#scopes,#a,blk,ex,eor);
@@ -486,12 +480,7 @@ block
 GrammarAST save = currentBlockAST;
 Map opts=null;
 }
-    :   /*
-        (set) => s:set  // special block like ('a'|'b'|'0'..'9')
-
-    |	*/
-
-    	lp:LPAREN^ {#lp.setType(BLOCK); #lp.setText("BLOCK");}
+    :   lp:LPAREN^ {#lp.setType(BLOCK); #lp.setText("BLOCK");}
 		(
 			// 2nd alt and optional branch ambig due to
 			// linear approx LL(2) issue.  COLON ACTION
@@ -527,7 +516,7 @@ Map opts=null;
 altList[Map opts]
 {
 	GrammarAST blkRoot = #[BLOCK,"BLOCK"];
-	blkRoot.options = opts;
+	blkRoot.setBlockOptions(opts);
 	blkRoot.setLine(LT(0).getLine()); // set to : or (
 	blkRoot.setColumn(LT(0).getColumn());
 	GrammarAST save = currentBlockAST;
@@ -590,26 +579,40 @@ elementNoOptionSpec
     IntSet elements=null;
     GrammarAST sub, sub2;
 }
-	:	id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
-        ( sub=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub;} )?
-    |   atom
-        ( sub2=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub2;} )?
-    |	ebnf
-	|   ACTION
-	|   p:SEMPRED ( IMPLIES! {#p.setType(GATED_SEMPRED);} )?
-		{
-		#p.setEnclosingRule(currentRuleName);
-		grammar.blocksWithSemPreds.add(currentBlockAST);
-		}
-	|   t3:tree
+	:	(	id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
+			( sub=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub;} )?
+		|   atom
+			( sub2=ebnfSuffix[(GrammarAST)currentAST.root,false]! {#elementNoOptionSpec=sub2;} )?
+		|	ebnf
+		|   FORCED_ACTION
+		|   ACTION
+		|   p:SEMPRED ( IMPLIES! {#p.setType(GATED_SEMPRED);} )?
+			{
+			grammar.blocksWithSemPreds.add(currentBlockAST);
+			}
+		|   t3:tree
+		)
 	;
 
-atom:   range (ROOT^|BANG^)?
-    |   terminal
+atom
+    :   range (ROOT^|BANG^)?
+    |   (   options {
+            // TOKEN_REF WILDCARD could match terminal here then WILDCARD next
+            generateAmbigWarnings=false;
+        }
+        :   // grammar.rule but ensure no spaces. "A . B" is not a qualified ref
+        	// We do here rather than lexer so we can build a tree
+            {LT(1).getColumn()+LT(1).getText().length()==LT(2).getColumn()&&
+			 LT(2).getColumn()+1==LT(3).getColumn()}?
+			id w:WILDCARD^ (terminal|ruleref) {#w.setType(DOT);}
+        |   terminal
+        |   ruleref
+        )
     |	notSet (ROOT^|BANG^)?
-    |   rr:RULE_REF^
-		( ARG_ACTION )?
-		(ROOT^|BANG^)?
+    ;
+
+ruleref
+    :   rr:RULE_REF^ ( ARG_ACTION )? (ROOT^|BANG^)?
     ;
 
 notSet
@@ -625,11 +628,16 @@ notSet
         {#notSet.setLine(line); #notSet.setColumn(col);}
 	;
 
-tree :
-	TREE_BEGIN^
-        element ( element )+
-    RPAREN!
-	;
+treeRoot
+    :   {atTreeRoot=true;}
+        (   id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
+	    |   atom
+	    |   block
+	    )
+        {atTreeRoot=false;}
+    ;
+
+tree:   TREE_BEGIN^ treeRoot ( element )+ RPAREN! ;
 
 /** matches ENBF blocks (and sets via block rule) */
 ebnf!
@@ -681,15 +689,43 @@ terminal
 {
 GrammarAST ebnfRoot=null, subrule=null;
 }
-    :   cl:CHAR_LITERAL^ (ROOT^|BANG^)?
+    :   cl:CHAR_LITERAL^ ( elementOptions[#cl]! )? (ROOT^|BANG^)?
 
 	|   tr:TOKEN_REF^
+            ( elementOptions[#tr]! )?
 			( ARG_ACTION )? // Args are only valid for lexer rules
             (ROOT^|BANG^)?
 
-	|   sl:STRING_LITERAL (ROOT^|BANG^)?
+	|   sl:STRING_LITERAL^ ( elementOptions[#sl]! )? (ROOT^|BANG^)?
 
 	|   wi:WILDCARD (ROOT^|BANG^)?
+	    {
+		if ( atTreeRoot ) {
+		    ErrorManager.syntaxError(
+			    ErrorManager.MSG_WILDCARD_AS_ROOT,grammar,wi,null,null);
+	    }
+	    }
+	;
+
+elementOptions[GrammarAST terminalAST]
+	:	OPEN_ELEMENT_OPTION^ defaultNodeOption[terminalAST] CLOSE_ELEMENT_OPTION!
+	|	OPEN_ELEMENT_OPTION^ elementOption[terminalAST] (SEMI! elementOption[terminalAST])* CLOSE_ELEMENT_OPTION!
+	;
+
+defaultNodeOption[GrammarAST terminalAST]
+{
+StringBuffer buf = new StringBuffer();
+}
+	:	i:id {buf.append(#i.getText());} (WILDCARD i2:id {buf.append("."+#i2.getText());})*
+	    {terminalAST.setTerminalOption(grammar,Grammar.defaultTokenOption,buf.toString());}
+	;
+
+elementOption[GrammarAST terminalAST]
+	:	a:id ASSIGN^ (b:id|s:STRING_LITERAL)
+		{
+		Object v = (#b!=null)?#b.getText():#s.getText();
+		terminalAST.setTerminalOption(grammar,#a.getText(),v);
+		}
 	;
 
 ebnfSuffix[GrammarAST elemAST, boolean inRewrite] returns [GrammarAST subrule=null]
@@ -728,19 +764,13 @@ notTerminal
 	;
 
 idList
-	:	(id)+
+	:	id (COMMA! id)*
 	;
 
 id	:	TOKEN_REF {#id.setType(ID);}
 	|	RULE_REF  {#id.setType(ID);}
 	;
 
-/** Match anything that looks like an ID and return tree as token type ID */
-idToken
-    :	TOKEN_REF {#idToken.setType(ID);}
-	|	RULE_REF  {#idToken.setType(ID);}
-	;
-
 // R E W R I T E  S Y N T A X
 
 rewrite
@@ -751,10 +781,6 @@ rewrite
 		( options { warnWhenFollowAmbig=false;}
 		: rew:REWRITE pred:SEMPRED alt:rewrite_alternative
 	      {root.addChild( #(#rew, #pred, #alt) );}
-		  {
-          #pred.setEnclosingRule(currentRuleName);
-          #rew.setEnclosingRule(currentRuleName);
-          }
 	    )*
 		rew2:REWRITE alt2:rewrite_alternative
         {
@@ -796,6 +822,8 @@ rewrite_alternative
         }
 
    	|   {#rewrite_alternative = #(altRoot,#[EPSILON,"epsilon"],eoa);}
+
+   	|	{grammar.buildAST()}? ETC
     ;
 
 rewrite_element
@@ -813,16 +841,15 @@ rewrite_atom
 {
 GrammarAST subrule=null;
 }
-    :   cl:CHAR_LITERAL
-	|   tr:TOKEN_REF^ (ARG_ACTION)? // for imaginary nodes
+    :   tr:TOKEN_REF^ (elementOptions[#tr]!)? (ARG_ACTION)? // for imaginary nodes
     |   rr:RULE_REF
-	|   sl:STRING_LITERAL
+	|   cl:CHAR_LITERAL^ (elementOptions[#cl]!)?
+	|   sl:STRING_LITERAL^ (elementOptions[#sl]!)?
 	|!  d:DOLLAR i:id // reference to a label in a rewrite rule
 		{
 		#rewrite_atom = #[LABEL,i_AST.getText()];
 		#rewrite_atom.setLine(#d.getLine());
 		#rewrite_atom.setColumn(#d.getColumn());
-        #rewrite_atom.setEnclosingRule(currentRuleName);
 		}
 	|	ACTION
 	;
@@ -902,7 +929,7 @@ rewrite_template_arg
 
 class ANTLRLexer extends Lexer;
 options {
-	k=2;
+	k=3;
 	exportVocab=ANTLR;
 	testLiterals=false;
 	interactive=true;
@@ -916,6 +943,7 @@ options {
     public void tab() {
 		setColumn( getColumn()+1 );
     }
+    public boolean hasASTOperator = false;
 }
 
 WS	:	(	' '
@@ -990,14 +1018,16 @@ REWRITE : "->" ;
 
 SEMI:	';' ;
 
-ROOT : '^' ;
+ROOT : '^' {hasASTOperator=true;} ;
 
-BANG : '!' ;
+BANG : '!' {hasASTOperator=true;} ;
 
 OR	:	'|' ;
 
 WILDCARD : '.' ;
 
+ETC : "..." ;
+
 RANGE : ".." ;
 
 NOT :	'~' ;
@@ -1006,6 +1036,18 @@ RCURLY:	'}'	;
 
 DOLLAR : '$' ;
 
+STRAY_BRACKET
+	:	']'
+		{
+		ErrorManager.syntaxError(
+			ErrorManager.MSG_SYNTAX_ERROR,
+			null,
+			_token,
+			"antlr: dangling ']'? make sure to escape with \\]",
+			null);
+		}
+	;
+
 CHAR_LITERAL
 	:	'\'' (ESC|'\n'{newline();}|~'\'')* '\''
 		{
@@ -1017,7 +1059,7 @@ CHAR_LITERAL
 	;
 
 DOUBLE_QUOTE_STRING_LITERAL
-	:	'"' ('\\'! '"'|'\n'{newline();}|~'"')* '"'
+	:	'"' ('\\'! '"'|'\\' ~'"'|'\n'{newline();}|~'"')* '"'
 	;
 
 DOUBLE_ANGLE_STRING_LITERAL
@@ -1035,29 +1077,6 @@ ESC	:	'\\'
 		|	'\''
 		|	'\\'
 		|	'>'
-		|	('0'..'3')
-			(
-				options {
-					warnWhenFollowAmbig = false;
-				}
-			:
-			('0'..'9')
-				(
-					options {
-						warnWhenFollowAmbig = false;
-					}
-				:
-				'0'..'9'
-				)?
-			)?
-		|	('4'..'7')
-			(
-				options {
-					warnWhenFollowAmbig = false;
-				}
-			:
-			('0'..'9')
-			)?
 		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
 		|	. // unknown, leave as it is
 		)
@@ -1078,22 +1097,22 @@ XDIGIT :
 INT	:	('0'..'9')+
 	;
 
+//HETERO_TYPE : '<'! ~'<' (~'>')* '>'! ;
+
 ARG_ACTION
-   :
-	NESTED_ARG_ACTION
+	:	'['! NESTED_ARG_ACTION ']'!
 	;
 
 protected
 NESTED_ARG_ACTION :
-	'['!
-	(
-		NESTED_ARG_ACTION
-	|	'\r' '\n'	{newline();}
+	(	'\r' '\n'	{newline();}
 	|	'\n'		{newline();}
+	|	'\\'! ']'
+	|	'\\' ~']'
 	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
 	|	~']'
 	)*
-	']'!
 	;
 
 ACTION
@@ -1103,7 +1122,12 @@ ACTION
 		{
 			Token t = makeToken(_ttype);
 			String action = $getText;
-			action = action.substring(1,action.length()-1);
+            int n = 1; // num delimiter chars
+            if ( action.startsWith("{{") && action.endsWith("}}") ) {
+                t.setType(FORCED_ACTION);
+                n = 2;
+            }
+			action = action.substring(n,action.length()-n);
 			t.setText(action);
 			t.setLine(actionLine);			// set action line to start
 			t.setColumn(actionColumn);
@@ -1127,6 +1151,7 @@ NESTED_ACTION :
 	|	ACTION_CHAR_LITERAL
 	|	COMMENT
 	|	ACTION_STRING_LITERAL
+	|	ACTION_ESC
 	|	.
 	)*
 	'}'
diff --git a/src/org/antlr/tool/antlr.print.g b/tool/src/main/antlr2/org/antlr/grammar/v2/antlr.print.g
similarity index 94%
rename from src/org/antlr/tool/antlr.print.g
rename to tool/src/main/antlr2/org/antlr/grammar/v2/antlr.print.g
index db55383..8389ba3 100644
--- a/src/org/antlr/tool/antlr.print.g
+++ b/tool/src/main/antlr2/org/antlr/grammar/v2/antlr.print.g
@@ -1,7 +1,7 @@
 header {
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -26,8 +26,9 @@ header {
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-	package org.antlr.tool;
+	package org.antlr.grammar.v2;
 	import java.util.*;
+    import org.antlr.tool.*;
 }
 
 /** Print out a grammar (no pretty printing).
@@ -45,6 +46,7 @@ options {
 }
 
 {
+
 	protected Grammar grammar;
 	protected boolean showActions;
     protected StringBuffer buf = new StringBuffer(300);
@@ -124,6 +126,7 @@ grammarSpec[String gtype]
 	:	 id:ID {out(gtype+"grammar "+#id.getText());}
         (cmt:DOC_COMMENT {out(#cmt.getText()+"\n");} )?
         (optionsSpec)? {out(";\n");}
+        (delegateGrammars)?
         (tokensSpec)?
         (attrScope)*
         (actions)?
@@ -184,6 +187,10 @@ charSetElement
 	;
 */
 
+delegateGrammars
+	:	#( "import" ( #(ASSIGN ID ID) | ID )+ )
+	;
+
 tokensSpec
 	:	#( TOKENS ( tokenSpec )+ )
 	;
@@ -264,7 +271,7 @@ finallyClause
 
 single_rewrite
 	:	#( REWRITE {out(" ->");} (SEMPRED {out(" {"+#SEMPRED.getText()+"}?");})?
-	       ( alternative | rewrite_template | ACTION {out(" {"+#ACTION.getText()+"}");})
+	       ( alternative | rewrite_template | ETC {out("...");} | ACTION {out(" {"+#ACTION.getText()+"}");})
 	     )
 	;
 
@@ -302,6 +309,7 @@ element
     |   tree
     |   #( SYNPRED block[true] ) {out("=>");}
     |   a:ACTION  {if ( showActions ) {out("{"); out(a.getText()); out("}");}}
+    |   a2:FORCED_ACTION  {if ( showActions ) {out("{{"); out(a2.getText()); out("}}");}}
     |   pred:SEMPRED
     	{
     	if ( showActions ) {out("{"); out(pred.getText()); out("}?");}
@@ -339,6 +347,7 @@ atom
 			   (ast_suffix)?
              )
 		|   #( TOKEN_REF		{out(#atom.toString());} 
+               
 			   (targ:ARG_ACTION	{out("["+#targ.toString()+"]");} )?
 			   (ast_suffix)?
              )
@@ -349,11 +358,12 @@ atom
 			   (ast_suffix)?
              )
 		|   #( WILDCARD		{out(#atom.toString());}
-			   (ast_suffix)?
+                (ast_suffix)?
              )
 		)
 		{out(" ");}
     |	LABEL {out(" $"+#LABEL.getText());} // used in -> rewrites
+    |   #(DOT ID {out(#ID.getText()+".");} atom) // scope override on rule
     ;
 
 ast_suffix
diff --git a/src/org/antlr/tool/assign.types.g b/tool/src/main/antlr2/org/antlr/grammar/v2/assign.types.g
similarity index 53%
rename from src/org/antlr/tool/assign.types.g
rename to tool/src/main/antlr2/org/antlr/grammar/v2/assign.types.g
index 4c773d1..7be767a 100644
--- a/src/org/antlr/tool/assign.types.g
+++ b/tool/src/main/antlr2/org/antlr/grammar/v2/assign.types.g
@@ -1,7 +1,7 @@
 header {
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -26,11 +26,12 @@ header {
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-	package org.antlr.tool;
+	package org.antlr.grammar.v2;
 	import java.util.*;
 	import org.antlr.analysis.*;
 	import org.antlr.misc.*;
 	import java.io.*;
+    import org.antlr.tool.*;
 }
 
 /** [Warning: TJP says that this is probably out of date as of 11/19/2005,
@@ -107,208 +108,57 @@ options {
             ex);
     }
 
-protected GrammarAST stringAlias;
-protected GrammarAST charAlias;
-protected GrammarAST stringAlias2;
-protected GrammarAST charAlias2;
 
 protected Grammar grammar;
-protected Map stringLiterals = new LinkedHashMap(); // Map<literal,Integer>
-protected Map tokens = new LinkedHashMap();         // Map<name,Integer>
-/** Track actual lexer rule defs so we don't get repeated token defs in 
- *  generated lexer.
- */
-protected Set tokenRuleDefs = new HashSet();        // Set<name>
-protected Map aliases = new LinkedHashMap();        // Map<name,literal>
 protected String currentRuleName;
-protected static final Integer UNASSIGNED = Utils.integer(-1);
-protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
-
-/** Track string literals in any non-lexer rule (could be in tokens{} section) */
-protected void trackString(GrammarAST t) {
-	// if lexer, don't allow aliasing in tokens section
-	if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
-		ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
-								  grammar,
-								  t.token,
-								  t.getText());
-		return;
-	}
-	// in a plain parser grammar rule, cannot reference literals
-	// (unless defined previously via tokenVocab option)
-	if ( grammar.type==Grammar.PARSER &&
-	     grammar.getTokenType(t.getText())==Label.INVALID )
-    {
-		ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
-								  grammar,
-								  t.token,
-								  t.getText());
-	}
-	// otherwise add literal to token types if referenced from parser rule
-	// or in the tokens{} section
-	if ( (currentRuleName==null ||
-         Character.isLowerCase(currentRuleName.charAt(0))) &&
-         grammar.getTokenType(t.getText())==Label.INVALID )
-	{
-		stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
-	}
-}
-
-protected void trackToken(GrammarAST t) {
-	// imported token names might exist, only add if new
-	if ( grammar.getTokenType(t.getText())==Label.INVALID ) {
-		tokens.put(t.getText(), UNASSIGNED);
-	}
-}
 
-protected void trackTokenRule(GrammarAST t,
-							  GrammarAST modifier,
-							  GrammarAST block)
-{
-	// imported token names might exist, only add if new
-	if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
-		if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
-			return;
-		}
-		int existing = grammar.getTokenType(t.getText());
-		if ( existing==Label.INVALID ) {
-			tokens.put(t.getText(), UNASSIGNED);
-		}
-		// look for "<TOKEN> : <literal> ;" pattern
-        // (can have optional action last)
-		if ( block.hasSameTreeStructure(charAlias) ||
-             block.hasSameTreeStructure(stringAlias) ||
-             block.hasSameTreeStructure(charAlias2) ||
-             block.hasSameTreeStructure(stringAlias2) )
-        {
-			alias(t, (GrammarAST)block.getFirstChild().getFirstChild());
-			tokenRuleDefs.add(t.getText());
-		}
-	}
-	// else error
+protected static GrammarAST stringAlias;
+protected static GrammarAST charAlias;
+protected static GrammarAST stringAlias2;
+protected static GrammarAST charAlias2;
+
+protected void initASTPatterns() {
+	stringAlias =
+		#(#[BLOCK], #(#[ALT], #[STRING_LITERAL], #[EOA]), #[EOB]);
+	charAlias =
+		#(#[BLOCK], #(#[ALT], #[CHAR_LITERAL], #[EOA]), #[EOB]);
+	stringAlias2 =
+		#(#[BLOCK], #(#[ALT], #[STRING_LITERAL], #[ACTION], #[EOA]),#[EOB]);
+	charAlias2 =
+		#(#[BLOCK], #(#[ALT], #[CHAR_LITERAL], #[ACTION], #[EOA]), #[EOB]);
 }
 
-protected void alias(GrammarAST t, GrammarAST s) {
-	aliases.put(t.getText(), s.getText());
-}
-
-protected void assignTypes() {
-	/*
-	System.out.println("stringLiterals="+stringLiterals);
-	System.out.println("tokens="+tokens);
-	System.out.println("aliases="+aliases);
-	*/
-
-	assignTokenIDTypes();
-
-	aliasTokenIDsAndLiterals();
-
-	assignStringTypes();
-
-	/*
-	System.out.println("AFTER:");
-	System.out.println("stringLiterals="+stringLiterals);
-	System.out.println("tokens="+tokens);
-	System.out.println("aliases="+aliases);
-	*/
-
-	notifyGrammarObject();
-}
-
-	protected void assignStringTypes() {
-		// walk string literals assigning types to unassigned ones
-		Set s = stringLiterals.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String lit = (String) it.next();
-			Integer oldTypeI = (Integer)stringLiterals.get(lit);
-			int oldType = oldTypeI.intValue();
-			if ( oldType<Label.MIN_TOKEN_TYPE ) {
-				Integer typeI = Utils.integer(grammar.getNewTokenType());
-				stringLiterals.put(lit, typeI);
-				// if string referenced in combined grammar parser rule,
-				// automatically define in the generated lexer
-				grammar.defineLexerRuleForStringLiteral(lit, typeI.intValue());
-			}
-		}
-	}
-
-	protected void aliasTokenIDsAndLiterals() {
-		if ( grammar.type==Grammar.LEXER ) {
-			return; // strings/chars are never token types in LEXER
-		}
-		// walk aliases if any and assign types to aliased literals if literal
-		// was referenced
-		Set s = aliases.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			String literal = (String)aliases.get(tokenID);
-			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
-				stringLiterals.put(literal, tokens.get(tokenID));
-				// an alias still means you need a lexer rule for it
-				Integer typeI = (Integer)tokens.get(tokenID);
-				if ( !tokenRuleDefs.contains(tokenID) ) {
-					grammar.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI.intValue());
-				}
-			}
-		}
-	}
-
-	protected void assignTokenIDTypes() {
-		// walk token names, assigning values if unassigned
-		Set s = tokens.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			if ( tokens.get(tokenID)==UNASSIGNED ) {
-				tokens.put(tokenID, Utils.integer(grammar.getNewTokenType()));
-			}
-		}
-	}
-
-	protected void notifyGrammarObject() {
-		Set s = tokens.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			int ttype = ((Integer)tokens.get(tokenID)).intValue();
-			grammar.defineToken(tokenID, ttype);
-		}
-		s = stringLiterals.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String lit = (String) it.next();
-			int ttype = ((Integer)stringLiterals.get(lit)).intValue();
-			grammar.defineToken(lit, ttype);
-		}
-	}
-
-	protected void init(Grammar g) {
-		this.grammar = g;
-        stringAlias = 
-            #(#[BLOCK], #(#[ALT], #[STRING_LITERAL], #[EOA]), #[EOB]);
-        charAlias =
-            #(#[BLOCK], #(#[ALT], #[CHAR_LITERAL], #[EOA]), #[EOB]);
-        stringAlias2 =
-            #(#[BLOCK], #(#[ALT], #[STRING_LITERAL], #[ACTION], #[EOA]),#[EOB]);
-        charAlias2 = 
-            #(#[BLOCK], #(#[ALT], #[CHAR_LITERAL], #[ACTION], #[EOA]), #[EOB]);
-	}
+// Behavior moved to AssignTokenTypesBehavior
+protected void trackString(GrammarAST t) {;}
+protected void trackToken(GrammarAST t) {;}
+protected void trackTokenRule(GrammarAST t, GrammarAST modifier, GrammarAST block) {;}
+protected void alias(GrammarAST t, GrammarAST s) {;}
+public    void defineTokens(Grammar root) {;}
+protected void defineStringLiteralsFromDelegates() {;}
+protected void assignStringTypes(Grammar root) {;}
+protected void aliasTokenIDsAndLiterals(Grammar root) {;}
+protected void assignTokenIDTypes(Grammar root) {;}
+protected void defineTokenNamesAndLiteralsInGrammar(Grammar root) {;}
+protected void init(Grammar root) {;}
 }
 
 grammar[Grammar g]
 {
 	init(g);
 }
-    :   ( #( LEXER_GRAMMAR 	  {grammar.type = Grammar.LEXER;} 	  	grammarSpec )
-	    | #( PARSER_GRAMMAR   {grammar.type = Grammar.PARSER;}      grammarSpec )
-	    | #( TREE_GRAMMAR     {grammar.type = Grammar.TREE_PARSER;} grammarSpec )
-	    | #( COMBINED_GRAMMAR {grammar.type = Grammar.COMBINED;}    grammarSpec )
+    :   ( #( LEXER_GRAMMAR 	  grammarSpec )
+	    | #( PARSER_GRAMMAR   grammarSpec )
+	    | #( TREE_GRAMMAR     grammarSpec )
+	    | #( COMBINED_GRAMMAR grammarSpec )
 	    )
-        {assignTypes();}
     ;
 
 grammarSpec
 {Map opts=null;}
-	:	id:ID {grammar.setName(#id.getText());}
+	:	id:ID
 		(cmt:DOC_COMMENT)?
 		(optionsSpec)?
+        (delegateGrammars)?
         (tokensSpec)?
         (attrScope)*
         (AMPERSAND)* // skip actions
@@ -333,7 +183,7 @@ option[Map opts]
         opts.put(key,value);
         // check for grammar-level option to import vocabulary
         if ( currentRuleName==null && key.equals("tokenVocab") ) {
-            grammar.importTokenVocabulary((String)value);
+            grammar.importTokenVocabulary(#id,(String)value);
         }
         }
     ;
@@ -356,6 +206,14 @@ charSetElement
 	|   #( RANGE c3:CHAR_LITERAL c4:CHAR_LITERAL )
 	;
 
+delegateGrammars
+	:	#( "import"
+            (   #(ASSIGN ID ID)
+            |   ID
+            )+
+        )
+	;
+
 tokensSpec
 	:	#( TOKENS ( tokenSpec )+ )
 	;
@@ -426,7 +284,7 @@ finallyClause
     ;
 
 rewrite
-	:	( #( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION) ) )*
+	:	( #( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION|ETC) ) )*
 	;
 
 element
@@ -441,6 +299,7 @@ element
     |   ebnf
     |   tree
     |   #( SYNPRED block ) 
+    |   FORCED_ACTION
     |   ACTION
     |   SEMPRED
     |   SYN_SEMPRED
@@ -459,11 +318,12 @@ tree:   #(TREE_BEGIN  element (element)*  )
     ;
 
 atom
-    :   RULE_REF
-    |   t:TOKEN_REF      {trackToken(t);}
+    :   #( rr:RULE_REF (rarg:ARG_ACTION)? )
+    |   #( t:TOKEN_REF (targ:ARG_ACTION )? ) {trackToken(t);}
     |   c:CHAR_LITERAL   {trackString(c);}
     |   s:STRING_LITERAL {trackString(s);}
     |   WILDCARD
+    |   #(DOT ID atom) // scope override on rule
     ;
 
 ast_suffix
diff --git a/src/org/antlr/tool/buildnfa.g b/tool/src/main/antlr2/org/antlr/grammar/v2/buildnfa.g
similarity index 87%
rename from src/org/antlr/tool/buildnfa.g
rename to tool/src/main/antlr2/org/antlr/grammar/v2/buildnfa.g
index 9793680..c6a128d 100644
--- a/src/org/antlr/tool/buildnfa.g
+++ b/tool/src/main/antlr2/org/antlr/grammar/v2/buildnfa.g
@@ -1,7 +1,7 @@
 header {
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -26,10 +26,11 @@ header {
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.tool;
+package org.antlr.grammar.v2;
 import java.util.*;
 import org.antlr.analysis.*;
 import org.antlr.misc.*;
+import org.antlr.tool.*;
 }
 
 /** Build an NFA from a tree representing an ANTLR grammar. */
@@ -41,6 +42,7 @@ options {
 }
 
 {
+
 /** Factory used to create nodes and submachines */
 protected NFAFactory factory = null;
 
@@ -62,6 +64,7 @@ public TreeToNFAConverter(Grammar g, NFA nfa, NFAFactory factory) {
 	this.factory = factory;
 }
 
+/*
 protected void init() {
     // define all the rule begin/end NFAStates to solve forward reference issues
     Collection rules = grammar.getRules();
@@ -70,20 +73,22 @@ protected void init() {
         String ruleName = r.name;
         NFAState ruleBeginState = factory.newState();
         ruleBeginState.setDescription("rule "+ruleName+" start");
-		ruleBeginState.setEnclosingRuleName(ruleName);
-        grammar.setRuleStartState(ruleName, ruleBeginState);
+		ruleBeginState.enclosingRule = r;
+        r.startState = ruleBeginState;
         NFAState ruleEndState = factory.newState();
         ruleEndState.setDescription("rule "+ruleName+" end");
         ruleEndState.setAcceptState(true);
-		ruleEndState.setEnclosingRuleName(ruleName);
-        grammar.setRuleStopState(ruleName, ruleEndState);
+		ruleEndState.enclosingRule = r;
+        r.stopState = ruleEndState;
     }
 }
+*/
 
 protected void addFollowTransition(String ruleName, NFAState following) {
      //System.out.println("adding follow link to rule "+ruleName);
      // find last link in FOLLOW chain emanating from rule
-     NFAState end = grammar.getRuleStopState(ruleName);
+     Rule r = grammar.getRule(ruleName);
+     NFAState end = r.stopState;
      while ( end.transition(1)!=null ) {
          end = (NFAState)end.transition(1).target;
      }
@@ -129,8 +134,7 @@ protected void finish() {
 }
 
 grammar
-    :   {init();}
-        ( #( LEXER_GRAMMAR grammarSpec )
+    :   ( #( LEXER_GRAMMAR grammarSpec )
 	    | #( PARSER_GRAMMAR grammarSpec )
 	    | #( TREE_GRAMMAR grammarSpec )
 	    | #( COMBINED_GRAMMAR grammarSpec )
@@ -146,6 +150,7 @@ grammarSpec
 	:	ID
 		(cmt:DOC_COMMENT)?
         ( #(OPTIONS .) )?
+        ( #("import" .) )?
         ( #(TOKENS .) )?
         (attrScope)*
         (AMPERSAND)* // skip actions
@@ -163,7 +168,10 @@ rule
     String r=null;
 }
     :   #( RULE id:ID {r=#id.getText();}
-		{currentRuleName = r; factory.currentRuleName = r;}
+		{
+        currentRuleName = r;
+        factory.setCurrentRule(grammar.getLocallyDefinedRule(r));
+        }
 		(modifier)?
         (ARG (ARG_ACTION)?)
         (RET (ARG_ACTION)?)
@@ -175,7 +183,7 @@ rule
            (exceptionGroup)?
            EOR
            {
-                if ( blk.setValue!=null ) {
+                if ( blk.getSetValue() !=null ) {
                     // if block comes back as a set not BLOCK, make it
                     // a single ALT block
                     b = factory.build_AlternativeBlockFromSet(b);
@@ -184,8 +192,9 @@ rule
 					 grammar.type==Grammar.LEXER )
 				{
 					// attach start node to block for this rule
-					NFAState start = grammar.getRuleStartState(r);
-					start.setAssociatedASTNode(#id);
+                    Rule thisR = grammar.getLocallyDefinedRule(r);
+					NFAState start = thisR.startState;
+					start.associatedASTNode = #id;
 					start.addTransition(new Transition(Label.EPSILON, b.left));
 
 					// track decision if > 1 alts
@@ -198,7 +207,7 @@ rule
 					}
 
 					// hook to end of rule node
-					NFAState end = grammar.getRuleStopState(r);
+					NFAState end = thisR.stopState;
 					b.right.addTransition(new Transition(Label.EPSILON,end));
 				}
            }
@@ -277,7 +286,7 @@ rewrite
 										  grammar, #rewrite.token, currentRuleName);
 			}
 			}
-			#( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION) )
+			#( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION|ETC) )
 		)*
 	;
 
@@ -286,7 +295,7 @@ element returns [StateCluster g=null]
     |   #(BANG g=element)
     |	#(ASSIGN ID g=element)
     |	#(PLUS_ASSIGN ID g=element)
-    |   #(RANGE a:atom b:atom)
+    |   #(RANGE a:atom[null] b:atom[null])
         {g = factory.build_Range(grammar.getTokenType(#a.getText()),
                                  grammar.getTokenType(#b.getText()));}
     |   #(CHAR_RANGE c1:CHAR_LITERAL c2:CHAR_LITERAL)
@@ -299,7 +308,8 @@ element returns [StateCluster g=null]
     |   g=ebnf
     |   g=tree
     |   #( SYNPRED block )
-    |   ACTION
+    |   ACTION {g = factory.build_Action(#ACTION);}
+    |   FORCED_ACTION {g = factory.build_Action(#FORCED_ACTION);}
     |   pred:SEMPRED {g = factory.build_SemanticPredicate(#pred);}
     |   spred:SYN_SEMPRED {g = factory.build_SemanticPredicate(#spred);}
     |   bpred:BACKTRACK_SEMPRED {g = factory.build_SemanticPredicate(#bpred);}
@@ -332,7 +342,7 @@ ebnf returns [StateCluster g=null]
         }
     |   #( OPTIONAL b=block )
         {
-        if ( blk.setValue!=null ) {
+        if ( blk.getSetValue() !=null ) {
             // if block comes back SET not BLOCK, make it
             // a single ALT block
             b = factory.build_AlternativeBlockFromSet(b);
@@ -347,7 +357,7 @@ ebnf returns [StateCluster g=null]
     	}
     |   #( CLOSURE b=block )
         {
-        if ( blk.setValue!=null ) {
+        if (  blk.getSetValue() !=null ) {
             b = factory.build_AlternativeBlockFromSet(b);
         }
         g = factory.build_Astar(b);
@@ -366,7 +376,7 @@ ebnf returns [StateCluster g=null]
     	}
     |   #( POSITIVE_CLOSURE b=block )
         {
-        if ( blk.setValue!=null ) {
+        if ( blk.getSetValue() !=null ) {
             b = factory.build_AlternativeBlockFromSet(b);
         }
         g = factory.build_Aplus(b);
@@ -394,14 +404,14 @@ StateCluster down=null, up=null;
 		   {el=(GrammarAST)_t;}
 		   g=element
 		   {
-           down = factory.build_Atom(Label.DOWN);
+           down = factory.build_Atom(Label.DOWN, el);
            // TODO set following states for imaginary nodes?
            //el.followingNFAState = down.right;
 		   g = factory.build_AB(g,down);
 		   }
 		   ( {el=(GrammarAST)_t;} e=element {g = factory.build_AB(g,e);} )*
 		   {
-           up = factory.build_Atom(Label.UP);
+           up = factory.build_Atom(Label.UP, el);
            //el.followingNFAState = up.right;
 		   g = factory.build_AB(g,up);
 		   // tree roots point at right edge of DOWN for LOOK computation later
@@ -411,7 +421,7 @@ StateCluster down=null, up=null;
     ;
 
 atom_or_notatom returns [StateCluster g=null]
-	:	g=atom
+	:	g=atom[null]
 	|	#(  n:NOT
             (  c:CHAR_LITERAL (ast1:ast_suffix)?
 	           {
@@ -429,7 +439,7 @@ atom_or_notatom returns [StateCluster g=null]
 								              #c.token,
 									          #c.getText());
                 }
-	            g=factory.build_Set(notAtom);
+	            g=factory.build_Set(notAtom,#n);
 	           }
             |  t:TOKEN_REF (ast3:ast_suffix)?
 	           {
@@ -457,7 +467,7 @@ atom_or_notatom returns [StateCluster g=null]
 							              #t.token,
 								          #t.getText());
                }
-	           g=factory.build_Set(notAtom);
+	           g=factory.build_Set(notAtom,#n);
 	           }
             |  g=set
 	           {
@@ -474,21 +484,22 @@ atom_or_notatom returns [StateCluster g=null]
 				  			              grammar,
 							              #n.token);
                }
-	           g=factory.build_Set(s);
+	           g=factory.build_Set(s,#n);
 	           }
             )
         	{#n.followingNFAState = g.right;}
          )
 	;
 
-atom returns [StateCluster g=null]
+atom[String scopeName] returns [StateCluster g=null]
     :   #( r:RULE_REF (rarg:ARG_ACTION)? (as1:ast_suffix)? )
         {
-        NFAState start = grammar.getRuleStartState(r.getText());
+        NFAState start = grammar.getRuleStartState(scopeName,r.getText());
         if ( start!=null ) {
-            int ruleIndex = grammar.getRuleIndex(r.getText());
-            g = factory.build_RuleRef(ruleIndex, start);
+            Rule rr = grammar.getRule(scopeName,r.getText());
+            g = factory.build_RuleRef(rr, start);
             r.followingNFAState = g.right;
+            r.NFAStartState = g.left;
             if ( g.left.transition(0) instanceof RuleClosureTransition
             	 && grammar.type!=Grammar.LEXER )
             {
@@ -498,60 +509,60 @@ atom returns [StateCluster g=null]
         }
         }
 
-    |   #( t:TOKEN_REF (targ:ARG_ACTION)? (as2:ast_suffix)? )
+    |   #( t:TOKEN_REF  (targ:ARG_ACTION)? (as2:ast_suffix)? )
         {
         if ( grammar.type==Grammar.LEXER ) {
-            NFAState start = grammar.getRuleStartState(t.getText());
+            NFAState start = grammar.getRuleStartState(scopeName,t.getText());
             if ( start!=null ) {
-                int ruleIndex = grammar.getRuleIndex(t.getText());
-                g = factory.build_RuleRef(ruleIndex, start);
+                Rule rr = grammar.getRule(scopeName,t.getText());
+                g = factory.build_RuleRef(rr, start);
+            	t.NFAStartState = g.left;
                 // don't add FOLLOW transitions in the lexer;
                 // only exact context should be used.
             }
         }
         else {
-            int tokenType = grammar.getTokenType(t.getText());
-            g = factory.build_Atom(tokenType);
+            g = factory.build_Atom(t);
             t.followingNFAState = g.right;
         }
         }
 
-    |   #( c:CHAR_LITERAL (as3:ast_suffix)? )
+    |   #( c:CHAR_LITERAL  (as3:ast_suffix)? )
     	{
     	if ( grammar.type==Grammar.LEXER ) {
-    		g = factory.build_CharLiteralAtom(c.getText());
+    		g = factory.build_CharLiteralAtom(c);
     	}
     	else {
-            int tokenType = grammar.getTokenType(c.getText());
-            g = factory.build_Atom(tokenType);
+            g = factory.build_Atom(c);
             c.followingNFAState = g.right;
     	}
     	}
 
-    |   #( s:STRING_LITERAL (as4:ast_suffix)? )
+    |   #( s:STRING_LITERAL  (as4:ast_suffix)? )
     	{
      	if ( grammar.type==Grammar.LEXER ) {
-     		g = factory.build_StringLiteralAtom(s.getText());
+     		g = factory.build_StringLiteralAtom(s);
      	}
      	else {
-             int tokenType = grammar.getTokenType(s.getText());
-             g = factory.build_Atom(tokenType);
+             g = factory.build_Atom(s);
              s.followingNFAState = g.right;
      	}
      	}
 
-    |   #( w:WILDCARD (as5:ast_suffix)? )    {g = factory.build_Wildcard();}
+    |   #( w:WILDCARD (as5:ast_suffix)? )
+        {
+        if ( nfa.grammar.type==Grammar.TREE_PARSER ) {
+            g = factory.build_WildcardTree(#w);
+        }
+        else {
+            g = factory.build_Wildcard(#w);
+        }
+        }
 
-	//|	g=set
+    |   #( DOT scope:ID g=atom[#scope.getText()] ) // scope override
 	;
 
 ast_suffix
-{
-if ( grammar.getOption("output")==null ) {
-	ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
-							  grammar, #ast_suffix.token, currentRuleName);
-}
-}
 	:	ROOT
 	|	BANG
 	;
@@ -566,9 +577,9 @@ IntSet elements=new IntervalSet();
            EOB
          )
         {
-        g = factory.build_Set(elements);
+        g = factory.build_Set(elements,#b);
         #b.followingNFAState = g.right;
-        #b.setValue = elements; // track set value of this block
+        #b.setSetValue(elements); // track set value of this block
         }
 		//{System.out.println("set elements="+elements.toString(grammar));}
 	;
@@ -578,7 +589,7 @@ setRule returns [IntSet elements=new IntervalSet()]
 	:	#( RULE id:ID (modifier)? ARG RET ( OPTIONS )? ( ruleScopeSpec )?
 		   	(AMPERSAND)*
            	#( BLOCK ( OPTIONS )?
-           	   ( #(ALT setElement[elements] EOA) )+
+           	   ( #(ALT (BACKTRACK_SEMPRED)? setElement[elements] EOA) )+
            	   EOB
            	 )
            	(exceptionGroup)?
@@ -679,7 +690,7 @@ setElement[IntSet elements]
 testBlockAsSet
 {
     int nAlts=0;
-    Rule r = grammar.getRule(currentRuleName);
+    Rule r = grammar.getLocallyDefinedRule(currentRuleName);
 }
 	:   #( BLOCK
            (   #(ALT (BACKTRACK_SEMPRED)? testSetElement {nAlts++;} EOA)
diff --git a/src/org/antlr/codegen/codegen.g b/tool/src/main/antlr2/org/antlr/grammar/v2/codegen.g
similarity index 83%
rename from src/org/antlr/codegen/codegen.g
rename to tool/src/main/antlr2/org/antlr/grammar/v2/codegen.g
index f69ba45..4fa6f73 100644
--- a/src/org/antlr/codegen/codegen.g
+++ b/tool/src/main/antlr2/org/antlr/grammar/v2/codegen.g
@@ -1,7 +1,7 @@
 header {
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -26,7 +26,7 @@ header {
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-	package org.antlr.codegen;
+	package org.antlr.grammar.v2;
     import org.antlr.tool.*;
     import org.antlr.analysis.*;
     import org.antlr.misc.*;
@@ -34,6 +34,7 @@ header {
 	import org.antlr.stringtemplate.*;
     import antlr.TokenWithIndex;
     import antlr.CommonToken;
+    import org.antlr.codegen.*;
 }
 
 /** Walk a grammar and generate code by gradually building up
@@ -58,6 +59,22 @@ options {
 	protected static final int RULE_BLOCK_NESTING_LEVEL = 0;
 	protected static final int OUTER_REWRITE_NESTING_LEVEL = 0;
 
+    public String getCurrentRuleName() {
+        return currentRuleName;
+    }
+
+    public void setCurrentRuleName(String currentRuleName) {
+        this.currentRuleName = currentRuleName;
+    }
+
+    public int getOuterAltNum() {
+        return outerAltNum;
+    }
+
+    public void setOuterAltNum(int outerAltNum) {
+        this.outerAltNum = outerAltNum;
+    }
+
     protected String currentRuleName = null;
     protected int blockNestingLevel = 0;
     protected int rewriteBlockNestingLevel = 0;
@@ -110,12 +127,12 @@ options {
 	}
 
 	protected StringTemplate getRuleElementST(String name,
-										      String elementName,
+										      String ruleTargetName,
 											  GrammarAST elementAST,
     										  GrammarAST ast_suffix,
     										  String label)
 	{
-		String suffix = getSTSuffix(ast_suffix,label);
+		String suffix = getSTSuffix(elementAST,ast_suffix,label);
 		name += suffix;
 		// if we're building trees and there is no label, gen a label
 		// unless we're in a synpred rule.
@@ -124,7 +141,7 @@ options {
 		     (r==null || !r.isSynPred) )
 		{
 			// we will need a label to do the AST or tracking, make one
-			label = generator.createUniqueLabel(elementName);
+			label = generator.createUniqueLabel(ruleTargetName);
 			CommonToken labelTok = new CommonToken(ANTLRParser.ID, label);
 			grammar.defineRuleRefLabel(currentRuleName, labelTok, elementAST);
 		}
@@ -141,7 +158,7 @@ options {
 											   GrammarAST ast_suffix,
 											   String label)
 	{
-		String suffix = getSTSuffix(ast_suffix,label);
+		String suffix = getSTSuffix(elementAST,ast_suffix,label);
 		name += suffix;
 		// if we're building trees and there is no label, gen a label
 		// unless we're in a synpred rule.
@@ -169,7 +186,8 @@ options {
 				Grammar.LabelElementPair pair = r.getLabel(label);
 				if ( pair!=null &&
 					 (pair.type==Grammar.TOKEN_LIST_LABEL||
-					  pair.type==Grammar.RULE_LIST_LABEL) )
+					  pair.type==Grammar.RULE_LIST_LABEL||
+					  pair.type==Grammar.WILDCARD_TREE_LIST_LABEL) )
 				{
 					hasListLabel=true;
 				}
@@ -181,20 +199,17 @@ options {
 	/** Return a non-empty template name suffix if the token is to be
 	 *  tracked, added to a tree, or both.
 	 */
-	protected String getSTSuffix(GrammarAST ast_suffix, String label) {
+	protected String getSTSuffix(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
 		if ( grammar.type==Grammar.LEXER ) {
 			return "";
 		}
 		// handle list label stuff; make element use "Track"
 
-		String astPart = "";
 		String operatorPart = "";
 		String rewritePart = "";
 		String listLabelPart = "";
-		if ( grammar.buildAST() ) {
-			astPart = "AST";
-		}
-		if ( ast_suffix!=null ) {
+		Rule ruleDescr = grammar.getRule(currentRuleName);
+		if ( ast_suffix!=null && !ruleDescr.isSynPred ) {
 			if ( ast_suffix.getType()==ANTLRParser.ROOT ) {
     			operatorPart = "RuleRoot";
     		}
@@ -202,7 +217,7 @@ options {
     			operatorPart = "Bang";
     		}
    		}
-		if ( currentAltHasASTRewrite ) {
+		if ( currentAltHasASTRewrite && elementAST.getType()!=WILDCARD ) {
 			rewritePart = "Track";
 		}
 		if ( isListLabel(label) ) {
@@ -238,7 +253,7 @@ options {
         return labels;
     }
 
-    protected void init(Grammar g) {
+    public void init(Grammar g) {
         this.grammar = g;
         this.generator = grammar.getCodeGenerator();
         this.templates = generator.getTemplates();
@@ -294,15 +309,14 @@ grammarSpec
 		 }
 		)?
 		{
-		String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
-        String n = #name.getText()+suffix;
-		recognizerST.setAttribute("name", n);
-		outputFileST.setAttribute("name", n);
-		headerFileST.setAttribute("name", n);
+		recognizerST.setAttribute("name", grammar.getRecognizerName());
+		outputFileST.setAttribute("name", grammar.getRecognizerName());
+		headerFileST.setAttribute("name", grammar.getRecognizerName());
 		recognizerST.setAttribute("scopes", grammar.getGlobalScopes());
 		headerFileST.setAttribute("scopes", grammar.getGlobalScopes());
 		}
 		( #(OPTIONS .) )?
+		( #(IMPORT .) )?
 		( #(TOKENS .) )?
         (attrScope)*
         (AMPERSAND)*
@@ -318,8 +332,7 @@ StringTemplate rST;
     			Rule r = grammar.getRule(ruleName);
     			}
      		:
-     			// if synpred, only gen if used in a DFA
-    			{!r.isSynPred || grammar.synPredNamesUsedInDFA.contains(ruleName)}?
+                {grammar.generateMethodForRule(ruleName)}?
     			rST=rule
 				{
 				if ( rST!=null ) {
@@ -394,7 +407,7 @@ rule returns [StringTemplate code=null]
 					Boolean.valueOf(grammar.isEmptyRule(block)));
 			}
 			code.setAttribute("ruleDescriptor", ruleDescr);
-			String memo = (String)#rule.getOption("memoize");
+			String memo = (String)grammar.getBlockOption(#rule,"memoize");
 			if ( memo==null ) {
 				memo = (String)grammar.getOption("memoize");
 			}
@@ -491,10 +504,13 @@ block[String blockTemplateName, DFA dfa]
               	this.outerAltNum++;
               }
               // add the rewrite code as just another element in the alt :)
-    		  if ( rew!=null ) {
-    		  	alt.setAttribute("elements.{el,line,pos}",
-    		  		rew, Utils.integer(r.getLine()), Utils.integer(r.getColumn()));
-    		  }
+              // (unless it's a " -> ..." rewrite
+              // ( -> ... )
+              boolean etc =
+              	r.getType()==REWRITE &&
+              	r.getFirstChild()!=null &&
+		  		r.getFirstChild().getType()==ETC;
+    		  if ( rew!=null && !etc ) { alt.setAttribute("rew", rew); }
     		  // add this alt to the list of alts for this block
               code.setAttribute("alts",alt);
               alt.setAttribute("altNum", Utils.integer(altNum));
@@ -510,6 +526,7 @@ block[String blockTemplateName, DFA dfa]
 
 setBlock returns [StringTemplate code=null]
 {
+StringTemplate setcode = null;
 if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
     Rule r = grammar.getRule(currentRuleName);
     currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
@@ -520,9 +537,13 @@ if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
 }
     :   s:BLOCK
         {
-        StringTemplate setcode =
-            getTokenElementST("matchSet", "set", #s, null, null);
         int i = ((TokenWithIndex)#s.getToken()).getIndex();
+		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+			setcode = getTokenElementST("matchRuleBlockSet", "set", #s, null, null);
+		}
+		else {
+			setcode = getTokenElementST("matchSet", "set", #s, null, null);
+		}
 		setcode.setAttribute("elementIndex", i);
 		if ( grammar.type!=Grammar.LEXER ) {
 			generator.generateLocalFOLLOW(#s,"set",currentRuleName,i);
@@ -541,6 +562,7 @@ if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
         if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
             altcode.setAttribute("autoAST", Boolean.valueOf(true));
         }
+        altcode.setAttribute("treeLevel", rewriteTreeNestingLevel);
         code = altcode;
         }
     ;
@@ -591,6 +613,7 @@ if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() ) {
 String description = grammar.grammarTreeToString(#alternative, false);
 description = generator.target.getTargetStringLiteralFromString(description);
 code.setAttribute("description", description);
+code.setAttribute("treeLevel", rewriteTreeNestingLevel);
 if ( !currentAltHasASTRewrite && grammar.buildAST() ) {
 	code.setAttribute("autoAST", Boolean.valueOf(true));
 }
@@ -643,7 +666,7 @@ element[GrammarAST label, GrammarAST astSuffix] returns [StringTemplate code=nul
 
     |   {#element.getSetValue()==null}? code=ebnf
 
-    |   code=atom[label, astSuffix]
+    |   code=atom[null, label, astSuffix]
 
     |   code=tree
 
@@ -671,6 +694,11 @@ element_action returns [StringTemplate code=null]
         code = templates.getInstanceOf("execAction");
         code.setAttribute("action", generator.translateAction(currentRuleName,#act));
         }
+    |   act2:FORCED_ACTION
+        {
+        code = templates.getInstanceOf("execForcedAction");
+        code.setAttribute("action", generator.translateAction(currentRuleName,#act2));
+        }
     ;
 
 notElement[GrammarAST n, GrammarAST label, GrammarAST astSuffix]
@@ -764,9 +792,17 @@ if ( s.member(Label.UP) ) {
 	// the child list.
 	code.setAttribute("nullableChildList", "true");
 }
+rewriteTreeNestingLevel++;
+code.setAttribute("enclosingTreeLevel", rewriteTreeNestingLevel-1);
+code.setAttribute("treeLevel", rewriteTreeNestingLevel);
+Rule r = grammar.getRule(currentRuleName);
+GrammarAST rootSuffix = null;
+if ( grammar.buildAST() && !r.hasRewrite(outerAltNum) ) {
+	rootSuffix = new GrammarAST(ROOT,"ROOT");
+}
 }
     :   #( TREE_BEGIN {elAST=(GrammarAST)_t;}
-    	   el=element[null,null]
+    	   el=element[null,rootSuffix]
            {
            code.setAttribute("root.{el,line,pos}",
 							  el,
@@ -799,26 +835,65 @@ if ( s.member(Label.UP) ) {
 			 }
            )*
          )
+         {rewriteTreeNestingLevel--;}
     ;
 
-atom[GrammarAST label, GrammarAST astSuffix] 
+atom[GrammarAST scope, GrammarAST label, GrammarAST astSuffix] 
     returns [StringTemplate code=null]
 {
 String labelText=null;
 if ( label!=null ) {
     labelText = label.getText();
 }
+if ( grammar.type!=Grammar.LEXER &&
+     (#atom.getType()==RULE_REF||#atom.getType()==TOKEN_REF||
+      #atom.getType()==CHAR_LITERAL||#atom.getType()==STRING_LITERAL) )
+{
+	Rule encRule = grammar.getRule(((GrammarAST)#atom).enclosingRuleName);
+	if ( encRule!=null && encRule.hasRewrite(outerAltNum) && astSuffix!=null ) {
+		ErrorManager.grammarError(ErrorManager.MSG_AST_OP_IN_ALT_WITH_REWRITE,
+								  grammar,
+								  ((GrammarAST)#atom).getToken(),
+								  ((GrammarAST)#atom).enclosingRuleName,
+								  new Integer(outerAltNum));
+		astSuffix = null;
+	}
+}
 }
     :   #( r:RULE_REF (rarg:ARG_ACTION)? )
         {
-        grammar.checkRuleReference(#r, #rarg, currentRuleName);
-        Rule rdef = grammar.getRule(#r.getText());
+        grammar.checkRuleReference(scope, #r, #rarg, currentRuleName);
+        String scopeName = null;
+        if ( scope!=null ) {
+            scopeName = scope.getText();
+        }
+        Rule rdef = grammar.getRule(scopeName, #r.getText());
         // don't insert label=r() if $label.attr not used, no ret value, ...
         if ( !rdef.getHasReturnValue() ) {
             labelText = null;
         }
         code = getRuleElementST("ruleRef", #r.getText(), #r, astSuffix, labelText);
-		code.setAttribute("rule", r.getText());
+		code.setAttribute("rule", rdef);
+        if ( scope!=null ) { // scoped rule ref
+            Grammar scopeG = grammar.composite.getGrammar(scope.getText());
+            code.setAttribute("scope", scopeG);
+        }
+        else if ( rdef.grammar != this.grammar ) { // nonlocal
+            // if rule definition is not in this grammar, it's nonlocal
+			List<Grammar> rdefDelegates = rdef.grammar.getDelegates();
+			if ( rdefDelegates.contains(this.grammar) ) {
+				code.setAttribute("scope", rdef.grammar);
+			}
+			else {
+				// defining grammar is not a delegate, scope all the
+				// back to root, which has delegate methods for all
+				// rules.  Don't use scope if we are root.
+				if ( this.grammar != rdef.grammar.composite.delegateGrammarTreeRoot.grammar ) {
+					code.setAttribute("scope",
+									  rdef.grammar.composite.delegateGrammarTreeRoot.grammar);
+				}
+			}
+        }
 
 		if ( #rarg!=null ) {
 			List args = generator.translateAction(currentRuleName,#rarg);
@@ -832,7 +907,14 @@ if ( label!=null ) {
 
     |   #( t:TOKEN_REF (targ:ARG_ACTION)? )
         {
-           grammar.checkRuleReference(#t, #targ, currentRuleName);
+           if ( currentAltHasASTRewrite && #t.terminalOptions!=null &&
+                #t.terminalOptions.get(Grammar.defaultTokenOption)!=null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_HETERO_ILLEGAL_IN_REWRITE_ALT,
+									  grammar,
+									  ((GrammarAST)(#t)).getToken(),
+									  #t.getText());
+           }
+           grammar.checkRuleReference(scope, #t, #targ, currentRuleName);
 		   if ( grammar.type==Grammar.LEXER ) {
 				if ( grammar.getTokenType(t.getText())==Label.EOF ) {
 					code = templates.getInstanceOf("lexerMatchEOF");
@@ -842,7 +924,20 @@ if ( label!=null ) {
                     if ( isListLabel(labelText) ) {
                         code = templates.getInstanceOf("lexerRuleRefAndListLabel");
                     }
-					code.setAttribute("rule", t.getText());
+                    String scopeName = null;
+                    if ( scope!=null ) {
+                        scopeName = scope.getText();
+                    }
+                    Rule rdef2 = grammar.getRule(scopeName, #t.getText());
+					code.setAttribute("rule", rdef2);
+                    if ( scope!=null ) { // scoped rule ref
+                        Grammar scopeG = grammar.composite.getGrammar(scope.getText());
+                        code.setAttribute("scope", scopeG);
+                    }
+                    else if ( rdef2.grammar != this.grammar ) { // nonlocal
+                        // if rule definition is not in this grammar, it's nonlocal
+                        code.setAttribute("scope", rdef2.grammar);
+                    }
 					if ( #targ!=null ) {
 						List args = generator.translateAction(currentRuleName,#targ);
 						code.setAttribute("args", args);
@@ -857,6 +952,9 @@ if ( label!=null ) {
 				String tokenLabel =
 				   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
 				code.setAttribute("token",tokenLabel);
+				if ( !currentAltHasASTRewrite && #t.terminalOptions!=null ) { 
+                    code.setAttribute("hetero",#t.terminalOptions.get(Grammar.defaultTokenOption));
+                }
                 int i = ((TokenWithIndex)#t.getToken()).getIndex();
 			    code.setAttribute("elementIndex", i);
 			    generator.generateLocalFOLLOW(#t,tokenLabel,currentRuleName,i);
@@ -864,7 +962,7 @@ if ( label!=null ) {
 		   #t.code = code;
 		}
 
-    |   c:CHAR_LITERAL
+    |   c:CHAR_LITERAL 
         {
 		if ( grammar.type==Grammar.LEXER ) {
 			code = templates.getInstanceOf("charRef");
@@ -878,6 +976,9 @@ if ( label!=null ) {
 			code = getTokenElementST("tokenRef", "char_literal", #c, astSuffix, labelText);
 			String tokenLabel = generator.getTokenTypeAsTargetLabel(grammar.getTokenType(c.getText()));
 			code.setAttribute("token",tokenLabel);
+            if ( #c.terminalOptions!=null ) {
+                code.setAttribute("hetero",#c.terminalOptions.get(Grammar.defaultTokenOption));
+            }
             int i = ((TokenWithIndex)#c.getToken()).getIndex();
 			code.setAttribute("elementIndex", i);
 			generator.generateLocalFOLLOW(#c,tokenLabel,currentRuleName,i);
@@ -899,6 +1000,9 @@ if ( label!=null ) {
 			String tokenLabel =
 			   generator.getTokenTypeAsTargetLabel(grammar.getTokenType(#s.getText()));
 			code.setAttribute("token",tokenLabel);
+            if ( #s.terminalOptions!=null ) {
+                code.setAttribute("hetero",#s.terminalOptions.get(Grammar.defaultTokenOption));
+            }
             int i = ((TokenWithIndex)#s.getToken()).getIndex();
 			code.setAttribute("elementIndex", i);
 			generator.generateLocalFOLLOW(#s,tokenLabel,currentRuleName,i);
@@ -911,6 +1015,8 @@ if ( label!=null ) {
 		code.setAttribute("elementIndex", ((TokenWithIndex)#w.getToken()).getIndex());
 		}
 
+    |   #(DOT ID code=atom[#ID, label, astSuffix]) // scope override on rule or token
+
     |	code=set[label,astSuffix]
     ;
 
@@ -969,6 +1075,10 @@ if ( #rewrite.getType()==REWRITE ) {
             grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.RULE_LABEL);
         Set<String> ruleListLabels =
             grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.RULE_LIST_LABEL);
+        Set<String> wildcardLabels =
+            grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.WILDCARD_TREE_LABEL);
+        Set<String> wildcardListLabels =
+            grammar.getLabels(#rewrite.rewriteRefsDeep, Grammar.WILDCARD_TREE_LIST_LABEL);
         // just in case they ref $r for "previous value", make a stream
         // from retval.tree
         StringTemplate retvalST = templates.getInstanceOf("prevRuleRootRef");
@@ -977,8 +1087,15 @@ if ( #rewrite.getType()==REWRITE ) {
         code.setAttribute("referencedTokenListLabels", tokenListLabels);
         code.setAttribute("referencedRuleLabels", ruleLabels);
         code.setAttribute("referencedRuleListLabels", ruleListLabels);
+        code.setAttribute("referencedWildcardLabels", wildcardLabels);
+        code.setAttribute("referencedWildcardListLabels", wildcardListLabels);
 	}
 }
+else {
+		code = templates.getInstanceOf("noRewrite");
+		code.setAttribute("treeLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
+		code.setAttribute("rewriteBlockLevel", Utils.integer(OUTER_REWRITE_NESTING_LEVEL));
+}
 }
 	:	(
 			{rewriteRuleRefs = new HashSet();}
@@ -1054,7 +1171,11 @@ StringTemplate el,st;
     		)
     		EOA
     	 )
+
     |	{generator.grammar.buildTemplate()}? code=rewrite_template
+
+    |	// reproduce same input (only AST at moment)
+    	ETC
     ;
 
 rewrite_element returns [StringTemplate code=null]
@@ -1159,20 +1280,29 @@ rewrite_atom[boolean isRoot] returns [StringTemplate code=null]
 		}
     	}
 
-    |   ( #(TOKEN_REF (arg:ARG_ACTION)?) | CHAR_LITERAL | STRING_LITERAL )
+    |   {GrammarAST term=(GrammarAST)_t;}
+		( #(tk:TOKEN_REF (arg:ARG_ACTION)?)
+        | cl:CHAR_LITERAL
+        | sl:STRING_LITERAL
+        )
     	{
     	String tokenName = #rewrite_atom.getText();
     	String stName = "rewriteTokenRef";
     	Rule rule = grammar.getRule(currentRuleName);
     	Set tokenRefsInAlt = rule.getTokenRefsInAlt(outerAltNum);
-    	boolean imaginary = !tokenRefsInAlt.contains(tokenName);
-    	if ( imaginary ) {
+    	boolean createNewNode = !tokenRefsInAlt.contains(tokenName) || #arg!=null;
+        Object hetero = null;
+		if ( term.terminalOptions!=null ) {
+			hetero = term.terminalOptions.get(Grammar.defaultTokenOption);
+		}
+    	if ( createNewNode ) {
     		stName = "rewriteImaginaryTokenRef";
     	}
     	if ( isRoot ) {
     		stName += "Root";
     	}
     	code = templates.getInstanceOf(stName);
+		code.setAttribute("hetero", hetero);
     	if ( #arg!=null ) {
 			List args = generator.translateAction(currentRuleName,#arg);
 			code.setAttribute("args", args);
@@ -1197,6 +1327,14 @@ rewrite_atom[boolean isRoot] returns [StringTemplate code=null]
     	Grammar.LabelElementPair pair = rule.getLabel(labelName);
     	if ( labelName.equals(currentRuleName) ) {
     		// special case; ref to old value via $rule
+			if ( rule.hasRewrite(outerAltNum) &&
+				 rule.getRuleRefsInAlt(outerAltNum).contains(labelName) )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT,
+										  grammar,
+										  ((GrammarAST)(#LABEL)).getToken(),
+										  labelName);
+    		}
     		StringTemplate labelST = templates.getInstanceOf("prevRuleRootRef");
     		code = templates.getInstanceOf("rewriteRuleLabelRef"+(isRoot?"Root":""));
     		code.setAttribute("label", labelST);
@@ -1214,6 +1352,12 @@ rewrite_atom[boolean isRoot] returns [StringTemplate code=null]
 				case Grammar.TOKEN_LABEL :
 					stName = "rewriteTokenLabelRef";
 					break;
+				case Grammar.WILDCARD_TREE_LABEL :
+					stName = "rewriteWildcardLabelRef";
+					break;
+				case Grammar.WILDCARD_TREE_LIST_LABEL :
+					stName = "rewriteRuleListLabelRef"; // acts like rule ref list for ref
+					break;
 				case Grammar.RULE_LABEL :
 					stName = "rewriteRuleLabelRef";
 					break;
diff --git a/src/org/antlr/tool/define.g b/tool/src/main/antlr2/org/antlr/grammar/v2/define.g
similarity index 88%
rename from src/org/antlr/tool/define.g
rename to tool/src/main/antlr2/org/antlr/grammar/v2/define.g
index 92b2bc0..8552886 100644
--- a/src/org/antlr/tool/define.g
+++ b/tool/src/main/antlr2/org/antlr/grammar/v2/define.g
@@ -1,7 +1,7 @@
 header {
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -26,9 +26,10 @@ header {
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-	package org.antlr.tool;
+	package org.antlr.grammar.v2;
 	import java.util.*;
 	import org.antlr.misc.*;
+    import org.antlr.tool.*;
 }
 
 class DefineGrammarItemsWalker extends TreeParser;
@@ -39,7 +40,8 @@ options {
     codeGenBitsetTestThreshold=999;
 }
 
-{
+{ 
+
 protected Grammar grammar;
 protected GrammarAST root;
 protected String currentRuleName;
@@ -129,7 +131,7 @@ attrScope
 		{
 		AttributeScope scope = grammar.defineGlobalScope(name.getText(),#attrs.token);
 		scope.isDynamicGlobalScope = true;
-		scope.addAttributes(attrs.getText(), ";");
+		scope.addAttributes(attrs.getText(), ';');
 		}
 	;
 
@@ -144,6 +146,7 @@ Token optionsStartToken=null;
         ( {optionsStartToken=((GrammarAST)_t).getToken();}
           optionsSpec
         )?
+        (delegateGrammars)?
         (tokensSpec)?
         (attrScope)*
         (actions)?
@@ -175,6 +178,10 @@ optionsSpec
 	:	OPTIONS
 	;
 
+delegateGrammars
+	:	#( "import" ( #(ASSIGN ID ID) | ID )+ )
+	;
+
 tokensSpec
 	:	#( TOKENS ( tokenSpec )+ )
 	;
@@ -200,7 +207,7 @@ String name=null;
 Map opts=null;
 Rule r = null;
 }
-    :   #( RULE id:ID {opts = #RULE.options;}
+    :   #( RULE id:ID {opts = #RULE.getBlockOptions();}
            (mod=modifier)?
            #( ARG (args:ARG_ACTION)? )
            #( RET (ret:ARG_ACTION)? )
@@ -220,11 +227,11 @@ Rule r = null;
 				r = grammar.getRule(name);
 				if ( #args!=null ) {
 					r.parameterScope = grammar.createParameterScope(name,#args.token);
-					r.parameterScope.addAttributes(#args.getText(), ",");
+					r.parameterScope.addAttributes(#args.getText(), ',');
 				}
 				if ( #ret!=null ) {
 					r.returnScope = grammar.createReturnScope(name,#ret.token);
-					r.returnScope.addAttributes(#ret.getText(), ",");
+					r.returnScope.addAttributes(#ret.getText(), ',');
 				}
 			}
 			}
@@ -237,7 +244,7 @@ Rule r = null;
            {
            // copy rule options into the block AST, which is where
            // the analysis will look for k option etc...
-           #b.options = opts;
+           #b.setBlockOptions(opts);
            }
          )
     ;
@@ -270,7 +277,7 @@ ruleScopeSpec[Rule r]
  	         {
  	         r.ruleScope = grammar.createRuleScope(r.name,#attrs.token);
 			 r.ruleScope.isDynamicRuleScope = true;
-			 r.ruleScope.addAttributes(#attrs.getText(), ";");
+			 r.ruleScope.addAttributes(#attrs.getText(), ';');
 			 }
 		   )?
  	       ( uses:ID
@@ -314,10 +321,15 @@ blockAction
 alternative
 {
 if ( grammar.type!=Grammar.LEXER && grammar.getOption("output")!=null && blockLevel==1 ) {
-	GrammarAST aRewriteNode = #alternative.findFirstType(REWRITE);
+	GrammarAST aRewriteNode = #alternative.findFirstType(REWRITE); // alt itself has rewrite?
+	GrammarAST rewriteAST = (GrammarAST)#alternative.getNextSibling();
+	// we have a rewrite if alt uses it inside subrule or this alt has one
+	// but don't count -> ... rewrites, which mean "do default auto construction"
 	if ( aRewriteNode!=null||
-		 (#alternative.getNextSibling()!=null &&
-		  #alternative.getNextSibling().getType()==REWRITE) )
+		 (rewriteAST!=null &&
+		  rewriteAST.getType()==REWRITE &&
+		  rewriteAST.getFirstChild()!=null &&
+		  rewriteAST.getFirstChild().getType()!=ETC) )
 	{
 		Rule r = grammar.getRule(currentRuleName);
 		r.trackAltsWithRewrites(#alternative,this.outerAltNum);
@@ -343,10 +355,10 @@ finallyClause
 element
     :   #(ROOT element)
     |   #(BANG element)
-    |   atom
+    |   atom[null]
     |   #(NOT element)
-    |   #(RANGE atom atom)
-    |   #(CHAR_RANGE atom atom)
+    |   #(RANGE atom[null] atom[null])
+    |   #(CHAR_RANGE atom[null] atom[null])
     |	#(ASSIGN id:ID el:element)
     	{
 		if ( #el.getType()==ANTLRParser.ROOT ||
@@ -357,6 +369,9 @@ element
     	if ( #el.getType()==RULE_REF) {
     		grammar.defineRuleRefLabel(currentRuleName,#id.getToken(),#el);
     	}
+    	else if ( #el.getType()==WILDCARD && grammar.type==Grammar.TREE_PARSER ) {
+    		grammar.defineWildcardTreeLabel(currentRuleName,#id.getToken(),#el);
+    	}
     	else {
     		grammar.defineTokenRefLabel(currentRuleName,#id.getToken(),#el);
     	}
@@ -371,6 +386,9 @@ element
     	    if ( #a2.getType()==RULE_REF ) {
     	    	grammar.defineRuleListLabel(currentRuleName,#id2.getToken(),#a2);
     	    }
+            else if ( #a2.getType()==WILDCARD && grammar.type==Grammar.TREE_PARSER ) {
+                grammar.defineWildcardTreeListLabel(currentRuleName,#id2.getToken(),#a2);
+            }
     	    else {
     	    	grammar.defineTokenListLabel(currentRuleName,#id2.getToken(),#a2);
     	    }
@@ -384,6 +402,11 @@ element
         #act.outerAltNum = this.outerAltNum;
 		trackInlineAction(#act);
         }
+    |   act2:FORCED_ACTION
+        {
+        #act2.outerAltNum = this.outerAltNum;
+		trackInlineAction(#act2);
+        }
     |   SEMPRED
         {
         #SEMPRED.outerAltNum = this.outerAltNum;
@@ -434,23 +457,23 @@ dotBlock
 tree:   #(TREE_BEGIN element (element)*)
     ;
 
-atom
+atom[GrammarAST scope]
     :   #( rr:RULE_REF (rarg:ARG_ACTION)? )
     	{
-        grammar.altReferencesRule(currentRuleName, #rr, this.outerAltNum);
+        grammar.altReferencesRule(currentRuleName, scope, #rr, this.outerAltNum);
 		if ( #rarg!=null ) {
             #rarg.outerAltNum = this.outerAltNum;
             trackInlineAction(#rarg);
         }
         }
-    |   #( t:TOKEN_REF (targ:ARG_ACTION )? )
+    |   #( t:TOKEN_REF  (targ:ARG_ACTION )? )
     	{
 		if ( #targ!=null ) {
             #targ.outerAltNum = this.outerAltNum;
             trackInlineAction(#targ);
         }
     	if ( grammar.type==Grammar.LEXER ) {
-    		grammar.altReferencesRule(currentRuleName, #t, this.outerAltNum);
+    		grammar.altReferencesRule(currentRuleName, scope, #t, this.outerAltNum);
     	}
     	else {
     		grammar.altReferencesTokenID(currentRuleName, #t, this.outerAltNum);
@@ -465,7 +488,7 @@ atom
     		}
     	}
     	}
-    |   s:STRING_LITERAL
+    |   s:STRING_LITERAL 
     	{
     	if ( grammar.type!=Grammar.LEXER ) {
     		Rule rule = grammar.getRule(currentRuleName);
@@ -475,6 +498,7 @@ atom
     	}
     	}
     |   WILDCARD
+    |   #(DOT ID atom[#ID]) // scope override on rule
     ;
 
 ast_suffix
@@ -526,6 +550,8 @@ rewrite_alternative
     :   {grammar.buildAST()}?
     	#( a:ALT ( ( rewrite_element )+ | EPSILON ) EOA )
     |	{grammar.buildTemplate()}? rewrite_template
+	|	ETC {this.blockLevel==1}? // only valid as outermost rewrite
+
     ;
 
 rewrite_element
@@ -567,7 +593,12 @@ if ( !imaginary && grammar.buildAST() &&
 }
 }
     :   RULE_REF 
-    |   ( #(TOKEN_REF (arg:ARG_ACTION)?) | CHAR_LITERAL | STRING_LITERAL )
+    |   ( #(TOKEN_REF
+            (arg:ARG_ACTION)?
+           )
+        | CHAR_LITERAL
+        | STRING_LITERAL
+        )
         {
         if ( #arg!=null ) {
             #arg.outerAltNum = this.outerAltNum;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g
new file mode 100644
index 0000000..b3dc152
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g
@@ -0,0 +1,625 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** ANTLR v3 grammar written in ANTLR v3 with AST construction */
+grammar ANTLRv3;
+
+options {
+	output=AST;
+	ASTLabelType=CommonTree;
+}
+
+tokens {
+	DOC_COMMENT;
+	PARSER;	
+    LEXER;
+    RULE;
+    BLOCK;
+    OPTIONAL;
+    CLOSURE;
+    POSITIVE_CLOSURE;
+    SYNPRED;
+    RANGE;
+    CHAR_RANGE;
+    EPSILON;
+    ALT;
+    EOR;
+    EOB;
+    EOA; // end of alt
+    ID;
+    ARG;
+    ARGLIST;
+    RET='returns';
+    LEXER_GRAMMAR;
+    PARSER_GRAMMAR;
+    TREE_GRAMMAR;
+    COMBINED_GRAMMAR;
+    LABEL; // $x used in rewrite rules
+    TEMPLATE;
+    SCOPE='scope';
+    SEMPRED;
+    GATED_SEMPRED; // {p}? =>
+    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
+    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
+    FRAGMENT='fragment';
+    TREE_BEGIN='^(';
+    ROOT='^';
+    BANG='!';
+    RANGE='..';
+    REWRITE='->';
+    AT='@';
+    LABEL_ASSIGN='=';
+    LIST_LABEL_ASSIGN='+=';
+}
+
+ at parser::header
+{
+    package org.antlr.grammar.v3;
+}
+ at lexer::header
+{
+    package org.antlr.grammar.v3;
+}
+
+ at members {
+	int gtype;
+}
+
+grammarDef
+    :   DOC_COMMENT?
+    	(	'lexer'  {gtype=LEXER_GRAMMAR;}    // pure lexer
+    	|   'parser' {gtype=PARSER_GRAMMAR;}   // pure parser
+    	|   'tree'   {gtype=TREE_GRAMMAR;}     // a tree parser
+    	|		     {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
+    	)
+    	g='grammar' id ';' optionsSpec? tokensSpec? attrScope* action*
+    	rule+
+    	EOF
+    	-> ^( {adaptor.create(gtype,$g)}
+    		  id DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+
+    		)
+    ;
+
+tokensSpec
+	:	TOKENS tokenSpec+ '}' -> ^(TOKENS tokenSpec+)
+	;
+
+tokenSpec
+	:	TOKEN_REF
+		(	'=' (lit=STRING_LITERAL|lit=CHAR_LITERAL)	-> ^('=' TOKEN_REF $lit)
+		|												-> TOKEN_REF
+		)
+		';'
+	;
+
+attrScope
+	:	'scope' id ACTION -> ^('scope' id ACTION)
+	;
+
+/** Match stuff like @parser::members {int i;} */
+action
+	:	'@' (actionScopeName '::')? id ACTION -> ^('@' actionScopeName? id ACTION)
+	;
+
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+actionScopeName
+	:	id
+	|	l='lexer'	-> ID[$l]
+    |   p='parser'	-> ID[$p]
+	;
+
+optionsSpec
+	:	OPTIONS (option ';')+ '}' -> ^(OPTIONS option+)
+	;
+
+option
+    :   id '=' optionValue -> ^('=' id optionValue)
+ 	;
+ 	
+optionValue
+    :   qid
+    |   STRING_LITERAL
+    |   CHAR_LITERAL
+    |   INT
+    |	s='*' -> STRING_LITERAL[$s]  // used for k=*
+    ;
+
+rule
+scope {
+	String name;
+}
+	:	DOC_COMMENT?
+		( modifier=('protected'|'public'|'private'|'fragment') )?
+		id {$rule::name = $id.text;}
+		'!'?
+		( arg=ARG_ACTION )?
+		( 'returns' rt=ARG_ACTION  )?
+		throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+		':'	altList	';'
+		exceptionGroup?
+	    -> ^( RULE id {modifier!=null?adaptor.create(modifier):null} ^(ARG[$arg] $arg)? ^('returns' $rt)?
+	    	  throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+	    	  altList
+	    	  exceptionGroup?
+	    	  EOR["EOR"]
+	    	)
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	'@' id ACTION -> ^('@' id ACTION)
+	;
+
+throwsSpec
+	:	'throws' id ( ',' id )* -> ^('throws' id+)
+	;
+
+ruleScopeSpec
+	:	'scope' ACTION -> ^('scope' ACTION)
+	|	'scope' id (',' id)* ';' -> ^('scope' id+)
+	|	'scope' ACTION
+		'scope' id (',' id)* ';'
+		-> ^('scope' ACTION id+ )
+	;
+
+block
+    :   lp='('
+		( (opts=optionsSpec)? ':' )?
+		altpair ( '|' altpair )*
+        rp=')'
+        -> ^( BLOCK[$lp,"BLOCK"] optionsSpec? altpair+ EOB[$rp,"EOB"] )
+    ;
+
+altpair : alternative rewrite ;
+
+altList
+ at init {
+	// must create root manually as it's used by invoked rules in real antlr tool.
+	// leave here to demonstrate use of {...} in rewrite rule
+	// it's really BLOCK[firstToken,"BLOCK"]; set line/col to previous ( or : token.
+    CommonTree blkRoot = (CommonTree)adaptor.create(BLOCK,input.LT(-1),"BLOCK");
+}
+    :   altpair ( '|' altpair )* -> ^( {blkRoot} altpair+ EOB["EOB"] )
+    ;
+
+alternative
+ at init {
+	Token firstToken = input.LT(1);
+	Token prevToken = input.LT(-1); // either : or | I think
+}
+    :   element+ -> ^(ALT[firstToken,"ALT"] element+ EOA["EOA"])
+    |   -> ^(ALT[prevToken,"ALT"] EPSILON[prevToken,"EPSILON"] EOA["EOA"])
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ ( finallyClause )?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    'catch' ARG_ACTION ACTION -> ^('catch' ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    'finally' ACTION -> ^('finally' ACTION)
+    ;
+
+element
+	:	id (labelOp='='|labelOp='+=') atom
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id atom) EOA["EOA"]) EOB["EOB"]))
+		|				-> ^($labelOp id atom)
+		)
+	|	id (labelOp='='|labelOp='+=') block
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id block) EOA["EOA"]) EOB["EOB"]))
+		|				-> ^($labelOp id block)
+		)
+	|	atom
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] atom EOA["EOA"]) EOB["EOB"]) )
+		|				-> atom
+		)
+	|	ebnf
+	|   ACTION
+	|   SEMPRED ( g='=>' -> GATED_SEMPRED[$g] | -> SEMPRED )
+	|   treeSpec
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] treeSpec EOA["EOA"]) EOB["EOB"]) )
+		|				-> treeSpec
+		)
+	;
+
+atom:   terminal
+	|	range 
+		(	(op='^'|op='!')	-> ^($op range)
+		|					-> range
+		)
+    |	notSet
+		(	(op='^'|op='!')	-> ^($op notSet)
+		|					-> notSet
+		)
+    |   RULE_REF ARG_ACTION?
+		(	(op='^'|op='!')	-> ^($op RULE_REF ARG_ACTION?)
+		|					-> ^(RULE_REF ARG_ACTION?)
+		)
+    ;
+
+notSet
+	:	'~'
+		(	notTerminal elementOptions?	-> ^('~' notTerminal elementOptions?)
+		|	block elementOptions?		-> ^('~' block elementOptions?)
+		)
+	;
+
+notTerminal
+	:   CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+	
+elementOptions
+	:	'<' qid '>'					 -> ^(OPTIONS qid)
+	|	'<' option (';' option)* '>' -> ^(OPTIONS option+)
+	;
+
+elementOption
+	:	id '=' optionValue -> ^('=' id optionValue)
+	;
+	
+treeSpec
+	:	'^(' element ( element )+ ')' -> ^(TREE_BEGIN element+)
+	;
+
+range!
+	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL elementOptions?
+		-> ^(CHAR_RANGE[$c1,".."] $c1 $c2 elementOptions?)
+	;
+
+terminal
+    :   (	CHAR_LITERAL elementOptions?    	  -> ^(CHAR_LITERAL elementOptions?)
+	    	// Args are only valid for lexer rules
+		|   TOKEN_REF ARG_ACTION? elementOptions? -> ^(TOKEN_REF ARG_ACTION? elementOptions?)
+		|   STRING_LITERAL elementOptions?		  -> ^(STRING_LITERAL elementOptions?)
+		|   '.' elementOptions?		 			  -> ^('.' elementOptions?)
+		)
+		(	'^'							-> ^('^' $terminal)
+		|	'!' 						-> ^('!' $terminal)
+		)?
+	;
+
+/** Matches ENBF blocks (and token sets via block rule) */
+ebnf
+ at init {
+    Token firstToken = input.LT(1);
+}
+ at after {
+	$ebnf.tree.getToken().setLine(firstToken.getLine());
+	$ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
+}
+	:	block
+		(	op='?'	-> ^(OPTIONAL[op] block)
+		|	op='*'	-> ^(CLOSURE[op] block)
+		|	op='+'	-> ^(POSITIVE_CLOSURE[op] block)
+		|   '=>'	// syntactic predicate
+					-> {gtype==COMBINED_GRAMMAR &&
+					    Character.isUpperCase($rule::name.charAt(0))}?
+					   // if lexer rule in combined, leave as pred for lexer
+					   ^(SYNPRED["=>"] block)
+					// in real antlr tool, text for SYN_SEMPRED is predname
+					-> SYN_SEMPRED
+        |			-> block
+		)
+	;
+
+ebnfSuffix
+ at init {
+	Token op = input.LT(1);
+}
+	:	'?'	-> OPTIONAL[op]
+  	|	'*' -> CLOSURE[op]
+   	|	'+' -> POSITIVE_CLOSURE[op]
+	;
+	
+
+
+// R E W R I T E  S Y N T A X
+
+rewrite
+ at init {
+	Token firstToken = input.LT(1);
+}
+	:	(rew+='->' preds+=SEMPRED predicated+=rewrite_alternative)*
+		rew2='->' last=rewrite_alternative
+        -> ^($rew $preds $predicated)* ^($rew2 $last)
+	|
+	;
+
+rewrite_alternative
+options {backtrack=true;}
+	:	rewrite_template
+	|	rewrite_tree_alternative
+   	|   /* empty rewrite */ -> ^(ALT["ALT"] EPSILON["EPSILON"] EOA["EOA"])
+	;
+	
+rewrite_tree_block
+    :   lp='(' rewrite_tree_alternative ')'
+    	-> ^(BLOCK[$lp,"BLOCK"] rewrite_tree_alternative EOB[$lp,"EOB"])
+    ;
+
+rewrite_tree_alternative
+    :	rewrite_tree_element+ -> ^(ALT["ALT"] rewrite_tree_element+ EOA["EOA"])
+    ;
+
+rewrite_tree_element
+	:	rewrite_tree_atom
+	|	rewrite_tree_atom ebnfSuffix
+		-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree_atom EOA["EOA"]) EOB["EOB"]))
+	|   rewrite_tree
+		(	ebnfSuffix
+			-> ^(ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree EOA["EOA"]) EOB["EOB"]))
+		|	-> rewrite_tree
+		)
+	|   rewrite_tree_ebnf
+	;
+
+rewrite_tree_atom
+    :   CHAR_LITERAL
+	|   TOKEN_REF ARG_ACTION? -> ^(TOKEN_REF ARG_ACTION?) // for imaginary nodes
+    |   RULE_REF
+	|   STRING_LITERAL
+	|   d='$' id -> LABEL[$d,$id.text] // reference to a label in a rewrite rule
+	|	ACTION
+	;
+
+rewrite_tree_ebnf
+ at init {
+    Token firstToken = input.LT(1);
+}
+ at after {
+	$rewrite_tree_ebnf.tree.getToken().setLine(firstToken.getLine());
+	$rewrite_tree_ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
+}
+	:	rewrite_tree_block ebnfSuffix -> ^(ebnfSuffix rewrite_tree_block)
+	;
+	
+rewrite_tree
+	:	'^(' rewrite_tree_atom rewrite_tree_element* ')'
+		-> ^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
+	;
+
+/** Build a tree for a template rewrite:
+      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+    where ARGLIST is always there even if no args exist.
+    ID can be "template" keyword.  If first child is ACTION then it's
+    an indirect template ref
+
+    -> foo(a={...}, b={...})
+    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
+    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+rewrite_template
+	:   // -> template(a={...},...) "..."    inline template
+		id lp='(' rewrite_template_args	')'
+		( str=DOUBLE_QUOTE_STRING_LITERAL | str=DOUBLE_ANGLE_STRING_LITERAL )
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args $str)
+
+	|	// -> foo(a={...}, ...)
+		rewrite_template_ref
+
+	|	// -> ({expr})(a={...}, ...)
+		rewrite_indirect_template_head
+
+	|	// -> {...}
+		ACTION
+	;
+
+/** -> foo(a={...}, ...) */
+rewrite_template_ref
+	:	id lp='(' rewrite_template_args	')'
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args)
+	;
+
+/** -> ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	lp='(' ACTION ')' '(' rewrite_template_args ')'
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewrite_template_args)
+	;
+
+rewrite_template_args
+	:	rewrite_template_arg (',' rewrite_template_arg)*
+		-> ^(ARGLIST rewrite_template_arg+)
+	|	-> ARGLIST
+	;
+
+rewrite_template_arg
+	:   id '=' ACTION -> ^(ARG[$id.start] id ACTION)
+	;
+
+qid :	id ('.' id)* ;
+	
+id	:	TOKEN_REF -> ID[$TOKEN_REF]
+	|	RULE_REF  -> ID[$RULE_REF]
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//'
+ 	 	(	' $ANTLR ' SRC // src directive
+ 		|	~('\r'|'\n')*
+		)
+		'\r'? '\n'
+		{$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {if (input.LA(1)=='*') $type=DOC_COMMENT; else $channel=HIDDEN;} .* '*/'
+	;
+
+CHAR_LITERAL
+	:	'\'' LITERAL_CHAR '\''
+	;
+
+STRING_LITERAL
+	:	'\'' LITERAL_CHAR LITERAL_CHAR* '\''
+	;
+
+fragment
+LITERAL_CHAR
+	:	ESC
+	|	~('\''|'\\')
+	;
+
+DOUBLE_QUOTE_STRING_LITERAL
+	:	'"' (ESC | ~('\\'|'"'))* '"'
+	;
+
+DOUBLE_ANGLE_STRING_LITERAL
+	:	'<<' .* '>>'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+INT	:	'0'..'9'+
+	;
+
+ARG_ACTION
+	:	NESTED_ARG_ACTION
+	;
+
+fragment
+NESTED_ARG_ACTION :
+	'['
+	(	options {greedy=false; k=1;}
+	:	NESTED_ARG_ACTION
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	']'
+	//{setText(getText().substring(1, getText().length()-1));}
+	;
+
+ACTION
+	:	NESTED_ACTION ( '?' {$type = SEMPRED;} )?
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=2;}
+	:	NESTED_ACTION
+	|	SL_COMMENT
+	|	ML_COMMENT
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	'}'
+   ;
+
+fragment
+ACTION_CHAR_LITERAL
+	:	'\'' (ACTION_ESC|~('\\'|'\'')) '\''
+	;
+
+fragment
+ACTION_STRING_LITERAL
+	:	'"' (ACTION_ESC|~('\\'|'"'))* '"'
+	;
+
+fragment
+ACTION_ESC
+	:	'\\\''
+	|	'\\' '"' // ANTLR doesn't like: '\\"'
+	|	'\\' ~('\''|'"')
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+/** Match the start of an options section.  Don't allow normal
+ *  action processing on the {...} as it's not a action.
+ */
+OPTIONS
+	:	'options' WS_LOOP '{'
+	;
+	
+TOKENS
+	:	'tokens' WS_LOOP '{'
+	;
+
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ */
+fragment
+SRC	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
+
+fragment
+WS_LOOP
+	:	(	WS
+		|	SL_COMMENT
+		|	ML_COMMENT
+		)*
+	;
+
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g
new file mode 100644
index 0000000..62da566
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g
@@ -0,0 +1,261 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** ANTLR v3 tree grammar to walk trees created by ANTLRv3.g */
+tree grammar ANTLRv3Tree;
+
+options {
+	tokenVocab = ANTLRv3;
+	ASTLabelType = CommonTree;
+}
+
+ at header {
+package org.antlr.grammar.v3;
+}
+
+grammarDef
+    :   ^( grammarType ID DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+ )
+    ;
+
+grammarType
+	:	LEXER_GRAMMAR
+    |	PARSER_GRAMMAR
+    |	TREE_GRAMMAR
+    |	COMBINED_GRAMMAR
+    ;
+
+tokensSpec
+	:	^(TOKENS tokenSpec+)
+	;
+
+tokenSpec
+	:	^('=' TOKEN_REF STRING_LITERAL)
+	|	^('=' TOKEN_REF CHAR_LITERAL)
+	|	TOKEN_REF
+	;
+
+attrScope
+	:	^('scope' ID ACTION)
+	;
+
+action
+	:	^('@' ID ID ACTION)
+	|	^('@' ID ACTION)
+	;
+
+optionsSpec
+	:	^(OPTIONS option+)
+	;
+
+option
+    :   qid // only allowed in element options
+    |	^('=' ID optionValue)
+ 	;
+ 	
+optionValue
+    :   ID
+    |   STRING_LITERAL
+    |   CHAR_LITERAL
+    |   INT
+    ;
+
+rule
+	:	^( RULE ID modifier? (^(ARG ARG_ACTION))? (^(RET ARG_ACTION))?
+	       throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+	       altList
+	       exceptionGroup? EOR
+	     )
+	;
+
+modifier
+	:	'protected'|'public'|'private'|'fragment'
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	^('@' ID ACTION)
+	;
+
+throwsSpec
+	:	^('throws' ID+)
+	;
+
+ruleScopeSpec
+	:	^('scope' ACTION)
+	|	^('scope' ACTION ID+)
+	|	^('scope' ID+)
+	;
+
+block
+    :   ^( BLOCK optionsSpec? (alternative rewrite)+ EOB )
+    ;
+
+altList
+    :   ^( BLOCK (alternative rewrite)+ EOB )
+    ;
+
+alternative
+    :   ^(ALT element+ EOA)
+    |   ^(ALT EPSILON EOA)
+    ;
+
+exceptionGroup
+	:	exceptionHandler+ finallyClause?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    ^('catch' ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    ^('finally' ACTION)
+    ;
+
+element
+	:	^(('='|'+=') ID block)
+	|	^(('='|'+=') ID atom)
+	|	atom
+	|	ebnf
+	|   ACTION
+	|   SEMPRED
+	|	GATED_SEMPRED
+	|   ^(TREE_BEGIN element+)
+	;
+
+atom:   ^(('^'|'!') atom)
+	|	^(CHAR_RANGE CHAR_LITERAL CHAR_LITERAL optionsSpec?)
+	|	^('~' notTerminal optionsSpec?)
+	|	^('~' block optionsSpec?)
+    |	^(RULE_REF ARG_ACTION)
+    |	RULE_REF
+    |   CHAR_LITERAL
+    |   ^(CHAR_LITERAL optionsSpec)
+    |	TOKEN_REF
+    |	^(TOKEN_REF optionsSpec)
+    |	^(TOKEN_REF ARG_ACTION optionsSpec)
+    |	^(TOKEN_REF ARG_ACTION)
+    |	STRING_LITERAL
+    |	^(STRING_LITERAL optionsSpec)
+    |	'.'
+    |	^('.' optionsSpec?)
+    ;
+
+/** Matches ENBF blocks (and token sets via block rule) */
+ebnf
+	:	^(SYNPRED block)
+	|	^(OPTIONAL block)
+  	|	^(CLOSURE block)
+   	|	^(POSITIVE_CLOSURE block)
+	|	SYN_SEMPRED
+	|	block
+	;
+
+notTerminal
+	:   CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+		
+// R E W R I T E  S Y N T A X
+
+rewrite
+	:	(^('->' SEMPRED rewrite_alternative))* ^('->' rewrite_alternative)
+	|
+	;
+
+rewrite_alternative
+	:	rewrite_template
+	|	rewrite_tree_alternative
+   	|   ^(ALT EPSILON EOA)
+	;
+	
+rewrite_tree_block
+    :   ^(BLOCK rewrite_tree_alternative EOB)
+    ;
+
+rewrite_tree_alternative
+    :	^(ALT rewrite_tree_element+ EOA)
+    ;
+
+rewrite_tree_element
+	:	rewrite_tree_atom
+	|	rewrite_tree
+	|   rewrite_tree_block
+	|   rewrite_tree_ebnf
+	;
+
+rewrite_tree_atom
+    :   CHAR_LITERAL
+	|   TOKEN_REF
+	|   ^(TOKEN_REF ARG_ACTION) // for imaginary nodes
+    |   RULE_REF
+	|   STRING_LITERAL
+	|   LABEL
+	|	ACTION
+	;
+
+rewrite_tree_ebnf
+	:	^(OPTIONAL rewrite_tree_block)
+  	|	^(CLOSURE rewrite_tree_block)
+   	|	^(POSITIVE_CLOSURE rewrite_tree_block)
+	;
+	
+rewrite_tree
+	:	^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
+	;
+
+rewrite_template
+	:   ^( TEMPLATE ID rewrite_template_args
+		   (DOUBLE_QUOTE_STRING_LITERAL | DOUBLE_ANGLE_STRING_LITERAL)
+		 )
+	|	rewrite_template_ref
+	|	rewrite_indirect_template_head
+	|	ACTION
+	;
+
+/** foo(a={...}, ...) */
+rewrite_template_ref
+	:	^(TEMPLATE ID rewrite_template_args)
+	;
+
+/** ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	^(TEMPLATE ACTION rewrite_template_args)
+	;
+
+rewrite_template_args
+	:	^(ARGLIST rewrite_template_arg+)
+	|	ARGLIST
+	;
+
+rewrite_template_arg
+	:   ^(ARG ID ACTION)
+	;
+
+qid	:	ID ('.' ID)* ;
diff --git a/src/org/antlr/tool/ActionAnalysis.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
similarity index 91%
rename from src/org/antlr/tool/ActionAnalysis.g
rename to tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
index c07308a..f69d7b9 100644
--- a/src/org/antlr/tool/ActionAnalysis.g
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -39,7 +39,7 @@ options {
 }
 
 @header {
-package org.antlr.codegen;
+package org.antlr.grammar.v3;
 import org.antlr.runtime.*;
 import org.antlr.tool.*;
 }
@@ -50,11 +50,11 @@ Grammar grammar;
 antlr.Token actionToken;
 int outerAltNum = 0;
 
-	public ActionAnalysisLexer(Grammar grammar, String ruleName, GrammarAST actionAST)
+	public ActionAnalysis(Grammar grammar, String ruleName, GrammarAST actionAST)
 	{
 		this(new ANTLRStringStream(actionAST.token.getText()));
 		this.grammar = grammar;
-	    this.enclosingRule = grammar.getRule(ruleName);
+	    this.enclosingRule = grammar.getLocallyDefinedRule(ruleName);
 	    this.actionToken = actionAST.token;
 	    this.outerAltNum = actionAST.outerAltNum;
 	}
@@ -86,13 +86,17 @@ X_Y :	'$' x=ID '.' y=ID {enclosingRule!=null}?
 			pair.actionReferencesLabel = true;
 			refdRuleName = pair.referencedRuleName;
 			Rule refdRule = grammar.getRule(refdRuleName);
-			scope = refdRule.getLocalAttributeScope($y.text);
+			if ( refdRule!=null ) {
+				scope = refdRule.getLocalAttributeScope($y.text);
+			}
 		}
 		else if ( enclosingRule.getRuleRefsInAlt(x.getText(), outerAltNum)!=null ) {
 			// ref to rule referenced in this alt
 			refdRuleName = $x.text;
 			Rule refdRule = grammar.getRule(refdRuleName);
-			scope = refdRule.getLocalAttributeScope($y.text);
+			if ( refdRule!=null ) {
+				scope = refdRule.getLocalAttributeScope($y.text);
+			}
 		}
 		if ( scope!=null &&
 			 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
diff --git a/src/org/antlr/codegen/ActionTranslator.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
similarity index 95%
rename from src/org/antlr/codegen/ActionTranslator.g
rename to tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
index e9ccf3f..5f51cce 100644
--- a/src/org/antlr/codegen/ActionTranslator.g
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -33,10 +33,17 @@ options {
 }
 
 @header {
-package org.antlr.codegen;
+package org.antlr.grammar.v3;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.runtime.*;
 import org.antlr.tool.*;
+import org.antlr.codegen.*;
+
+import org.antlr.runtime.*;
+import java.util.List;
+import java.util.ArrayList;
+import org.antlr.grammar.v2.ANTLRParser;
+
 }
 
 @members {
@@ -47,19 +54,19 @@ Grammar grammar;
 CodeGenerator generator;
 antlr.Token actionToken;
 
-	public ActionTranslatorLexer(CodeGenerator generator,
+	public ActionTranslator(CodeGenerator generator,
 								 String ruleName,
 								 GrammarAST actionAST)
 	{
 		this(new ANTLRStringStream(actionAST.token.getText()));
 		this.generator = generator;
 		this.grammar = generator.grammar;
-	    this.enclosingRule = grammar.getRule(ruleName);
+	    this.enclosingRule = grammar.getLocallyDefinedRule(ruleName);
 	    this.actionToken = actionAST.token;
 	    this.outerAltNum = actionAST.outerAltNum;
 	}
 
-	public ActionTranslatorLexer(CodeGenerator generator,
+	public ActionTranslator(CodeGenerator generator,
 								 String ruleName,
 								 antlr.Token actionToken,
 								 int outerAltNum)
@@ -71,33 +78,6 @@ antlr.Token actionToken;
 	    this.actionToken = actionToken;
 		this.outerAltNum = outerAltNum;
 	}
-	// BACKWARD COMPATIBILITY UNTIL REGENERATING WITH 3.0b7
-	public Token emit(int tokenType,
-					  int line, int charPosition,
-					  int channel,
-					  int start, int stop)
-	{
-		Token t = new CommonToken(input, tokenType, channel, start, stop);
-		t.setLine(line);
-		t.setText(text);
-		t.setCharPositionInLine(charPosition);
-		emit(t);
-		return t;
-	}
-
-/*
-public ActionTranslatorLexer(CharStream input, CodeGenerator generator,
-                             Grammar grammar, Rule enclosingRule,
-                             antlr.Token actionToken, int outerAltNum)
-{
-    this(input);
-    this.grammar = grammar;
-    this.generator = generator;
-    this.enclosingRule = enclosingRule;
-    this.actionToken = actionToken;
-    this.outerAltNum = outerAltNum;
-}
-*/
 
 /** Return a list of strings and StringTemplate objects that
  *  represent the translated action.
@@ -124,10 +104,14 @@ public String translate() {
 }
 
 public List translateAction(String action) {
-    ActionTranslatorLexer translator =
-        new ActionTranslatorLexer(generator,
-                                  enclosingRule.name,
-                                  new antlr.CommonToken(ANTLRParser.ACTION,action),outerAltNum);
+	String rname = null;
+	if ( enclosingRule!=null ) {
+		rname = enclosingRule.name;
+	}
+	ActionTranslator translator =
+		new ActionTranslator(generator,
+								  rname,
+								  new antlr.CommonToken(ANTLRParser.ACTION,action),outerAltNum);
     return translator.translateToChunks();
 }
 
@@ -247,6 +231,12 @@ ENCLOSING_RULE_SCOPE_ATTR
 	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
 		//{System.out.println("found \$rule.attr");}
 		{
+		if ( isRuleRefInAlt($x.text)  ) {
+			ErrorManager.grammarError(ErrorManager.MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT,
+									  grammar,
+									  actionToken,
+									  $x.text);
+		}
 		StringTemplate st = null;
 		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
 		if ( scope.isPredefinedRuleScope ) {
@@ -427,13 +417,14 @@ LABEL_REF
 		{
 		StringTemplate st;
 		Grammar.LabelElementPair pair = getElementLabel($ID.text);
-		if ( pair.type==Grammar.TOKEN_LABEL ||
-              pair.type==Grammar.CHAR_LABEL )
+		if ( pair.type==Grammar.RULE_LIST_LABEL ||
+             pair.type==Grammar.TOKEN_LIST_LABEL ||
+             pair.type==Grammar.WILDCARD_TREE_LIST_LABEL )
         {
-			st = template("tokenLabelRef");
+			st = template("listLabelRef");
 		}
 		else {
-			st = template("listLabelRef");
+			st = template("tokenLabelRef");
 		}
 		st.setAttribute("label", $ID.text);
 		}
@@ -814,5 +805,5 @@ INT :	'0'..'9'+
 	;
 
 fragment
-WS	:	(' '|'\t'|'\n')+
+WS	:	(' '|'\t'|'\n'|'\r')+
 	;
diff --git a/tool/src/main/java/org/antlr/Tool.java b/tool/src/main/java/org/antlr/Tool.java
new file mode 100644
index 0000000..9bb31a6
--- /dev/null
+++ b/tool/src/main/java/org/antlr/Tool.java
@@ -0,0 +1,1382 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr;
+
+import antlr.TokenStreamException;
+import antlr.RecognitionException;
+import antlr.ANTLRException;
+import org.antlr.analysis.*;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.runtime.misc.Stats;
+import org.antlr.tool.*;
+import org.antlr.misc.Graph;
+
+import java.io.*;
+import java.util.*;
+
+/** The main ANTLR entry point.  Read a grammar and generate a parser. */
+public class Tool {
+
+    public final Properties antlrSettings = new Properties();
+    public String VERSION = "!Unknown version!";
+    //public static final String VERSION = "${project.version}";
+    public static final String UNINITIALIZED_DIR = "<unset-dir>";
+    private List<String> grammarFileNames = new ArrayList<String>();
+    private boolean generate_NFA_dot = false;
+    private boolean generate_DFA_dot = false;
+    private String outputDirectory = ".";
+    private boolean haveOutputDir = false;
+    private String inputDirectory = null;
+    private String parentGrammarDirectory;
+    private String grammarOutputDirectory;
+    private boolean haveInputDir = false;
+    private String libDirectory = ".";
+    private boolean debug = false;
+    private boolean trace = false;
+    private boolean profile = false;
+    private boolean report = false;
+    private boolean printGrammar = false;
+    private boolean depend = false;
+    private boolean forceAllFilesToOutputDir = false;
+    private boolean forceRelativeOutput = false;
+    protected boolean deleteTempLexer = true;
+    private boolean verbose = false;
+    /** Don't process grammar file if generated files are newer than grammar */
+    private boolean make = false;
+    private boolean showBanner = true;
+    private static boolean exitNow = false;
+
+    // The internal options are for my use on the command line during dev
+    //
+    public static boolean internalOption_PrintGrammarTree = false;
+    public static boolean internalOption_PrintDFA = false;
+    public static boolean internalOption_ShowNFAConfigsInDFA = false;
+    public static boolean internalOption_watchNFAConversion = false;
+
+    /**
+     * A list of dependency generators that are accumulated aaaas (and if) the
+     * tool is required to sort the provided grammars into build dependency order.
+    protected Map<String, BuildDependencyGenerator> buildDependencyGenerators;
+     */
+
+    public static void main(String[] args) {
+        Tool antlr = new Tool(args);
+
+        if (!exitNow) {
+            antlr.process();
+            if (ErrorManager.getNumErrors() > 0) {
+                System.exit(1);
+            }
+            System.exit(0);
+        }
+    }
+
+    /**
+     * Load the properties file org/antlr/antlr.properties and populate any
+     * variables that must be initialized from it, such as the version of ANTLR.
+     */
+    private void loadResources() {
+        InputStream in = null;
+        in = this.getClass().getResourceAsStream("antlr.properties");
+
+        // If we found the resource, then load it, otherwise revert to the
+        // defaults.
+        //
+        if (in != null) {
+            try {
+                // Load the resources into the map
+                //
+                antlrSettings.load(in);
+
+                // Set any variables that we need to populate from the resources
+                //
+                VERSION = antlrSettings.getProperty("antlr.version");
+
+            } catch (Exception e) {
+                // Do nothing, just leave the defaults in place
+            }
+        }
+    }
+
+    public Tool() {
+        loadResources();
+    }
+
+    public Tool(String[] args) {
+
+        loadResources();
+
+        // Set all the options and pick up all the named grammar files
+        //
+        processArgs(args);
+
+
+    }
+
+    public void processArgs(String[] args) {
+
+        if (isVerbose()) {
+            ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
+            showBanner = false;
+        }
+
+        if (args == null || args.length == 0) {
+            help();
+            return;
+        }
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-o") || args[i].equals("-fo")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing output directory with -fo/-o option; ignoring");
+                }
+                else {
+                    if (args[i].equals("-fo")) { // force output into dir
+                        setForceAllFilesToOutputDir(true);
+                    }
+                    i++;
+                    outputDirectory = args[i];
+                    if (outputDirectory.endsWith("/") ||
+                        outputDirectory.endsWith("\\")) {
+                        outputDirectory =
+                            outputDirectory.substring(0, getOutputDirectory().length() - 1);
+                    }
+                    File outDir = new File(outputDirectory);
+                    haveOutputDir = true;
+                    if (outDir.exists() && !outDir.isDirectory()) {
+                        ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE, outputDirectory);
+                        setLibDirectory(".");
+                    }
+                }
+            }
+            else if (args[i].equals("-lib")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing library directory with -lib option; ignoring");
+                }
+                else {
+                    i++;
+                    setLibDirectory(args[i]);
+                    if (getLibraryDirectory().endsWith("/") ||
+                        getLibraryDirectory().endsWith("\\")) {
+                        setLibDirectory(getLibraryDirectory().substring(0, getLibraryDirectory().length() - 1));
+                    }
+                    File outDir = new File(getLibraryDirectory());
+                    if (!outDir.exists()) {
+                        ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND, getLibraryDirectory());
+                        setLibDirectory(".");
+                    }
+                }
+            }
+            else if (args[i].equals("-nfa")) {
+                setGenerate_NFA_dot(true);
+            }
+            else if (args[i].equals("-dfa")) {
+                setGenerate_DFA_dot(true);
+            }
+            else if (args[i].equals("-debug")) {
+                setDebug(true);
+            }
+            else if (args[i].equals("-trace")) {
+                setTrace(true);
+            }
+            else if (args[i].equals("-report")) {
+                setReport(true);
+            }
+            else if (args[i].equals("-profile")) {
+                setProfile(true);
+            }
+            else if (args[i].equals("-print")) {
+                setPrintGrammar(true);
+            }
+            else if (args[i].equals("-depend")) {
+                setDepend(true);
+            }
+            else if (args[i].equals("-verbose")) {
+                setVerbose(true);
+            }
+            else if (args[i].equals("-version")) {
+                version();
+                exitNow = true;
+            }
+            else if (args[i].equals("-make")) {
+                setMake(true);
+            }
+            else if (args[i].equals("-message-format")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing output format with -message-format option; using default");
+                }
+                else {
+                    i++;
+                    ErrorManager.setFormat(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xgrtree")) {
+                internalOption_PrintGrammarTree = true; // print grammar tree
+            }
+            else if (args[i].equals("-Xdfa")) {
+                internalOption_PrintDFA = true;
+            }
+            else if (args[i].equals("-Xnoprune")) {
+                DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
+            }
+            else if (args[i].equals("-Xnocollapse")) {
+                DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
+            }
+            else if (args[i].equals("-Xdbgconversion")) {
+                NFAToDFAConverter.debug = true;
+            }
+            else if (args[i].equals("-Xmultithreaded")) {
+                NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
+            }
+            else if (args[i].equals("-Xnomergestopstates")) {
+                DFAOptimizer.MERGE_STOP_STATES = false;
+            }
+            else if (args[i].equals("-Xdfaverbose")) {
+                internalOption_ShowNFAConfigsInDFA = true;
+            }
+            else if (args[i].equals("-Xwatchconversion")) {
+                internalOption_watchNFAConversion = true;
+            }
+            else if (args[i].equals("-XdbgST")) {
+                CodeGenerator.EMIT_TEMPLATE_DELIMITERS = true;
+            }
+            else if (args[i].equals("-Xmaxinlinedfastates")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
+                }
+                else {
+                    i++;
+                    CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xmaxswitchcaselabels")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
+                }
+                else {
+                    i++;
+                    CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xminswitchalts")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
+                }
+                else {
+                    i++;
+                    CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xm")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max recursion with -Xm option; ignoring");
+                }
+                else {
+                    i++;
+                    NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xmaxdfaedges")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
+                }
+                else {
+                    i++;
+                    DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xconversiontimeout")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
+                }
+                else {
+                    i++;
+                    DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xnfastates")) {
+                DecisionProbe.verbose = true;
+            }
+            else if (args[i].equals("-X")) {
+                Xhelp();
+            }
+            else {
+                if (args[i].charAt(0) != '-') {
+                    // Must be the grammar file
+                    addGrammarFile(args[i]);
+                }
+            }
+        }
+    }
+
+    /*
+    protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
+    // check for invalid command line args
+    for (int a = 0; a < args.length; a++) {
+    if (!cmdLineArgValid.member(a)) {
+    System.err.println("invalid command-line argument: " + args[a] + "; ignored");
+    }
+    }
+    }
+     */
+    
+    /**
+     * Checks to see if the list of outputFiles all exist, and have
+     * last-modified timestamps which are later than the last-modified
+     * timestamp of all the grammar files involved in build the output
+     * (imports must be checked). If these conditions hold, the method
+     * returns false, otherwise, it returns true.
+     *
+     * @param grammarFileName The grammar file we are checking
+     */
+    public boolean buildRequired(String grammarFileName)
+        throws IOException, ANTLRException
+    {
+        BuildDependencyGenerator bd =
+            new BuildDependencyGenerator(this, grammarFileName);
+
+        List<File> outputFiles = bd.getGeneratedFileList();
+        List<File> inputFiles = bd.getDependenciesFileList();
+        // Note that input directory must be set to use buildRequired
+        File grammarFile;
+        if (haveInputDir) {
+            grammarFile = new File(inputDirectory, grammarFileName);
+        }
+        else {
+            grammarFile = new File(grammarFileName);
+        }
+        long grammarLastModified = grammarFile.lastModified();
+        for (File outputFile : outputFiles) {
+            if (!outputFile.exists() || grammarLastModified > outputFile.lastModified()) {
+                // One of the output files does not exist or is out of date, so we must build it
+                return true;
+            }
+            // Check all of the imported grammars and see if any of these are younger
+            // than any of the output files.
+            if (inputFiles != null) {
+                for (File inputFile : inputFiles) {
+
+                    if (inputFile.lastModified() > outputFile.lastModified()) {
+                        // One of the imported grammar files has been updated so we must build
+                        return true;
+                    }
+                }
+            }
+        }
+        if (isVerbose()) {
+            System.out.println("Grammar " + grammarFile + " is up to date - build skipped");
+        }
+        return false;
+    }
+
+    public void process() {
+        boolean exceptionWhenWritingLexerFile = false;
+        String lexerGrammarFileName = null;		// necessary at this scope to have access in the catch below
+
+        // Have to be tricky here when Maven or build tools call in and must new Tool()
+        // before setting options. The banner won't display that way!
+        if (isVerbose() && showBanner) {
+            ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
+            showBanner = false;
+        }
+
+        try {
+            sortGrammarFiles(); // update grammarFileNames
+        }
+        catch (Exception e) {
+            ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
+        }
+        catch (Error e) {
+            ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
+        }
+
+        for (String grammarFileName : grammarFileNames) {
+            // If we are in make mode (to support build tools like Maven) and the
+            // file is already up to date, then we do not build it (and in verbose mode
+            // we will say so).
+            if (make) {
+                try {
+                    if ( !buildRequired(grammarFileName) ) continue;
+                }
+                catch (Exception e) {
+                    ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
+                }
+            }
+
+            if (isVerbose() && !isDepend()) {
+                System.out.println(grammarFileName);
+            }
+            try {
+                if (isDepend()) {
+                    BuildDependencyGenerator dep =
+                        new BuildDependencyGenerator(this, grammarFileName);
+                    /*
+                    List outputFiles = dep.getGeneratedFileList();
+                    List dependents = dep.getDependenciesFileList();
+                    System.out.println("output: "+outputFiles);
+                    System.out.println("dependents: "+dependents);
+                     */
+                    System.out.println(dep.getDependencies());
+                    continue;
+                }
+
+                Grammar grammar = getRootGrammar(grammarFileName);
+                // we now have all grammars read in as ASTs
+                // (i.e., root and all delegates)
+                grammar.composite.assignTokenTypes();
+                grammar.composite.defineGrammarSymbols();
+                grammar.composite.createNFAs();
+
+                generateRecognizer(grammar);
+
+                if (isPrintGrammar()) {
+                    grammar.printGrammar(System.out);
+                }
+
+                if (isReport()) {
+                    GrammarReport greport = new GrammarReport(grammar);
+                    System.out.println(greport.toString());
+                    // print out a backtracking report too (that is not encoded into log)
+                    System.out.println(greport.getBacktrackingReport());
+                    // same for aborted NFA->DFA conversions
+                    System.out.println(greport.getAnalysisTimeoutReport());
+                }
+                if (isProfile()) {
+                    GrammarReport greport = new GrammarReport(grammar);
+                    Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
+                                      greport.toNotifyString());
+                }
+
+                // now handle the lexer if one was created for a merged spec
+                String lexerGrammarStr = grammar.getLexerGrammar();
+                //System.out.println("lexer grammar:\n"+lexerGrammarStr);
+                if (grammar.type == Grammar.COMBINED && lexerGrammarStr != null) {
+                    lexerGrammarFileName = grammar.getImplicitlyGeneratedLexerFileName();
+                    try {
+                        Writer w = getOutputFile(grammar, lexerGrammarFileName);
+                        w.write(lexerGrammarStr);
+                        w.close();
+                    }
+                    catch (IOException e) {
+                        // emit different error message when creating the implicit lexer fails
+                        // due to write permission error
+                        exceptionWhenWritingLexerFile = true;
+                        throw e;
+                    }
+                    try {
+                        StringReader sr = new StringReader(lexerGrammarStr);
+                        Grammar lexerGrammar = new Grammar();
+                        lexerGrammar.composite.watchNFAConversion = internalOption_watchNFAConversion;
+                        lexerGrammar.implicitLexer = true;
+                        lexerGrammar.setTool(this);
+                        File lexerGrammarFullFile =
+                            new File(getFileDirectory(lexerGrammarFileName), lexerGrammarFileName);
+                        lexerGrammar.setFileName(lexerGrammarFullFile.toString());
+
+                        lexerGrammar.importTokenVocabulary(grammar);
+                        lexerGrammar.parseAndBuildAST(sr);
+
+                        sr.close();
+
+                        lexerGrammar.composite.assignTokenTypes();
+                        lexerGrammar.composite.defineGrammarSymbols();
+                        lexerGrammar.composite.createNFAs();
+
+                        generateRecognizer(lexerGrammar);
+                    }
+                    finally {
+                        // make sure we clean up
+                        if (deleteTempLexer) {
+                            File outputDir = getOutputDirectory(lexerGrammarFileName);
+                            File outputFile = new File(outputDir, lexerGrammarFileName);
+                            outputFile.delete();
+                        }
+                    }
+                }
+            }
+            catch (IOException e) {
+                if (exceptionWhenWritingLexerFile) {
+                    ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE,
+                                       lexerGrammarFileName, e);
+                }
+                else {
+                    ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
+                                       grammarFileName);
+                }
+            }
+            catch (Exception e) {
+                ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
+            }
+            /*
+           finally {
+           System.out.println("creates="+ Interval.creates);
+           System.out.println("hits="+ Interval.hits);
+           System.out.println("misses="+ Interval.misses);
+           System.out.println("outOfRange="+ Interval.outOfRange);
+           }
+            */
+        }
+    }
+
+    /** Sort the grammar files into build order: a grammar that reads a
+     *  token vocabulary (tokenVocab option) is placed after the grammar
+     *  whose .tokens file it needs.  Rewrites grammarFileNames with the
+     *  new ordering; files that cannot be opened are reported via
+     *  ErrorManager and dropped from the list.
+     *
+     *  @throws IOException if scanning a grammar file for its options fails
+     */
+    public void sortGrammarFiles() throws IOException {
+        //System.out.println("Grammar names "+getGrammarFileNames());
+        Graph g = new Graph();
+        List<String> missingFiles = new ArrayList<String>();
+        for (String gfile : grammarFileNames) {
+            try {
+                GrammarSpelunker grammar = new GrammarSpelunker(inputDirectory, gfile);
+                grammar.parse();
+                String vocabName = grammar.getTokenVocab();
+                String grammarName = grammar.getGrammarName();
+                // Make all grammars depend on any tokenVocab options
+                if ( vocabName!=null ) g.addEdge(gfile, vocabName+CodeGenerator.VOCAB_FILE_EXTENSION);
+                // Make all generated tokens files depend on their grammars
+                g.addEdge(grammarName+CodeGenerator.VOCAB_FILE_EXTENSION, gfile);
+            }
+            catch (FileNotFoundException fnfe) {
+                ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, gfile);
+                missingFiles.add(gfile);
+            }
+        }
+        List<Object> sorted = g.sort();
+        //System.out.println("sorted="+sorted);
+        grammarFileNames.clear(); // wipe so we can give new ordered list
+        for (int i = 0; i < sorted.size(); i++) {
+            String f = (String)sorted.get(i);
+            // the sort result also contains .tokens nodes and unreadable
+            // files; keep only real grammar files we can process
+            if ( missingFiles.contains(f) ) continue;
+            if ( !(f.endsWith(".g") || f.endsWith(".g3")) ) continue;
+            grammarFileNames.add(f);
+        }
+        //System.out.println("new grammars="+grammarFileNames);
+    }
+
+    /** Get a grammar mentioned on the command-line and any delegates.
+     *
+     *  Side effects: records parentGrammarDirectory (so imports can be
+     *  found next to the root grammar) and grammarOutputDirectory (the
+     *  relative path used when writing generated files).
+     *
+     *  @param grammarFileName grammar file to load, resolved against the
+     *         input directory when one has been set via setInputDirectory()
+     *  @return the root Grammar with its AST built
+     *  @throws IOException if the grammar file cannot be read
+     */
+    public Grammar getRootGrammar(String grammarFileName)
+        throws IOException
+    {
+        //StringTemplate.setLintMode(true);
+        // grammars mentioned on command line are either roots or single grammars.
+        // create the necessary composite in case it's got delegates; even
+        // single grammar needs it to get token types.
+        CompositeGrammar composite = new CompositeGrammar();
+        Grammar grammar = new Grammar(this, grammarFileName, composite);
+        composite.setDelegationRoot(grammar);
+        File f = null;
+
+        if (haveInputDir) {
+            f = new File(inputDirectory, grammarFileName);
+        }
+        else {
+            f = new File(grammarFileName);
+        }
+
+        // Store the location of this grammar as if we import files, we can then
+        // search for imports in the same location as the original grammar as well as in
+        // the lib directory.
+        //
+        parentGrammarDirectory = f.getParent();
+
+        if (grammarFileName.lastIndexOf(File.separatorChar) == -1) {
+            grammarOutputDirectory = ".";
+        }
+        else {
+            grammarOutputDirectory = grammarFileName.substring(0, grammarFileName.lastIndexOf(File.separatorChar));
+        }
+
+        // Close the readers in finally blocks: the original leaked the file
+        // handle whenever parseAndBuildAST threw (e.g. on a syntax error).
+        FileReader fr = new FileReader(f);
+        try {
+            BufferedReader br = new BufferedReader(fr);
+            try {
+                grammar.parseAndBuildAST(br);
+                composite.watchNFAConversion = internalOption_watchNFAConversion;
+            }
+            finally {
+                br.close();
+            }
+        }
+        finally {
+            fr.close();
+        }
+        return grammar;
+    }
+
+    /** Create NFA, DFA and generate code for grammar.
+     *  Create NFA for any delegates first.  Once all NFA are created,
+     *  it's ok to create DFA, which must check for left-recursion.  That check
+     *  is done by walking the full NFA, which therefore must be complete.
+     *  After all NFA, comes DFA conversion for root grammar then code gen for
+     *  root grammar.  DFA and code gen for delegates comes next.
+     *
+     *  Does nothing when the grammar has no "language" option value.
+     */
+    protected void generateRecognizer(Grammar grammar) {
+        String language = (String) grammar.getOption("language");
+        if (language != null) {
+            CodeGenerator generator = new CodeGenerator(this, grammar, language);
+            grammar.setCodeGenerator(generator);
+            generator.setDebug(isDebug());
+            generator.setProfile(isProfile());
+            generator.setTrace(isTrace());
+
+            // generate NFA early in case of crash later (for debugging)
+            if (isGenerate_NFA_dot()) {
+                generateNFAs(grammar);
+            }
+
+            // GENERATE CODE
+            generator.genRecognizer();
+
+            if (isGenerate_DFA_dot()) {
+                generateDFAs(grammar);
+            }
+
+            // recursively generate recognizers for imported (delegate) grammars
+            List<Grammar> delegates = grammar.getDirectDelegates();
+            for (int i = 0; delegates != null && i < delegates.size(); i++) {
+                Grammar delegate = (Grammar) delegates.get(i);
+                if (delegate != grammar) { // already processing this one
+                    generateRecognizer(delegate);
+                }
+            }
+        }
+    }
+
+    /** Write a DOT description of every decision DFA in grammar g.
+     *  Files are named <grammar>.dec-<n> (decision numbers start at 1);
+     *  for the implicit lexer a grammar-type suffix is added so its files
+     *  do not collide with the parser's.  I/O failures are reported via
+     *  ErrorManager rather than propagated.
+     */
+    public void generateDFAs(Grammar g) {
+        for (int d = 1; d <= g.getNumberOfDecisions(); d++) {
+            DFA dfa = g.getLookaheadDFA(d);
+            if (dfa == null) {
+                continue; // not there for some reason, ignore
+            }
+            DOTGenerator dotGenerator = new DOTGenerator(g);
+            String dot = dotGenerator.getDOT(dfa.startState);
+            String dotFileName = g.name + "." + "dec-" + d;
+            if (g.implicitLexer) {
+                dotFileName = g.name + Grammar.grammarTypeToFileNameSuffix[g.type] + "." + "dec-" + d;
+            }
+            try {
+                writeDOTFile(g, dotFileName, dot);
+            } catch (IOException ioe) {
+                ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
+                                   dotFileName,
+                                   ioe);
+            }
+        }
+    }
+
+    /** Write a DOT description of the NFA for every rule in g, including
+     *  rules from imported grammars.  Rules whose DOT comes back null are
+     *  skipped; write failures are reported via ErrorManager per rule so
+     *  one bad file does not stop the others.
+     */
+    protected void generateNFAs(Grammar g) {
+        DOTGenerator dotGenerator = new DOTGenerator(g);
+        Collection rules = g.getAllImportedRules();
+        rules.addAll(g.getRules());
+
+        for (Iterator itr = rules.iterator(); itr.hasNext();) {
+            Rule r = (Rule) itr.next();
+            try {
+                String dot = dotGenerator.getDOT(r.startState);
+                if (dot != null) {
+                    writeDOTFile(g, r, dot);
+                }
+            } catch (IOException ioe) {
+                ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
+            }
+        }
+    }
+
+    /** Write the DOT spec for rule r; the file is named grammar.rule.dot. */
+    protected void writeDOTFile(Grammar g, Rule r, String dot) throws IOException {
+        writeDOTFile(g, r.grammar.name + "." + r.name, dot);
+    }
+
+    /** Write a DOT spec to <name>.dot in the output location for grammar g.
+     *  The writer is closed in a finally block so the underlying file
+     *  handle is not leaked when the write itself throws (the original
+     *  skipped close() on that path).
+     */
+    protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
+        Writer fw = getOutputFile(g, name + ".dot");
+        try {
+            fw.write(dot);
+        }
+        finally {
+            fw.close();
+        }
+    }
+
+    /** Print the ANTLR version banner to the error channel.
+     *  NOTE(review): VERSION is read through a fresh Tool instance; if it
+     *  is a static field this could simply be Tool.VERSION — confirm.
+     */
+    private static void version() {
+        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
+    }
+
+    /** Print the version banner and the standard command-line usage summary. */
+    private static void help() {
+        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
+        System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
+        System.err.println("  -o outputDir          specify output directory where all output is generated");
+        System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
+        System.err.println("  -lib dir              specify location of token files");
+        System.err.println("  -depend               generate file dependencies");
+        System.err.println("  -report               print out a report about the grammar(s) processed");
+        System.err.println("  -print                print out the grammar without actions");
+        System.err.println("  -debug                generate a parser that emits debugging events");
+        System.err.println("  -profile              generate a parser that computes profiling information");
+        System.err.println("  -nfa                  generate an NFA for each rule");
+        System.err.println("  -dfa                  generate a DFA for each decision point");
+        System.err.println("  -message-format name  specify output style for messages");
+        System.err.println("  -verbose              generate ANTLR version and other information");
+        System.err.println("  -make                 only build if generated files older than grammar");
+        System.err.println("  -version              print the version of ANTLR and exit.");
+        System.err.println("  -X                    display extended argument list");
+    }
+
+    /** Print the version banner and the extended (-X) option summary,
+     *  including the current default values of the tunable limits.
+     */
+    private static void Xhelp() {
+        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
+        System.err.println("  -Xgrtree                print the grammar AST");
+        System.err.println("  -Xdfa                   print DFA as text ");
+        System.err.println("  -Xnoprune               test lookahead against EBNF block exit branches");
+        System.err.println("  -Xnocollapse            collapse incident edges into DFA states");
+        System.err.println("  -Xdbgconversion         dump lots of info during NFA conversion");
+        System.err.println("  -Xmultithreaded         run the analysis in 2 threads");
+        System.err.println("  -Xnomergestopstates     do not merge stop states");
+        System.err.println("  -Xdfaverbose            generate DFA states in DOT with NFA configs");
+        System.err.println("  -Xwatchconversion       print a message for each NFA before converting");
+        System.err.println("  -XdbgST                 put tags at start/stop of all templates in output");
+        System.err.println("  -Xnfastates             for nondeterminisms, list NFA states for each path");
+        System.err.println("  -Xm m                   max number of rule invocations during conversion           [" + NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK + "]");
+        System.err.println("  -Xmaxdfaedges m         max \"comfortable\" number of edges for single DFA state     [" + DFA.MAX_STATE_TRANSITIONS_FOR_TABLE + "]");
+        System.err.println("  -Xconversiontimeout t   set NFA conversion timeout (ms) for each decision          [" + DFA.MAX_TIME_PER_DFA_CREATION + "]");
+        System.err.println("  -Xmaxinlinedfastates m  max DFA states before table used rather than inlining      [" + CodeGenerator.MADSI_DEFAULT +"]");
+        System.err.println("  -Xmaxswitchcaselabels m don't generate switch() statements for dfas bigger  than m [" + CodeGenerator.MSCL_DEFAULT +"]");
+        System.err.println("  -Xminswitchalts m       don't generate switch() statements for dfas smaller than m [" + CodeGenerator.MSA_DEFAULT + "]");
+    }
+
+    /**
+     * Set the threshold of case labels beyond which ANTLR will not instruct the target template
+     * to generate switch() { case xxx: ...
+     *
+     * @param maxSwitchCaseLabels Maximum number of case labels that ANTLR should allow the target code
+     */
+    public void setMaxSwitchCaseLabels(int maxSwitchCaseLabels) {
+        CodeGenerator.MAX_SWITCH_CASE_LABELS = maxSwitchCaseLabels;
+    }
+
+    /**
+     * Set the threshold of the number of alts, below which ANTLR will not instruct the target
+     * template to use a switch statement.
+     *
+     * @param minSwitchAlts the minimum number of alts required to use a switch statement
+     */
+    public void setMinSwitchAlts(int minSwitchAlts) {
+        CodeGenerator.MIN_SWITCH_ALTS = minSwitchAlts;
+    }
+    
+    /**
+     * Set the location (base directory) where output files should be produced
+     * by the ANTLR tool.  Also marks the output directory as explicitly set
+     * (haveOutputDir), which changes how relative paths are resolved later.
+     * @param outputDirectory base directory for generated output
+     */
+    public void setOutputDirectory(String outputDirectory) {
+        haveOutputDir = true;
+        this.outputDirectory = outputDirectory;
+    }
+
+    /**
+     * Used by build tools to force the output files to always be
+     * relative to the base output directory, even though the tool
+     * had to set the output directory to an absolute path as it
+     * cannot rely on the working directory like command line invocation
+     * can.
+     *
+     * @param forceRelativeOutput true if output files should always be relative to base output directory
+     */
+    public void setForceRelativeOutput(boolean forceRelativeOutput) {
+        this.forceRelativeOutput = forceRelativeOutput;
+    }
+
+    /**
+     * Set the base location of input files. Normally (when the tool is
+     * invoked from the command line), the inputDirectory is not set, but
+     * for build tools such as Maven, we need to be able to locate the input
+     * files relative to the base, as the working directory could be anywhere and
+     * changing working directories is not a valid concept for JVMs because of threading and
+     * so on. Setting the directory just means that the getFileDirectory() method will
+     * try to open files relative to this input directory.
+     *
+     * @param inputDirectory Input source base directory
+     */
+    public void setInputDirectory(String inputDirectory) {
+        this.inputDirectory = inputDirectory;
+        haveInputDir = true;
+    }
+
+    /** This method is used by all code generators to create new output
+     *  files. If the outputDir set by -o is not present it will be created.
+     *  The final filename is sensitive to the output directory and
+     *  the directory where the grammar file was found.  If -o is /tmp
+     *  and the original grammar file was foo/t.g then output files
+     *  go in /tmp/foo.
+     *
+     *  The output dir -o spec takes precedence if it's absolute.
+     *  E.g., if the grammar file dir is absolute the output dir is given
+     *  precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
+     *  output (assuming t.g holds T.java).
+     *
+     *  If no -o is specified, then just write to the directory where the
+     *  grammar file was found.
+     *
+     *  If outputDirectory==null then write a String.
+     */
+    public Writer getOutputFile(Grammar g, String fileName) throws IOException {
+        if (getOutputDirectory() == null) {
+            return new StringWriter();
+        }
+        // output directory is a function of where the grammar file lives
+        // for subdir/T.g, you get subdir here.  Well, depends on -o etc...
+        // But, if this is a .tokens file, then we force the output to
+        // be the base output directory (or current directory if there is not a -o)
+        //
+        File outputDir;
+        if (fileName.endsWith(CodeGenerator.VOCAB_FILE_EXTENSION)) {
+            if (haveOutputDir) {
+                outputDir = new File(getOutputDirectory());
+            }
+            else {
+                outputDir = new File(".");
+            }
+        }
+        else {
+            outputDir = getOutputDirectory(g.getFileName());
+        }
+        File outputFile = new File(outputDir, fileName);
+
+        if (!outputDir.exists()) {
+            // NOTE(review): mkdirs() result is ignored; a failure here
+            // surfaces as an IOException from the FileWriter below
+            outputDir.mkdirs();
+        }
+        // NOTE(review): FileWriter uses the platform default encoding
+        FileWriter fw = new FileWriter(outputFile);
+        return new BufferedWriter(fw);
+    }
+
+    /**
+     * Return the location where ANTLR will generate output files for a given file. This is a
+     * base directory and output files will be relative to here in some cases
+     * such as when -o option is used and input files are given relative
+     * to the input directory.
+     *
+     * @param fileNameWithPath path to input source
+     * @return directory into which output for this input file should be written
+     */
+    public File getOutputDirectory(String fileNameWithPath) {
+
+        File outputDir = new File(getOutputDirectory());
+        String fileDirectory;
+
+        // Some files are given to us without a PATH but should
+        // still be written to the output directory in the relative path of
+        // the output directory. The file directory is either the set of sub directories
+        // or just or the relative path recorded for the parent grammar. This means
+        // that when we write the tokens files, or the .java files for imported grammars
+        // that we will write them in the correct place.
+        //
+        if (fileNameWithPath.lastIndexOf(File.separatorChar) == -1) {
+
+            // No path is included in the file name, so make the file
+            // directory the same as the parent grammar (which might still be just ""
+            // but when it is not, we will write the file in the correct place.
+            //
+            fileDirectory = grammarOutputDirectory;
+
+        }
+        else {
+            fileDirectory = fileNameWithPath.substring(0, fileNameWithPath.lastIndexOf(File.separatorChar));
+        }
+        if (haveOutputDir) {
+            // -o /tmp /var/lib/t.g => /tmp/T.java
+            // -o subdir/output /usr/lib/t.g => subdir/output/T.java
+            // -o . /usr/lib/t.g => ./T.java
+            if ((fileDirectory != null && !forceRelativeOutput) &&
+                (new File(fileDirectory).isAbsolute() ||
+                 fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
+                isForceAllFilesToOutputDir()) {
+                // somebody set the dir, it takes precedence; write new file there
+                outputDir = new File(getOutputDirectory());
+            }
+            else {
+                // -o /tmp subdir/t.g => /tmp/subdir/t.g
+                if (fileDirectory != null) {
+                    outputDir = new File(getOutputDirectory(), fileDirectory);
+                }
+                else {
+                    outputDir = new File(getOutputDirectory());
+                }
+            }
+        }
+        else {
+            // they didn't specify a -o dir so just write to location
+            // where grammar is, absolute or relative, this will only happen
+            // with command line invocation as build tools will always
+            // supply an output directory.
+            //
+            outputDir = new File(fileDirectory);
+        }
+        return outputDir;
+    }
+
+    /**
+     * Name a file from the -lib dir.  Imported grammars and .tokens files
+     *
+     * If we do not locate the file in the library directory, then we try
+     * the location of the originating grammar.
+     *
+     * @param fileName input name we are looking for
+     * @return Path to file that we think should be the import file
+     *
+     * @throws java.io.IOException
+     */
+    public String getLibraryFile(String fileName) throws IOException {
+
+        // First, see if we can find the file in the library directory
+        //
+        File f = new File(getLibraryDirectory() + File.separator + fileName);
+
+        if (f.exists()) {
+
+            // Found in the library directory
+            //
+            return f.getAbsolutePath();
+        }
+
+        // Need to assume it is in the same location as the input file. Note that
+        // this is only relevant for external build tools and when the input grammar
+        // was specified relative to the source directory (working directory if using
+        // the command line).
+        //
+        return parentGrammarDirectory + File.separator + fileName;
+    }
+
+    /** Return the directory containing the grammar file for this grammar.
+     *  normally this is a relative path from current directory.  People will
+     *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
+     *  "grammars/foo.g3" etc...  This method returns "grammars".
+     *
+     *  If we have been given a specific input directory as a base, then
+     *  we must find the directory relative to this directory, unless the
+     *  file name is given to us in absolute terms.
+     */
+    public String getFileDirectory(String fileName) {
+
+        File f;
+        // NOTE(review): "absolute" is detected via a File.separator prefix;
+        // this does not recognize Windows drive-letter paths — confirm intent
+        if (haveInputDir && !fileName.startsWith(File.separator)) {
+            f = new File(inputDirectory, fileName);
+        }
+        else {
+            f = new File(fileName);
+        }
+        // And ask Java what the base directory of this location is
+        //
+        return f.getParent();
+    }
+
+    /** Return a File descriptor for vocab file.  Look in library or
+     *  in -o output path.  antlr -o foo T.g U.g where U needs T.tokens
+     *  won't work unless we look in foo too. If we do not find the
+     *  file in the lib directory then must assume that the .tokens file
+     *  is going to be generated as part of this build and we have defined
+     *  .tokens files so that they ALWAYS are generated in the base output
+     *  directory, which means the current directory for the command line tool if there
+     *  was no output directory specified.
+     */
+    public File getImportedVocabFile(String vocabName) {
+
+        // NOTE(review): the child path starts with File.separator even though
+        // the two-arg File constructor already inserts a separator; harmless
+        // on common platforms but looks unintended — confirm
+        File f = new File(getLibraryDirectory(),
+                          File.separator +
+                          vocabName +
+                          CodeGenerator.VOCAB_FILE_EXTENSION);
+        if (f.exists()) {
+            return f;
+        }
+
+        // We did not find the vocab file in the lib directory, so we need
+        // to look for it in the output directory which is where .tokens
+        // files are generated (in the base, not relative to the input
+        // location.)
+        //
+        if (haveOutputDir) {
+            f = new File(getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
+        }
+        else {
+            f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
+        }
+        return f;
+    }
+
+    /** If the tool needs to panic/exit, how do we do that?
+     *  Throws an Error (not an Exception), so it is not swallowed by the
+     *  broad catch (Exception) handlers used during grammar processing.
+     */
+    public void panic() {
+        throw new Error("ANTLR panic");
+    }
+
+    /** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss
+     *  (current wall-clock time in the default time zone).
+     */
+    public static String getCurrentTimeStamp() {
+        GregorianCalendar calendar = new java.util.GregorianCalendar();
+        int y = calendar.get(Calendar.YEAR);
+        int m = calendar.get(Calendar.MONTH) + 1; // Calendar months are zero-based
+        int d = calendar.get(Calendar.DAY_OF_MONTH);
+        int h = calendar.get(Calendar.HOUR_OF_DAY);
+        int min = calendar.get(Calendar.MINUTE);
+        int sec = calendar.get(Calendar.SECOND);
+        // %02d zero-pads each two-digit field, replacing the original's
+        // manual "0"+n padding and StringBuffer append chain; output is
+        // byte-identical to the old implementation.
+        return String.format("%d-%02d-%02d %02d:%02d:%02d", y, m, d, h, min, sec);
+    }
+
+    /**
+     * Provide the List of all grammar file names that the ANTLR tool will
+     * process or has processed.
+     *
+     * @return the grammarFileNames
+     */
+    public List<String> getGrammarFileNames() {
+        return grammarFileNames;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a description of
+     * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @return the generate_NFA_dot
+     */
+    public boolean isGenerate_NFA_dot() {
+        return generate_NFA_dot;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a description of
+     * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @return the generate_DFA_dot
+     */
+    public boolean isGenerate_DFA_dot() {
+        return generate_DFA_dot;
+    }
+
+    /**
+     * Return the Path to the base output directory, where ANTLR
+     * will generate all the output files for the current language target as
+     * well as any ancillary files such as .tokens vocab files.
+     *
+     * @return the output Directory
+     */
+    public String getOutputDirectory() {
+        return outputDirectory;
+    }
+
+    /**
+     * Return the Path to the directory in which ANTLR will search for ancillary
+     * files such as .tokens vocab files and imported grammar files.
+     *
+     * @return the lib Directory
+     */
+    public String getLibraryDirectory() {
+        return libDirectory;
+    }
+
+    /**
+     * Indicate if ANTLR has generated, or will generate a debug version of the
+     * recognizer. Debug versions of a parser communicate with a debugger such
+     * as that contained in ANTLRWorks and at start up will 'hang' waiting for
+     * a connection on an IP port (49100 by default).
+     *
+     * @return the debug flag
+     */
+    public boolean isDebug() {
+        return debug;
+    }
+
+    /**
+     * Indicate whether ANTLR has generated, or will generate a version of the
+     * recognizer that prints trace messages on entry and exit of each rule.
+     *
+     * @return the trace flag
+     */
+    public boolean isTrace() {
+        return trace;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a version of the
+     * recognizer that gathers statistics about its execution, which it prints when
+     * it terminates.
+     *
+     * @return the profile
+     */
+    public boolean isProfile() {
+        return profile;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a report of various
+     * elements of the grammar analysis, once it has finished analyzing a grammar
+     * file.
+     *
+     * @return the report flag
+     */
+    public boolean isReport() {
+        return report;
+    }
+
+    /**
+     * Indicates whether ANTLR has printed, or will print, a version of the input grammar
+     * file(s) that is stripped of any action code embedded within.
+     *
+     * @return the printGrammar flag
+     */
+    public boolean isPrintGrammar() {
+        return printGrammar;
+    }
+
+    /**
+     * Indicates whether ANTLR has supplied, or will supply, a list of all the things
+     * that the input grammar depends upon and all the things that will be generated
+     * when that grammar is successfully analyzed.
+     *
+     * @return the depend flag
+     */
+    public boolean isDepend() {
+        return depend;
+    }
+
+    /**
+     * Indicates whether ANTLR will force all files to the output directory, even
+     * if the input files have relative paths from the input directory.
+     *
+     * @return the forceAllFilesToOutputDir flag
+     */
+    public boolean isForceAllFilesToOutputDir() {
+        return forceAllFilesToOutputDir;
+    }
+
+    /**
+     * Indicates whether ANTLR will be verbose when analyzing grammar files, such as
+     * displaying the names of the files it is generating and similar information.
+     *
+     * @return the verbose flag
+     */
+    public boolean isVerbose() {
+        return verbose;
+    }
+
+    /**
+     * Provide the current setting of the conversion timeout on DFA creation.
+     *
+     * @return DFA creation timeout value in milliseconds
+     */
+    public int getConversionTimeout() {
+        return DFA.MAX_TIME_PER_DFA_CREATION;
+    }
+
+    /**
+     * Returns the current setting of the message format descriptor
+     * @return Current message format
+     */
+    public String getMessageFormat() {
+        return ErrorManager.getMessageFormat().toString();
+    }
+
+    /**
+     * Returns the number of errors that the analysis/processing threw up.
+     * @return Error count
+     */
+    public int getNumErrors() {
+        return ErrorManager.getNumErrors();
+    }
+
+    /**
+     * Indicate whether the tool will analyze the dependencies of the provided grammar
+     * file list and ensure that grammars with dependencies are built
+     * after any of the other grammars in the list that they are dependent on. Setting
+     * this option also has the side effect that any grammars that are includes for other
+     * grammars in the list are excluded from individual analysis, which allows the caller
+     * to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
+     * of grammars that are just includes for other grammars or what order the grammars
+     * appear on the command line.
+     *
+     * This option was coded to make life easier for tool integration (such as Maven) but
+     * may also be useful at the command line.
+     *
+     * @return true if the tool is currently configured to analyze and sort grammar files.
+     */
+    public boolean getMake() {
+        return make;
+    }
+
+    /**
+     * Set the message format to one of ANTLR, gnu, vs2005
+     *
+     * @param format message format name understood by ErrorManager
+     */
+    public void setMessageFormat(String format) {
+        ErrorManager.setFormat(format);
+    }
+
+    /**
+     * Set the timeout value (in milliseconds) after which DFA creation stops
+     *
+     * @param timeout value in milliseconds
+     */
+    public void setConversionTimeout(int timeout) {
+        DFA.MAX_TIME_PER_DFA_CREATION = timeout;
+    }
+
+    /** Provide the List of all grammar file names that the ANTLR tool should process.
+     *
+     * @param grammarFileNames The list of grammar files to process
+     */
+    public void setGrammarFileNames(List<String> grammarFileNames) {
+        this.grammarFileNames = grammarFileNames;
+    }
+
+    public void addGrammarFile(String grammarFileName) {
+        if (!grammarFileNames.contains(grammarFileName)) {
+            grammarFileNames.add(grammarFileName);
+        }
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a description of
+     * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @param generate_NFA_dot True to generate dot descriptions
+     */
+    public void setGenerate_NFA_dot(boolean generate_NFA_dot) {
+        this.generate_NFA_dot = generate_NFA_dot;
+    }
+
+    /**
+     * Indicates whether ANTLR should generate a description of
+     * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @param generate_DFA_dot True to generate dot descriptions
+     */
+    public void setGenerate_DFA_dot(boolean generate_DFA_dot) {
+        this.generate_DFA_dot = generate_DFA_dot;
+    }
+
+    /**
+     * Set the Path to the directory in which ANTLR will search for ancillary
+     * files such as .tokens vocab files and imported grammar files.
+     *
+     * @param libDirectory the libDirectory to set
+     */
+    public void setLibDirectory(String libDirectory) {
+        this.libDirectory = libDirectory;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a debug version of the
+     * recognizer. Debug versions of a parser communicate with a debugger such
+     * as that contained in ANTLRWorks and at start up will 'hang' waiting for
+     * a connection on an IP port (49100 by default).
+     *
+     * @param debug true to generate a debug mode parser
+     */
+    public void setDebug(boolean debug) {
+        this.debug = debug;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a version of the
+     * recognizer that prints trace messages on entry and exit of each rule
+     *
+     * @param trace true to generate a tracing parser
+     */
+    public void setTrace(boolean trace) {
+        this.trace = trace;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a version of the
+     * recognizer that gathers statistics about its execution, which it prints when
+     * it terminates.
+     *
+     * @param profile true to generate a profiling parser
+     */
+    public void setProfile(boolean profile) {
+        this.profile = profile;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a report of various
+     * elements of the grammar analysis, once it has finished analyzing a grammar
+     * file.
+     *
+     * @param report true to generate the analysis report
+     */
+    public void setReport(boolean report) {
+        this.report = report;
+    }
+
+    /**
+     * Indicate whether ANTLR should print a version of the input grammar
+     * file(s) that is stripped of any action code embedded within.
+     *
+     * @param printGrammar true to generate a stripped file
+     */
+    public void setPrintGrammar(boolean printGrammar) {
+        this.printGrammar = printGrammar;
+    }
+
+    /**
+     * Indicate whether ANTLR should supply a list of all the things
+     * that the input grammar depends upon and all the things that will be generated
+     * when that grammar is successfully analyzed.
+     *
+     * @param depend true to get depends set rather than process the grammar
+     */
+    public void setDepend(boolean depend) {
+        this.depend = depend;
+    }
+
+    /**
+     * Indicates whether ANTLR will force all files to the output directory, even
+     * if the input files have relative paths from the input directory.
+     *
+     * @param forceAllFilesToOutputDir true to force files to output directory
+     */
+    public void setForceAllFilesToOutputDir(boolean forceAllFilesToOutputDir) {
+        this.forceAllFilesToOutputDir = forceAllFilesToOutputDir;
+    }
+
+    /**
+     * Indicate whether ANTLR should be verbose when analyzing grammar files, such as
+     * displaying the names of the files it is generating and similar information.
+     *
+     * @param verbose true to be verbose
+     */
+    public void setVerbose(boolean verbose) {
+        this.verbose = verbose;
+    }
+
+    /**
+     * Indicate whether the tool should analyze the dependencies of the provided grammar
+     * file list and ensure that the grammars with dependencies are built
+     * after any of the other grammars in the list that they are dependent on. Setting
+     * this option also has the side effect that any grammars that are includes for other
+     * grammars in the list are excluded from individual analysis, which allows the caller
+     * to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
+     * of grammars that are just includes for other grammars or what order the grammars
+     * appear on the command line.
+     *
+     * This option was coded to make life easier for tool integration (such as Maven) but
+     * may also be useful at the command line.
+     *
+     * @param make
+     */
+    public void setMake(boolean make) {
+        this.make = make;
+    }
+
+}
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java b/tool/src/main/java/org/antlr/analysis/ActionLabel.java
similarity index 73%
copy from runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
copy to tool/src/main/java/org/antlr/analysis/ActionLabel.java
index 97a7d34..1265364 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
+++ b/tool/src/main/java/org/antlr/analysis/ActionLabel.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,20 +25,32 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.analysis;
 
-public class MismatchedTokenException extends RecognitionException {
-	public int expecting;
+import org.antlr.tool.GrammarAST;
+import org.antlr.tool.Grammar;
 
-	public MismatchedTokenException() {
+public class ActionLabel extends Label {
+	public GrammarAST actionAST;
+	
+	public ActionLabel(GrammarAST actionAST) {
+		super(ACTION);
+		this.actionAST = actionAST;
 	}
 
-	public MismatchedTokenException(int expecting, IntStream input) {
-		super(input);
-		this.expecting = expecting;
+	public boolean isEpsilon() {
+		return true; // we are to be ignored by analysis 'cept for predicates
+	}
+
+	public boolean isAction() {
+		return true;
 	}
 
 	public String toString() {
-		return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
+		return "{"+actionAST+"}";
+	}
+
+	public String toString(Grammar g) {
+		return toString();
 	}
 }
diff --git a/src/org/antlr/analysis/StateCluster.java b/tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java
similarity index 75%
copy from src/org/antlr/analysis/StateCluster.java
copy to tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java
index c31e9e2..6403ea9 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,15 +27,14 @@
 */
 package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
- */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
-
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
-    }
+/** An NFA configuration context stack overflowed. */
+public class AnalysisRecursionOverflowException extends RuntimeException {
+	public DFAState ovfState;
+	public NFAConfiguration proposedNFAConfiguration;
+	public AnalysisRecursionOverflowException(DFAState ovfState,
+											  NFAConfiguration proposedNFAConfiguration)
+	{
+		this.ovfState = ovfState;
+		this.proposedNFAConfiguration = proposedNFAConfiguration;
+	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java
similarity index 81%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java
index 815b4e6..392b316 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,12 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.analysis;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
+/** Analysis took too long; bail out of entire DFA construction. */
+public class AnalysisTimeoutException extends RuntimeException {
+	public DFA abortedDFA;
+	public AnalysisTimeoutException(DFA abortedDFA) {
+		this.abortedDFA = abortedDFA;
 	}
 }
diff --git a/src/org/antlr/analysis/DFA.java b/tool/src/main/java/org/antlr/analysis/DFA.java
similarity index 84%
rename from src/org/antlr/analysis/DFA.java
rename to tool/src/main/java/org/antlr/analysis/DFA.java
index 0fd09dd..e69b99e 100644
--- a/src/org/antlr/analysis/DFA.java
+++ b/tool/src/main/java/org/antlr/analysis/DFA.java
@@ -27,10 +27,9 @@
 */
 package org.antlr.analysis;
 
-import org.antlr.Tool;
 import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.IntervalSet;
 import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
 import org.antlr.misc.Utils;
 import org.antlr.runtime.IntStream;
 import org.antlr.stringtemplate.StringTemplate;
@@ -50,10 +49,10 @@ public class DFA {
 
 	/** Prevent explosion of DFA states during conversion. The max number
 	 *  of states per alt in a single decision's DFA.
-	 */
 	public static final int MAX_STATES_PER_ALT_IN_DFA = 450;
+	 */
 
-	/** Set to 0 to not terminate early */
+	/** Set to 0 to not terminate early (time in ms) */
 	public static int MAX_TIME_PER_DFA_CREATION = 1*1000;
 
 	/** How many edges can each DFA state have before a "special" state
@@ -80,7 +79,7 @@ public class DFA {
 	 *  Not used during fixed k lookahead as it's a waste to fill it with
 	 *  a dup of states array.
      */
-    protected Map uniqueStates = new HashMap();
+    protected Map<DFAState, DFAState> uniqueStates = new HashMap<DFAState, DFAState>();
 
 	/** Maps the state number to the actual DFAState.  Use a Vector as it
 	 *  grows automatically when I set the ith element.  This contains all
@@ -93,9 +92,9 @@ public class DFA {
 	 *  a way to go from state number to DFAState rather than via a
 	 *  hash lookup.
 	 */
-	protected Vector states = new Vector();
+	protected Vector<DFAState> states = new Vector<DFAState>();
 
-	/** Unique state numbers */
+	/** Unique state numbers per DFA */
 	protected int stateCounter = 0;
 
 	/** count only new states not states that were rejected as already present */
@@ -117,7 +116,16 @@ public class DFA {
 	 */
     protected boolean cyclic = false;
 
-    /** Each alt in an NFA derived from a grammar must have a DFA state that
+	/** Track whether this DFA has at least one sem/syn pred encountered
+	 *  during a closure operation.  This is useful for deciding whether
+	 *  to retry a non-LL(*) with k=1.  If no pred, it will not work w/o
+	 *  a pred so don't bother.  It would just give another error message.
+	 */
+	public boolean predicateVisible = false;
+
+	public boolean hasPredicateBlockedByAction = false;
+
+	/** Each alt in an NFA derived from a grammar must have a DFA state that
      *  predicts it lest the parser not know what to do.  Nondeterminisms can
      *  lead to this situation (assuming no semantic predicates can resolve
      *  the problem) and when for some reason, I cannot compute the lookahead
@@ -126,7 +134,7 @@ public class DFA {
      *  and then in method doesStateReachAcceptState() I remove the alts I
      *  know to be uniquely predicted.
      */
-    protected List unreachableAlts;
+    protected List<Integer> unreachableAlts;
 
 	protected int nAlts = 0;
 
@@ -136,7 +144,7 @@ public class DFA {
 	/** Track whether an alt discovers recursion for each alt during
 	 *  NFA to DFA conversion; >1 alt with recursion implies nonregular.
 	 */
-	protected IntSet recursiveAltSet = new IntervalSet();
+	public IntSet recursiveAltSet = new IntervalSet();
 
 	/** Which NFA are we converting (well, which piece of the NFA)? */
     public NFA nfa;
@@ -206,6 +214,11 @@ public class DFA {
 	public Vector transitionEdgeTables; // not used by java yet
 	protected int uniqueCompressedSpecialStateNum = 0;
 
+	/** Which generator to use if we're building state tables */
+	protected CodeGenerator generator = null;
+
+	protected DFA() {;}
+
 	public DFA(int decisionNumber, NFAState decisionStartState) {
 		this.decisionNumber = decisionNumber;
         this.decisionNFAStartState = decisionStartState;
@@ -216,31 +229,36 @@ public class DFA {
 
 		//long start = System.currentTimeMillis();
         nfaConverter = new NFAToDFAConverter(this);
-		nfaConverter.convert();
+		try {
+			nfaConverter.convert();
 
-		// figure out if there are problems with decision
-		verify();
+			// figure out if there are problems with decision
+			verify();
 
-		if ( !probe.isDeterministic() ||
-			 probe.analysisAborted() ||
-			 probe.analysisOverflowed() )
-		{
-			probe.issueWarnings();
-		}
-
-		// must be after verify as it computes cyclic, needed by this routine
-		// should be after warnings because early termination or something
-		// will not allow the reset to operate properly in some cases.
-		resetStateNumbersToBeContiguous();
+			if ( !probe.isDeterministic() || probe.analysisOverflowed() ) {
+				probe.issueWarnings();
+			}
 
-		//long stop = System.currentTimeMillis();
-		//System.out.println("verify cost: "+(int)(stop-start)+" ms");
+			// must be after verify as it computes cyclic, needed by this routine
+			// should be after warnings because early termination or something
+			// will not allow the reset to operate properly in some cases.
+			resetStateNumbersToBeContiguous();
 
-		if ( Tool.internalOption_PrintDFA ) {
-			System.out.println("DFA d="+decisionNumber);
-			FASerializer serializer = new FASerializer(nfa.grammar);
-			String result = serializer.serialize(startState);
-			System.out.println(result);
+			//long stop = System.currentTimeMillis();
+			//System.out.println("verify cost: "+(int)(stop-start)+" ms");
+		}
+		catch (AnalysisTimeoutException at) {
+			probe.reportAnalysisTimeout();
+			if ( !okToRetryDFAWithK1() ) {
+				probe.issueWarnings();
+			}
+		}
+		catch (NonLLStarDecisionException nonLL) {
+			probe.reportNonLLStarDecision(this);
+			// >1 alt recurses, k=* and no auto backtrack nor manual sem/syn
+			if ( !okToRetryDFAWithK1() ) {
+				probe.issueWarnings();
+			}
 		}
     }
 
@@ -263,68 +281,6 @@ public class DFA {
 			// all numbers are unique already; no states are thrown out.
 			return;
 		}
-        /*
-        if ( decisionNumber==14 ) {
-			System.out.println("DFA :"+decisionNumber+" "+this);
-            //System.out.println("DFA start state :"+startState);
-			System.out.println("unique state numbers: ");
-			Set s = getUniqueStates().keySet();
-			for (Iterator it = s.iterator(); it.hasNext();) {
-				DFAState d = (DFAState) it.next();
-				System.out.print(d.stateNumber+" ");
-			}
-			System.out.println();
-
-			System.out.println("size="+s.size());
-			System.out.println("continguous states: ");
-			for (Iterator it = states.iterator(); it.hasNext();) {
-				DFAState d = (DFAState) it.next();
-				if ( d!=null ) {
-                    System.out.print(d.stateNumber+" ");
-                }
-			}
-			System.out.println();
-
-			//Set a = new HashSet();
-			List a = new ArrayList();
-			System.out.println("unique set from states table: ");
-			for (int i = 0; i <= getMaxStateNumber(); i++) {
-				DFAState d = getState(i);
-                if ( d==null ) {
-                    continue;
-                }
-                boolean found=false;
-				for (int j=0; j<a.size(); j++) {
-					DFAState old = (DFAState)a.get(j);
-					if ( old.equals(d) ) {
-						if ( old.stateNumber!=d.stateNumber ) {
-							System.out.println("WHAT! state["+i+"]="+d+" prev in list as "+old);
-						}
-						found=true;
-					}
-				}
-				if ( !found ) {
-					a.add(d);
-				}
-			}
-			for (Iterator it = a.iterator(); it.hasNext();) {
-				DFAState d = (DFAState) it.next();
-                if ( d!=null ) {
-                    System.out.print(d.stateNumber+" ");
-                }
-            }
-			System.out.println();
-			System.out.println("size="+a.size());
-
-			if ( a.equals(s) ) {
-				System.out.println("both sets same");
-			}
-			else {
-				System.out.println("sets NOT same");
-			}
-			System.out.println("stateCounter="+stateCounter);
-		}
-        */
 
         // walk list of DFAState objects by state number,
 		// setting state numbers to 0..n-1
@@ -349,28 +305,6 @@ public class DFA {
 				snum++;
 			}
 		}
-        /*
-        if ( decisionNumber==14 ) {
-			//System.out.println("max state num: "+maxStateNumber);
-			System.out.println("after renum, DFA :"+decisionNumber+" "+this);
-			System.out.println("uniq states.size="+uniqueStates.size());
-
-			Set a = new HashSet();
-			System.out.println("after renumber; unique set from states table: ");
-			for (int i = 0; i <= getMaxStateNumber(); i++) {
-				DFAState d = getState(i);
-				a.add(d);
-			}
-			for (Iterator it = a.iterator(); it.hasNext();) {
-				DFAState d = (DFAState) it.next();
-				if ( d!=null ) {
-                    System.out.print(d.stateNumber+" ");
-                }
-			}
-			System.out.println();
-			System.out.println("size="+a.size());
-		}
-        */
         if ( snum!=getNumberOfStates() ) {
 			ErrorManager.internalError("DFA "+decisionNumber+": "+
 				decisionNFAStartState.getDescription()+" num unique states "+getNumberOfStates()+
@@ -443,8 +377,8 @@ public class DFA {
 					break;
 				}
 			}
-			encoded.add(encodeIntAsCharEscape((char)n));
-			encoded.add(encodeIntAsCharEscape((char)I.intValue()));
+			encoded.add(generator.target.encodeIntAsCharEscape((char)n));
+			encoded.add(generator.target.encodeIntAsCharEscape((char)I.intValue()));
 			i+=n;
 		}
 		return encoded;
@@ -452,7 +386,7 @@ public class DFA {
 
 	public void createStateTables(CodeGenerator generator) {
 		//System.out.println("createTables:\n"+this);
-
+		this.generator = generator;
 		description = getNFADecisionStartState().getDescription();
 		description =
 			generator.target.getTargetStringLiteralFromString(description);
@@ -641,14 +575,6 @@ public class DFA {
 			transitionEdgeTables.set(s.stateNumber, edgeClass);
 		}
 		else {
-			/*
-			if ( stateTransitions.size()>255 ) {
-				System.out.println("edge edgeTable "+stateTransitions.size()+" s"+s.stateNumber+": "+Utils.integer(edgeTransitionClass));
-			}
-			else {
-				System.out.println("stateTransitions="+stateTransitions);
-			}
-			*/
 			edgeClass = Utils.integer(edgeTransitionClass);
 			transitionEdgeTables.set(s.stateNumber, edgeClass);
 			edgeTransitionClassMap.put(stateTransitions, edgeClass);
@@ -720,14 +646,6 @@ public class DFA {
 		}
 	}
 
-	public static String encodeIntAsCharEscape(int v) {
-		if ( v<=127 ) {
-			return "\\"+Integer.toOctalString(v);
-		}
-		String hex = Integer.toHexString(v|0x10000).substring(1,5);
-		return "\\u"+hex;
-	}
-
 	public int predict(IntStream input) {
 		Interpreter interp = new Interpreter(nfa.grammar, input);
 		return interp.predict(this);
@@ -741,11 +659,6 @@ public class DFA {
 	 *  indicates it's a new state.
      */
     protected DFAState addState(DFAState d) {
-		/*
-		if ( decisionNumber==14 ) {
-            System.out.println("addState: "+d.stateNumber);
-        }
-        */
 		if ( getUserMaxLookahead()>0 ) {
 			return d;
 		}
@@ -774,7 +687,7 @@ public class DFA {
 		}
 	}
 
-	public Map getUniqueStates() {
+	public Map<DFAState, DFAState> getUniqueStates() {
 		return uniqueStates;
 	}
 
@@ -813,10 +726,9 @@ public class DFA {
     }
 
 	public boolean canInlineDecision() {
-		// TODO: and ! too big
-		return CodeGenerator.GEN_ACYCLIC_DFA_INLINE &&
-			!isCyclic() &&
-		    !probe.isNonLLStarDecision();
+		return !isCyclic() &&
+		    !probe.isNonLLStarDecision() &&
+			getNumberOfStates() < CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE;
 	}
 
 	/** Is this DFA derived from the NFA for the Tokens rule? */
@@ -825,10 +737,10 @@ public class DFA {
 			return false;
 		}
 		NFAState nfaStart = getNFADecisionStartState();
-		NFAState TokensRuleStart =
-			nfa.grammar.getRuleStartState(Grammar.ARTIFICIAL_TOKENS_RULENAME);
+		Rule r = nfa.grammar.getLocallyDefinedRule(Grammar.ARTIFICIAL_TOKENS_RULENAME);
+		NFAState TokensRuleStart = r.startState;
 		NFAState TokensDecisionStart =
-			(NFAState)TokensRuleStart.transition(0).target;
+			(NFAState)TokensRuleStart.transition[0].target;
 		return nfaStart == TokensDecisionStart;
 	}
 
@@ -840,32 +752,12 @@ public class DFA {
 		if ( user_k>=0 ) { // cache for speed
 			return user_k;
 		}
-		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decisionNumber);
-		Object k = blockAST.getOption("k");
-		if ( k==null ) {
-			user_k = nfa.grammar.getGrammarMaxLookahead();
-			return user_k;
-		}
-		if (k instanceof Integer) {
-			Integer kI = (Integer)k;
-			user_k = kI.intValue();
-		}
-		else {
-			// must be String "*"
-			if ( k.equals("*") ) {
-				user_k = 0;
-			}
-		}
+		user_k = nfa.grammar.getUserMaxLookahead(decisionNumber);
 		return user_k;
 	}
 
 	public boolean getAutoBacktrackMode() {
-		String autoBacktrack =
-			(String)decisionNFAStartState.getAssociatedASTNode().getOption("backtrack");
-		if ( autoBacktrack==null ) {
-			autoBacktrack = (String)nfa.grammar.getOption("backtrack");
-		}
-		return autoBacktrack!=null&&autoBacktrack.equals("true");
+		return nfa.grammar.getAutoBacktrackMode(decisionNumber);
 	}
 
 	public void setUserMaxLookahead(int k) {
@@ -884,7 +776,7 @@ public class DFA {
      *  be computed or for which no single DFA accept state predicts those
      *  alts.  Must call verify() first before this makes sense.
      */
-    public List getUnreachableAlts() {
+    public List<Integer> getUnreachableAlts() {
         return unreachableAlts;
     }
 
@@ -897,11 +789,11 @@ public class DFA {
 	 *
 	 *  3. alts i and j have disjoint lookahead if no sem preds
 	 *  4. if sem preds, nondeterministic alts must be sufficiently covered
+	 *
+	 *  This is avoided if analysis bails out for any reason.
 	 */
 	public void verify() {
-		if ( !probe.nonLLStarDecision ) { // avoid if non-LL(*)
-			doesStateReachAcceptState(startState);
-		}
+		doesStateReachAcceptState(startState);
 	}
 
     /** figure out if this state eventually reaches an accept state and
@@ -960,18 +852,39 @@ public class DFA {
             d.setAcceptStateReachable(REACHABLE_YES);
         }
         else {
-			/*
-			if ( d.getNumberOfTransitions()==0 ) {
-				probe.reportDanglingState(d);
-			}
-			*/
             d.setAcceptStateReachable(REACHABLE_NO);
 			reduced = false;
         }
         return anEdgeReachesAcceptState;
     }
 
-    public NFAState getNFADecisionStartState() {
+	/** Walk all accept states and find the manually-specified synpreds.
+	 *  Gated preds are not always hoisted
+	 *  I used to do this in the code generator, but that is too late.
+	 *  This converter tries to avoid computing DFA for decisions in
+	 *  syntactic predicates that are not ever used such as those
+	 *  created by autobacktrack mode.
+	 */
+	public void findAllGatedSynPredsUsedInDFAAcceptStates() {
+		int nAlts = getNumberOfAlts();
+		for (int i=1; i<=nAlts; i++) {
+			DFAState a = getAcceptState(i);
+			//System.out.println("alt "+i+": "+a);
+			if ( a!=null ) {
+				Set synpreds = a.getGatedSyntacticPredicatesInNFAConfigurations();
+				if ( synpreds!=null ) {
+					// add all the predicates we find (should be just one, right?)
+					for (Iterator it = synpreds.iterator(); it.hasNext();) {
+						SemanticContext semctx = (SemanticContext) it.next();
+						// System.out.println("synpreds: "+semctx);
+						nfa.grammar.synPredUsedInDFA(this, semctx);
+					}
+				}
+			}
+		}
+	}
+
+	public NFAState getNFADecisionStartState() {
         return decisionNFAStartState;
     }
 
@@ -991,22 +904,64 @@ public class DFA {
         return decisionNFAStartState.getDecisionNumber();
     }
 
-    /** What GrammarAST node (derived from the grammar) is this DFA
+	/** If this DFA failed to finish during construction, we might be
+	 *  able to retry with k=1 but we need to know whether it will
+	 *  potentially succeed.  Can only succeed if there is a predicate
+	 *  to resolve the issue.  Don't try if k=1 already as it would
+	 *  cycle forever.  Timeout can retry with k=1 even if no predicate
+	 *  if k!=1.
+	 */
+	public boolean okToRetryDFAWithK1() {
+		boolean nonLLStarOrOverflowAndPredicateVisible =
+			(probe.isNonLLStarDecision()||probe.analysisOverflowed()) &&
+		    predicateVisible; // auto backtrack or manual sem/syn
+		return getUserMaxLookahead()!=1 &&
+			 (analysisTimedOut() || nonLLStarOrOverflowAndPredicateVisible);
+	}
+
+	public String getReasonForFailure() {
+		StringBuffer buf = new StringBuffer();
+		if ( probe.isNonLLStarDecision() ) {
+			buf.append("non-LL(*)");
+			if ( predicateVisible ) {
+				buf.append(" && predicate visible");
+			}
+		}
+		if ( probe.analysisOverflowed() ) {
+			buf.append("recursion overflow");
+			if ( predicateVisible ) {
+				buf.append(" && predicate visible");
+			}
+		}
+		if ( analysisTimedOut() ) {
+			if ( buf.length()>0 ) {
+				buf.append(" && ");
+			}
+			buf.append("timed out (>");
+			buf.append(DFA.MAX_TIME_PER_DFA_CREATION);
+			buf.append("ms)");
+		}
+		buf.append("\n");
+		return buf.toString();
+	}
+
+	/** What GrammarAST node (derived from the grammar) is this DFA
      *  associated with?  It will point to the start of a block or
      *  the loop back of a (...)+ block etc...
      */
     public GrammarAST getDecisionASTNode() {
-        return decisionNFAStartState.getAssociatedASTNode();
+        return decisionNFAStartState.associatedASTNode;
     }
 
     public boolean isGreedy() {
 		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decisionNumber);
-		String v = (String)blockAST.getOption("greedy");
+		Object v = nfa.grammar.getBlockOption(blockAST,"greedy");
 		if ( v!=null && v.equals("false") ) {
 			return false;
 		}
         return true;
-    }
+
+	}
 
     public DFAState newState() {
         DFAState n = new DFAState(this);
@@ -1029,8 +984,8 @@ public class DFA {
 		return nAlts;
 	}
 
-	public boolean analysisAborted() {
-		return probe.analysisAborted();
+	public boolean analysisTimedOut() {
+		return probe.analysisTimedOut();
 	}
 
     protected void initAltRelatedInfo() {
diff --git a/src/org/antlr/analysis/DFAOptimizer.java b/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
similarity index 98%
rename from src/org/antlr/analysis/DFAOptimizer.java
rename to tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
index 8d865be..52e0847 100644
--- a/src/org/antlr/analysis/DFAOptimizer.java
+++ b/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
@@ -122,6 +122,8 @@ public class DFAOptimizer {
 
 	/** Used by DFA state machine generator to avoid infinite recursion
 	 *  resulting from cycles int the DFA.  This is a set of int state #s.
+	 *  This is a side-effect of calling optimize; can't clear after use
+	 *  because code gen needs it.
 	 */
 	protected Set visited = new HashSet();
 
diff --git a/src/org/antlr/analysis/DFAState.java b/tool/src/main/java/org/antlr/analysis/DFAState.java
similarity index 77%
rename from src/org/antlr/analysis/DFAState.java
rename to tool/src/main/java/org/antlr/analysis/DFAState.java
index 115f471..4c2085b 100644
--- a/src/org/antlr/analysis/DFAState.java
+++ b/tool/src/main/java/org/antlr/analysis/DFAState.java
@@ -28,6 +28,7 @@
 package org.antlr.analysis;
 
 import org.antlr.misc.IntSet;
+import org.antlr.misc.MultiMap;
 import org.antlr.misc.OrderedHashSet;
 import org.antlr.misc.Utils;
 import org.antlr.tool.Grammar;
@@ -73,7 +74,8 @@ public class DFAState extends State {
     /** Track the transitions emanating from this DFA state.  The List
      *  elements are Transition objects.
      */
-    protected List transitions = new ArrayList(INITIAL_NUM_TRANSITIONS);
+    protected List<Transition> transitions =
+		new ArrayList<Transition>(INITIAL_NUM_TRANSITIONS);
 
 	/** When doing an acyclic DFA, this is the number of lookahead symbols
 	 *  consumed to reach this state.  This value may be nonzero for most
@@ -101,7 +103,7 @@ public class DFAState extends State {
 	 *  rule too many times (stack would grow beyond a threshold), it
 	 *  marks the state has aborted and notifies the DecisionProbe.
 	 */
-	protected boolean abortedDueToRecursionOverflow = false;
+	public boolean abortedDueToRecursionOverflow = false;
 
 	/** If we detect recursion on more than one alt, decision is non-LL(*),
 	 *  but try to isolate it to only those states whose closure operations
@@ -112,6 +114,12 @@ public class DFAState extends State {
 	 *    | X Y  // LL(2) decision; don't abort and use k=1 plus backtracking
 	 *    | X Z
 	 *    ;
+	 *
+	 *  12/13/2007: Actually this has caused problems.  If k=*, must terminate
+	 *  and throw out entire DFA; retry with k=1.  Since recursive, do not
+	 *  attempt more closure ops as it may take forever.  Exception thrown
+	 *  now and we simply report the problem.  If synpreds exist, I'll retry
+	 *  with k=1.
 	 */
 	protected boolean abortedDueToMultipleRecursiveAlts = false;
 
@@ -122,16 +130,27 @@ public class DFAState extends State {
 
 	protected int cachedUniquelyPredicatedAlt = PREDICTED_ALT_UNSET;
 
-    /** The set of NFA configurations (state,alt,context) for this DFA state */
-    protected Set nfaConfigurations = new HashSet();
+	public int minAltInConfigurations=Integer.MAX_VALUE;
+
+	public boolean atLeastOneConfigurationHasAPredicate = false;
+
+	/** The set of NFA configurations (state,alt,context) for this DFA state */
+    public OrderedHashSet<NFAConfiguration> nfaConfigurations =
+		new OrderedHashSet<NFAConfiguration>();
 
-    /** Used to prevent the closure operation from looping to itself and
+	public List<NFAConfiguration> configurationsWithLabeledEdges =
+		new ArrayList<NFAConfiguration>();
+
+	/** Used to prevent the closure operation from looping to itself and
      *  hence looping forever.  Sensitive to the NFA state, the alt, and
-     *  the context.  This just the nfa config set because we want to
+     *  the stack context.  This is just the nfa config set because we want to
 	 *  prevent closures only on states contributed by closure not reach
 	 *  operations.
+	 *
+	 *  Two configurations identical including semantic context are
+	 *  considered the same closure computation.  @see NFAToDFAConverter.closureBusy().
      */
-	protected Set closureBusy = new HashSet();
+	protected Set<NFAConfiguration> closureBusy = new HashSet<NFAConfiguration>();
 
 	/** As this state is constructed (i.e., as NFA states are added), we
      *  can easily check for non-epsilon transitions because the only
@@ -140,15 +159,21 @@ public class DFAState extends State {
      *  for all possible transitions.  That is of the order: size(label space)
      *  times size(nfa states), which can be pretty damn big.  It's better
      *  to simply track possible labels.
-     *  This is type List<Label>.
      */
-    protected OrderedHashSet reachableLabels = new OrderedHashSet();
+    protected OrderedHashSet<Label> reachableLabels;
 
     public DFAState(DFA dfa) {
         this.dfa = dfa;
     }
 
-    public Transition transition(int i) {
+	public void reset() {
+		//nfaConfigurations = null; // getGatedPredicatesInNFAConfigurations needs
+		configurationsWithLabeledEdges = null;
+		closureBusy = null;
+		reachableLabels = null;
+	}
+
+	public Transition transition(int i) {
         return (Transition)transitions.get(i);
     }
 
@@ -164,12 +189,12 @@ public class DFAState extends State {
 	 *  the transition number from 0..n-1.
 	 */
     public int addTransition(DFAState target, Label label) {
-        transitions.add( new Transition(label, target) );
+		transitions.add( new Transition(label, target) );
 		return transitions.size()-1;
     }
 
     public Transition getTransition(int trans) {
-        return (Transition)transitions.get(trans);
+        return transitions.get(trans);
     }
 
 	public void removeTransition(int trans) {
@@ -200,28 +225,48 @@ public class DFAState extends State {
 
         nfaConfigurations.add(c);
 
-        // update hashCode; for some reason using context.hashCode() also
+		// track min alt rather than compute later
+		if ( c.alt < minAltInConfigurations ) {
+			minAltInConfigurations = c.alt;
+		}
+
+		if ( c.semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
+			atLeastOneConfigurationHasAPredicate = true;
+		}
+
+		// update hashCode; for some reason using context.hashCode() also
         // makes the GC take like 70% of the CPU and is slow!
         cachedHashCode += c.state + c.alt;
 
-        // update reachableLabels
-        if ( state.transition(0)!=null ) {
-            Label label = state.transition(0).label;
-            if ( !(label.isEpsilon()||label.isSemanticPredicate()) ) {
-                if ( state.transition(1)==null ) {
-                    c.singleAtomTransitionEmanating = true;
-                }
-                addReachableLabel(label);
-            }
-        }
+		// update reachableLabels
+		// We're adding an NFA state; check to see if it has a non-epsilon edge
+		if ( state.transition[0] != null ) {
+			Label label = state.transition[0].label;
+			if ( !(label.isEpsilon()||label.isSemanticPredicate()) ) {
+				// this NFA state has a non-epsilon edge, track for fast
+				// walking later when we do reach on this DFA state we're
+				// building.
+				configurationsWithLabeledEdges.add(c);
+				if ( state.transition[1] ==null ) {
+					// later we can check this to ignore o-A->o states in closure
+					c.singleAtomTransitionEmanating = true;
+				}
+				addReachableLabel(label);
+			}
+		}
     }
 
-	public void addNFAConfiguration(NFAState state, int alt, NFAContext context, SemanticContext semanticContext) {
+	public NFAConfiguration addNFAConfiguration(NFAState state,
+												int alt,
+												NFAContext context,
+												SemanticContext semanticContext)
+	{
 		NFAConfiguration c = new NFAConfiguration(state.stateNumber,
 												  alt,
 												  context,
 												  semanticContext);
 		addNFAConfiguration(state, c);
+		return c;
 	}
 
 	/** Add label uniquely and disjointly; intersection with
@@ -258,11 +303,14 @@ public class DFAState extends State {
      *  Single element labels are treated as sets to make the code uniform.
      */
     protected void addReachableLabel(Label label) {
-        /*
+		if ( reachableLabels==null ) {
+			reachableLabels = new OrderedHashSet<Label>();
+		}
+		/*
 		System.out.println("addReachableLabel to state "+dfa.decisionNumber+"."+stateNumber+": "+label.getSet().toString(dfa.nfa.grammar));
 		System.out.println("start of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
 				"reachableLabels="+reachableLabels.toString());
-        */
+				*/
 		if ( reachableLabels.contains(label) ) { // exact label present
             return;
         }
@@ -271,31 +319,24 @@ public class DFAState extends State {
         int n = reachableLabels.size(); // only look at initial elements
         // walk the existing list looking for the collision
         for (int i=0; i<n; i++) {
-            Label rl = (Label)reachableLabels.get(i);
-            /*
-            if ( label.equals(rl) ) {
-                // OPTIMIZATION:
-                // exact label already here, just return; previous addition
-                // would have made everything unique/disjoint
-                return;
-            }
-            */
-            IntSet s_i = rl.getSet();
-            IntSet intersection = s_i.and(t);
+			Label rl = reachableLabels.get(i);
             /*
 			System.out.println("comparing ["+i+"]: "+label.toString(dfa.nfa.grammar)+" & "+
                     rl.toString(dfa.nfa.grammar)+"="+
                     intersection.toString(dfa.nfa.grammar));
             */
-			if ( intersection.isNil() ) {
+			if ( !Label.intersect(label, rl) ) {
                 continue;
             }
+			//System.out.println(label+" collides with "+rl);
 
-            // For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
+			// For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
             // (ignoring s_i-t if nil; don't put in list)
 
             // Replace existing s_i with intersection since we
             // know that will always be a non nil character class
+			IntSet s_i = rl.getSet();
+			IntSet intersection = s_i.and(t);
             reachableLabels.set(i, new Label(intersection));
 
             // Compute s_i-t to see what is in current set and not in incoming
@@ -340,20 +381,21 @@ public class DFAState extends State {
         return reachableLabels;
     }
 
-    public Set getNFAConfigurations() {
-        return this.nfaConfigurations;
-    }
-
-    public void setNFAConfigurations(Set configs) {
-        this.nfaConfigurations = configs;
-    }
+	public void setNFAConfigurations(OrderedHashSet<NFAConfiguration> configs) {
+		this.nfaConfigurations = configs;
+	}
 
     /** A decent hash for a DFA state is the sum of the NFA state/alt pairs.
      *  This is used when we add DFAState objects to the DFA.states Map and
      *  when we compare DFA states.  Computed in addNFAConfiguration()
      */
     public int hashCode() {
-        return cachedHashCode;
+		if ( cachedHashCode==0 ) {
+			// LL(1) algorithm doesn't use NFA configurations, which
+			// dynamically compute hashcode; must have something; use super
+			return super.hashCode();
+		}
+		return cachedHashCode;
     }
 
     /** Two DFAStates are equal if their NFA configuration sets are the
@@ -363,33 +405,15 @@ public class DFAState extends State {
      *  finite, there is a finite number of DFA states that can be processed.
      *  This is necessary to show that the algorithm terminates.
 	 *
-	 *  Cannot test the state numbers here because in DFA.addState we need
+	 *  Cannot test the DFA state numbers here because in DFA.addState we need
 	 *  to know if any other state exists that has this exact set of NFA
 	 *  configurations.  The DFAState state number is irrelevant.
      */
     public boolean equals(Object o) {
-        DFAState other = (DFAState)o;
-        if ( o==null ) {
-            return false;
-        }
-        if ( this.hashCode()!=other.hashCode() ) {
-            return false;
-        }
-		// if not same number of NFA configuraitons, cannot be same state
-		if ( this.nfaConfigurations.size() != other.nfaConfigurations.size() ) {
-			return false;
-		}
-
 		// compare set of NFA configurations in this set with other
-        Iterator iter = this.nfaConfigurations.iterator();
-        while (iter.hasNext()) {
-            NFAConfiguration myConfig = (NFAConfiguration) iter.next();
-			if ( !other.nfaConfigurations.contains(myConfig) ) {
-				return false;
-			}
-        }
-        return true;
-    }
+        DFAState other = (DFAState)o;
+		return this.nfaConfigurations.equals(other.nfaConfigurations);
+	}
 
     /** Walk each configuration and if they are all the same alt, return
      *  that alt else return NFA.INVALID_ALT_NUMBER.  Ignore resolved
@@ -403,23 +427,22 @@ public class DFAState extends State {
 			return cachedUniquelyPredicatedAlt;
 		}
         int alt = NFA.INVALID_ALT_NUMBER;
-        Iterator iter = nfaConfigurations.iterator();
-        NFAConfiguration configuration;
-        while (iter.hasNext()) {
-            configuration = (NFAConfiguration) iter.next();
-            // ignore anything we resolved; predicates will still result
-            // in transitions out of this state, so must count those
-            // configurations; i.e., don't ignore resolveWithPredicate configs
-            if ( configuration.resolved ) {
-                continue;
-            }
-            if ( alt==NFA.INVALID_ALT_NUMBER ) {
-                alt = configuration.alt; // found first nonresolved alt
-            }
-            else if ( configuration.alt!=alt ) {
-                return NFA.INVALID_ALT_NUMBER;
-            }
-        }
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
+			// ignore anything we resolved; predicates will still result
+			// in transitions out of this state, so must count those
+			// configurations; i.e., don't ignore resolveWithPredicate configs
+			if ( configuration.resolved ) {
+				continue;
+			}
+			if ( alt==NFA.INVALID_ALT_NUMBER ) {
+				alt = configuration.alt; // found first nonresolved alt
+			}
+			else if ( configuration.alt!=alt ) {
+				return NFA.INVALID_ALT_NUMBER;
+			}
+		}
 		this.cachedUniquelyPredicatedAlt = alt;
         return alt;
     }
@@ -430,10 +453,9 @@ public class DFAState extends State {
 	 */ 
 	public int getUniqueAlt() {
 		int alt = NFA.INVALID_ALT_NUMBER;
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
 			if ( alt==NFA.INVALID_ALT_NUMBER ) {
 				alt = configuration.alt; // found first alt
 			}
@@ -458,10 +480,9 @@ public class DFAState extends State {
 	 */
 	public Set getDisabledAlternatives() {
 		Set disabled = new LinkedHashSet();
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
 			if ( configuration.resolved ) {
 				disabled.add(Utils.integer(configuration.alt));
 			}
@@ -469,22 +490,6 @@ public class DFAState extends State {
 		return disabled;
 	}
 
-	/**
-	public int getNumberOfEOTNFAStates() {
-		int n = 0;
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
-			NFAState s = dfa.nfa.getState(configuration.state);
-			if ( s.isEOTState() ) {
-				n++;
-			}
-		}
-		return n;
-	}
-    */
-	
 	protected Set getNonDeterministicAlts() {
 		int user_k = dfa.getUserMaxLookahead();
 		if ( user_k>0 && user_k==k ) {
@@ -517,35 +522,29 @@ public class DFAState extends State {
 	 *  Don't report conflicts for DFA states that have conflicting Tokens
 	 *  rule NFA states; they will be resolved in favor of the first rule.
      */
-    protected Set getConflictingAlts() {
+    protected Set<Integer> getConflictingAlts() {
 		// TODO this is called multiple times: cache result?
 		//System.out.println("getNondetAlts for DFA state "+stateNumber);
- 		Set nondeterministicAlts = new HashSet();
+ 		Set<Integer> nondeterministicAlts = new HashSet<Integer>();
 
 		// If only 1 NFA conf then no way it can be nondeterministic;
 		// save the overhead.  There are many o-a->o NFA transitions
 		// and so we save a hash map and iterator creation for each
 		// state.
-		if ( nfaConfigurations.size()<=1 ) {
+		int numConfigs = nfaConfigurations.size();
+		if ( numConfigs <=1 ) {
 			return null;
 		}
 
 		// First get a list of configurations for each state.
-		// Most of the time, each state will have one associated configuration
-		Iterator iter = nfaConfigurations.iterator();
-		Map stateToConfigListMap = new HashMap();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+		// Most of the time, each state will have one associated configuration.
+		MultiMap<Integer, NFAConfiguration> stateToConfigListMap =
+			new MultiMap<Integer, NFAConfiguration>();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
 			Integer stateI = Utils.integer(configuration.state);
-			List prevConfigs = (List)stateToConfigListMap.get(stateI);
-			if ( prevConfigs==null ) {
-				prevConfigs = new ArrayList();
-				stateToConfigListMap.put(stateI, prevConfigs);
-			}
-			prevConfigs.add(configuration);
+			stateToConfigListMap.map(stateI, configuration);
 		}
-
 		// potential conflicts are states with > 1 configuration and diff alts
 		Set states = stateToConfigListMap.keySet();
 		int numPotentialConflicts = 0;
@@ -554,7 +553,8 @@ public class DFAState extends State {
 			boolean thisStateHasPotentialProblem = false;
 			List configsForState = (List)stateToConfigListMap.get(stateI);
 			int alt=0;
-			for (int i = 0; i < configsForState.size() && configsForState.size()>1 ; i++) {
+			int numConfigsForState = configsForState.size();
+			for (int i = 0; i < numConfigsForState && numConfigsForState>1 ; i++) {
 				NFAConfiguration c = (NFAConfiguration) configsForState.get(i);
 				if ( alt==0 ) {
 					alt = c.alt;
@@ -568,8 +568,14 @@ public class DFAState extends State {
 					// together in Tokens rule.  We want to silently resolve
 					// to the first token definition ala lex/flex by ignoring
 					// these conflicts.
+					// Also this ensures that lexers look for more and more
+					// characters (longest match) before resorting to predicates.
+					// TestSemanticPredicates.testLexerMatchesLongestThenTestPred()
+					// for example would terminate at state s1 and test predicate
+					// meaning input "ab" would test preds to decide what to
+					// do but it should match rule C w/o testing preds.
 					if ( dfa.nfa.grammar.type!=Grammar.LEXER ||
-						 !dfa.decisionNFAStartState.enclosingRule.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
+						 !dfa.decisionNFAStartState.enclosingRule.name.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
 					{
 						numPotentialConflicts++;
 						thisStateHasPotentialProblem = true;
@@ -607,9 +613,13 @@ public class DFAState extends State {
 			List configsForState = (List)stateToConfigListMap.get(stateI);
 			// compare each configuration pair s, t to ensure:
 			// s.ctx different than t.ctx if s.alt != t.alt
-			for (int i = 0; configsForState!=null && i < configsForState.size(); i++) {
+			int numConfigsForState = 0;
+			if ( configsForState!=null ) {
+				numConfigsForState = configsForState.size();
+			}
+			for (int i = 0; i < numConfigsForState; i++) {
 				NFAConfiguration s = (NFAConfiguration) configsForState.get(i);
-				for (int j = i+1; j < configsForState.size(); j++) {
+				for (int j = i+1; j < numConfigsForState; j++) {
 					NFAConfiguration t = (NFAConfiguration)configsForState.get(j);
 					// conflicts means s.ctx==t.ctx or s.ctx is a stack
 					// suffix of t.ctx or vice versa (if alts differ).
@@ -632,11 +642,10 @@ public class DFAState extends State {
 	 *  DFA state.
 	 */
 	public Set getAltSet() {
+		int numConfigs = nfaConfigurations.size();
 		Set alts = new HashSet();
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
 			alts.add(Utils.integer(configuration.alt));
 		}
 		if ( alts.size()==0 ) {
@@ -645,51 +654,11 @@ public class DFAState extends State {
 		return alts;
 	}
 
-	/** Get the set of all states mentioned by all NFA configurations in this
-	 *  DFA state associated with alt.
-	 */
-	public Set getNFAStatesForAlt(int alt) {
-		Set alts = new HashSet();
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
-			if ( configuration.alt == alt ) {
-				alts.add(Utils.integer(configuration.state));
-			}
-		}
-		return alts;
-	}
-
-	/** For gated productions, we need a list of all predicates for the
-	 *  target of an edge so we can gate the edge based upon the predicates
-	 *  associated with taking that path (if any).
-	 *
-	 *  experimental 11/29/2005
-	 *
-	public Set getGatedPredicatesInNFAConfigurations() {
-		Set preds = new HashSet();
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
-			if ( configuration.semanticContext.isGated() ) {
-				preds.add(configuration.semanticContext);
-			}
-		}
-		if ( preds.size()==0 ) {
-			return null;
-		}
-		return preds;
-	}
-	 */
-
-	public Set getSyntacticPredicatesInNFAConfigurations() {
-		Set synpreds = new HashSet();
-		Iterator iter = nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+	public Set getGatedSyntacticPredicatesInNFAConfigurations() {
+		int numConfigs = nfaConfigurations.size();
+		Set<SemanticContext> synpreds = new HashSet<SemanticContext>();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
 			SemanticContext gatedPredExpr =
 				configuration.semanticContext.getGatedPredicateContext();
 			// if this is a manual syn pred (gated and syn pred), add
@@ -732,11 +701,10 @@ public class DFAState extends State {
 	 *  TODO: cache this as it's called a lot; or at least set bit if >1 present in state
 	 */
 	public SemanticContext getGatedPredicatesInNFAConfigurations() {
-		Iterator iter = nfaConfigurations.iterator();
 		SemanticContext unionOfPredicatesFromAllAlts = null;
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
 			SemanticContext gatedPredExpr =
 				configuration.semanticContext.getGatedPredicateContext();
 			if ( gatedPredExpr==null ) {
@@ -783,16 +751,13 @@ public class DFAState extends State {
     public String toString() {
         StringBuffer buf = new StringBuffer();
         buf.append(stateNumber+":{");
-        Iterator iter = nfaConfigurations.iterator();
-        int i = 1;
-        while (iter.hasNext()) {
-            NFAConfiguration configuration = (NFAConfiguration) iter.next();
-            if ( i>1 ) {
-                buf.append(", ");
-            }
-            buf.append(configuration);
-            i++;
-        }
+		for (int i = 0; i < nfaConfigurations.size(); i++) {
+			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			buf.append(configuration);
+		}
         buf.append("}");
         return buf.toString();
     }
diff --git a/src/org/antlr/analysis/DecisionProbe.java b/tool/src/main/java/org/antlr/analysis/DecisionProbe.java
similarity index 80%
rename from src/org/antlr/analysis/DecisionProbe.java
rename to tool/src/main/java/org/antlr/analysis/DecisionProbe.java
index 3e3107f..2fe4bf1 100644
--- a/src/org/antlr/analysis/DecisionProbe.java
+++ b/tool/src/main/java/org/antlr/analysis/DecisionProbe.java
@@ -30,11 +30,14 @@ package org.antlr.analysis;
 import org.antlr.tool.ErrorManager;
 import org.antlr.tool.Grammar;
 import org.antlr.tool.GrammarAST;
-import org.antlr.tool.ANTLRParser;
+import org.antlr.grammar.v2.ANTLRParser;
 import org.antlr.misc.Utils;
+import org.antlr.misc.MultiMap;
 
 import java.util.*;
 
+import antlr.Token;
+
 /** Collection of information about what is wrong with a decision as
  *  discovered while building the DFA predictor.
  *
@@ -74,47 +77,47 @@ public class DecisionProbe {
 	 *  is able to reach the same NFA state by starting at more than one
 	 *  alternative's left edge.  Though, later, we may find that predicates
 	 *  resolve the issue, but track info anyway.
-	 *  Set<DFAState>.  Note that from the DFA state, you can ask for
+	 *  Note that from the DFA state, you can ask for
 	 *  which alts are nondeterministic.
 	 */
-	protected Set statesWithSyntacticallyAmbiguousAltsSet = new HashSet();
+	protected Set<DFAState> statesWithSyntacticallyAmbiguousAltsSet = new HashSet<DFAState>();
 
 	/** Track just like stateToSyntacticallyAmbiguousAltsMap, but only
 	 *  for nondeterminisms that arise in the Tokens rule such as keyword vs
 	 *  ID rule.  The state maps to the list of Tokens rule alts that are
 	 *  in conflict.
-	 *  Map<DFAState, Set<int>>
 	 */
-	protected Map stateToSyntacticallyAmbiguousTokensRuleAltsMap = new HashMap();
+	protected Map<DFAState, Set<Integer>> stateToSyntacticallyAmbiguousTokensRuleAltsMap =
+		new HashMap<DFAState, Set<Integer>>();
 
 	/** Was a syntactic ambiguity resolved with predicates?  Any DFA
 	 *  state that predicts more than one alternative, must be resolved
 	 *  with predicates or it should be reported to the user.
-	 *  Set<DFAState>
 	 */
-	protected Set statesResolvedWithSemanticPredicatesSet = new HashSet();
+	protected Set<DFAState> statesResolvedWithSemanticPredicatesSet = new HashSet<DFAState>();
 
 	/** Track the predicates for each alt per DFA state;
 	 *  more than one DFA state might have syntactically ambig alt prediction.
-	 *  This is Map<DFAState, Map<int,SemanticContext>>; that is, it
-	 *  maps DFA state to another map, mapping alt number to a
+	 *  Maps DFA state to another map, mapping alt number to a
 	 *  SemanticContext (pred(s) to execute to resolve syntactic ambiguity).
 	 */
-	protected Map stateToAltSetWithSemanticPredicatesMap = new HashMap();
+	protected Map<DFAState, Map<Integer,SemanticContext>> stateToAltSetWithSemanticPredicatesMap =
+		new HashMap<DFAState, Map<Integer,SemanticContext>>();
 
-	/** Map<DFAState,List<int>> Tracks alts insufficiently covered.
+	/** Tracks alts insufficiently covered.
 	 *  For example, p1||true gets reduced to true and so leaves
 	 *  whole alt uncovered.  This maps DFA state to the set of alts
 	 */
-	protected Map stateToIncompletelyCoveredAltsMap = new HashMap();
+	protected Map<DFAState,Map<Integer, Set<Token>>> stateToIncompletelyCoveredAltsMap =
+		new HashMap<DFAState,Map<Integer, Set<Token>>>();
 
 	/** The set of states w/o emanating edges and w/o resolving sem preds. */
-	protected Set danglingStates = new HashSet();
+	protected Set<DFAState> danglingStates = new HashSet<DFAState>();
 
 	/** The overall list of alts within the decision that have at least one
 	 *  conflicting input sequence.
 	 */
-	protected Set altsWithProblem = new HashSet();
+	protected Set<Integer> altsWithProblem = new HashSet<Integer>();
 
 	/** If decision with > 1 alt has recursion in > 1 alt, it's nonregular
 	 *  lookahead.  The decision cannot be made with a DFA.
@@ -124,21 +127,28 @@ public class DecisionProbe {
 
 	/** Recursion is limited to a particular depth.  If that limit is exceeded
 	 *  the proposed new NFAConfiguration is recorded for the associated DFA state.
-	 *  Map<Integer DFA state number,List<NFAConfiguration>>.
 	 */
-	protected Map stateToRecursiveOverflowConfigurationsMap = new HashMap();
+	protected MultiMap<Integer, NFAConfiguration> stateToRecursionOverflowConfigurationsMap =
+		new MultiMap<Integer, NFAConfiguration>();
+	/*
+	protected Map<Integer, List<NFAConfiguration>> stateToRecursionOverflowConfigurationsMap =
+		new HashMap<Integer, List<NFAConfiguration>>();
+		*/
 
 	/** Left recursion discovered.  The proposed new NFAConfiguration
 	 *  is recorded for the associated DFA state.
-	 *  Map<DFAState,List<NFAConfiguration>>.
+	protected Map<Integer,List<NFAConfiguration>> stateToLeftRecursiveConfigurationsMap =
+		new HashMap<Integer,List<NFAConfiguration>>();
 	 */
-	protected Map stateToLeftRecursiveConfigurationsMap = new HashMap();
 
 	/** Did ANTLR have to terminate early on the analysis of this decision? */
-	protected boolean terminated = false;
+	protected boolean timedOut = false;
+
+	/** Used to find paths through syntactically ambiguous DFA. If we've
+	 *  seen statement number before, what did we learn?
+	 */
+	protected Map<Integer, Integer> stateReachable;
 
-	/** Used to find paths through syntactically ambiguous DFA. */
-	protected Map stateReachable;
 	public static final Integer REACHABLE_BUSY = Utils.integer(-1);
 	public static final Integer REACHABLE_NO = Utils.integer(0);
 	public static final Integer REACHABLE_YES = Utils.integer(1);
@@ -150,9 +160,9 @@ public class DecisionProbe {
 	 *  infinite loop.  Stop.  Set<String>.  The strings look like
 	 *  stateNumber_labelIndex.
 	 */
-	protected Set statesVisitedAtInputDepth;
+	protected Set<String> statesVisitedAtInputDepth;
 
-	protected Set statesVisitedDuringSampleSequence;
+	protected Set<Integer> statesVisitedDuringSampleSequence;
 
 	public static boolean verbose = false;
 
@@ -204,14 +214,16 @@ public class DecisionProbe {
 	}
 
 	/** Did the analysis complete it's work? */
-	public boolean analysisAborted() {
-		return terminated;
+	public boolean analysisTimedOut() {
+		return timedOut;
 	}
 
+	/** Took too long to analyze a DFA */
 	public boolean analysisOverflowed() {
-		return stateToRecursiveOverflowConfigurationsMap.size()>0;
+		return stateToRecursionOverflowConfigurationsMap.size()>0;
 	}
 
+	/** Found recursion in > 1 alt */
 	public boolean isNonLLStarDecision() {
 		return nonLLStarDecision;
 	}
@@ -226,7 +238,7 @@ public class DecisionProbe {
 	 *  is the overall list of unreachable alternatives (either due to
 	 *  conflict resolution or alts w/o accept states).
 	 */
-	public List getUnreachableAlts() {
+	public List<Integer> getUnreachableAlts() {
 		return dfa.getUnreachableAlts();
 	}
 
@@ -280,24 +292,20 @@ public class DecisionProbe {
 	 */
 	public void removeRecursiveOverflowState(DFAState d) {
 		Integer stateI = Utils.integer(d.stateNumber);
-		stateToRecursiveOverflowConfigurationsMap.remove(stateI);
-	}
-
-	/*
-	public boolean dfaStateHasRecursionOverflow(DFAState d) {
-		Integer stateI = Utils.integer(d.stateNumber);
-		return stateToRecursiveOverflowConfigurationsMap.get(stateI)!=null;
+		stateToRecursionOverflowConfigurationsMap.remove(stateI);
 	}
-	*/
 
 	/** Return a List<Label> indicating an input sequence that can be matched
 	 *  from the start state of the DFA to the targetState (which is known
 	 *  to have a problem).
 	 */
-	public List getSampleNonDeterministicInputSequence(DFAState targetState) {
+	public List<Label> getSampleNonDeterministicInputSequence(DFAState targetState) {
 		Set dfaStates = getDFAPathStatesToTarget(targetState);
-		statesVisitedDuringSampleSequence = new HashSet();
-		List labels = new ArrayList(); // may access ith element; use array
+		statesVisitedDuringSampleSequence = new HashSet<Integer>();
+		List<Label> labels = new ArrayList<Label>(); // may access ith element; use array
+		if ( dfa==null || dfa.startState==null ) {
+			return labels;
+		}
 		getSampleInputSequenceUsingStateSet(dfa.startState,
 											targetState,
 											dfaStates,
@@ -364,7 +372,7 @@ public class DecisionProbe {
 
 		// add first state of actual alt
 		NFAState altStart = dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,alt);
-		NFAState isolatedAltStart = (NFAState)altStart.transition(0).target;
+		NFAState isolatedAltStart = (NFAState)altStart.transition[0].target;
 		path.add(isolatedAltStart);
 
 		// add the actual path now
@@ -388,6 +396,11 @@ public class DecisionProbe {
 		return (SemanticContext)altToPredMap.get(Utils.integer(alt));
 	}
 
+	/** At least one alt refs a sem or syn pred */
+	public boolean hasPredicate() {
+		return stateToAltSetWithSemanticPredicatesMap.size()>0;
+	}
+
 	public Set getNondeterministicStatesResolvedWithSemanticPredicate() {
 		return statesResolvedWithSemanticPredicatesSet;
 	}
@@ -395,8 +408,8 @@ public class DecisionProbe {
 	/** Return a list of alts whose predicate context was insufficient to
 	 *  resolve a nondeterminism for state d.
 	 */
-    public List getIncompletelyCoveredAlts(DFAState d) {
-		return (List)stateToIncompletelyCoveredAltsMap.get(d);
+	public Map<Integer, Set<Token>> getIncompletelyCoveredAlts(DFAState d) {
+		return stateToIncompletelyCoveredAltsMap.get(d);
 	}
 
 	public void issueWarnings() {
@@ -407,7 +420,7 @@ public class DecisionProbe {
 			ErrorManager.nonLLStarDecision(this);
 		}
 
-		if ( analysisAborted() ) {
+		if ( analysisTimedOut() ) {
 			// only report early termination errors if !backtracking
 			if ( !dfa.getAutoBacktrackMode() ) {
 				ErrorManager.analysisAborted(this);
@@ -426,6 +439,10 @@ public class DecisionProbe {
 				problemStates.iterator();
 			while (	it.hasNext() && !dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() ) {
 				DFAState d = (DFAState) it.next();
+				Map<Integer, Set<Token>> insufficientAltToLocations = getIncompletelyCoveredAlts(d);
+				if ( insufficientAltToLocations!=null && insufficientAltToLocations.size()>0 ) {
+					ErrorManager.insufficientPredicates(this,d,insufficientAltToLocations);
+				}
 				// don't report problem if resolved
 				if ( resolvedStates==null || !resolvedStates.contains(d) ) {
 					// first strip last alt from disableAlts if it's wildcard
@@ -436,10 +453,6 @@ public class DecisionProbe {
 						ErrorManager.nondeterminism(this,d);
 					}
 				}
-				List insufficientAlts = getIncompletelyCoveredAlts(d);
-				if ( insufficientAlts!=null && insufficientAlts.size()>0 ) {
-					ErrorManager.insufficientPredicates(this,insufficientAlts);
-				}
 			}
 		}
 
@@ -453,9 +466,31 @@ public class DecisionProbe {
 		}
 
 		if ( !nonLLStarDecision ) {
-			List unreachableAlts = dfa.getUnreachableAlts();
+			List<Integer> unreachableAlts = dfa.getUnreachableAlts();
 			if ( unreachableAlts!=null && unreachableAlts.size()>0 ) {
-				ErrorManager.unreachableAlts(this,unreachableAlts);
+				// give different msg if it's an empty Tokens rule from delegate
+				boolean isInheritedTokensRule = false;
+				if ( dfa.isTokensRuleDecision() ) {
+					for (Integer altI : unreachableAlts) {
+						GrammarAST decAST = dfa.getDecisionASTNode();
+						GrammarAST altAST = decAST.getChild(altI-1);
+						GrammarAST delegatedTokensAlt =
+							altAST.getFirstChildWithType(ANTLRParser.DOT);
+						if ( delegatedTokensAlt !=null ) {
+							isInheritedTokensRule = true;
+							ErrorManager.grammarWarning(ErrorManager.MSG_IMPORTED_TOKENS_RULE_EMPTY,
+														dfa.nfa.grammar,
+														null,
+														dfa.nfa.grammar.name,
+														delegatedTokensAlt.getFirstChild().getText());
+						}
+					}
+				}
+				if ( isInheritedTokensRule ) {
+				}
+				else {
+					ErrorManager.unreachableAlts(this,unreachableAlts);
+				}
 			}
 		}
 	}
@@ -497,7 +532,7 @@ public class DecisionProbe {
 	protected void issueRecursionWarnings() {
 		// RECURSION OVERFLOW
 		Set dfaStatesWithRecursionProblems =
-			stateToRecursiveOverflowConfigurationsMap.keySet();
+			stateToRecursionOverflowConfigurationsMap.keySet();
 		// now walk truly unique (unaliased) list of dfa states with inf recur
 		// Goal: create a map from alt to map<target,List<callsites>>
 		// Map<Map<String target, List<NFAState call sites>>
@@ -505,10 +540,9 @@ public class DecisionProbe {
 		// track a single problem DFA state for each alt
 		Map altToDFAState = new HashMap();
 		computeAltToProblemMaps(dfaStatesWithRecursionProblems,
-								stateToRecursiveOverflowConfigurationsMap,
+								stateToRecursionOverflowConfigurationsMap,
 								altToTargetToCallSitesMap, // output param
 								altToDFAState);            // output param
-		//System.out.println("altToTargetToCallSitesMap="+altToTargetToCallSitesMap);
 
 		// walk each alt with recursion overflow problems and generate error
 		Set alts = altToTargetToCallSitesMap.keySet();
@@ -527,43 +561,6 @@ public class DecisionProbe {
 										   targetRules,
 										   callSiteStates);
 		}
-
-		/* All  recursion determines now before analysis
-		// LEFT RECURSION
-		// TODO: hideous cut/paste of code; try to refactor
-
-		Set dfaStatesWithLeftRecursionProblems =
-			stateToLeftRecursiveConfigurationsMap.keySet();
-		Set dfaStatesUnaliased =
-			getUnaliasedDFAStateSet(dfaStatesWithLeftRecursionProblems);
-
-		// now walk truly unique (unaliased) list of dfa states with inf recur
-		// Goal: create a map from alt to map<target,List<callsites>>
-		// Map<Map<String target, List<NFAState call sites>>
-		altToTargetToCallSitesMap = new HashMap();
-		// track a single problem DFA state for each alt
-		altToDFAState = new HashMap();
-		computeAltToProblemMaps(dfaStatesUnaliased,
-								stateToLeftRecursiveConfigurationsMap,
-								altToTargetToCallSitesMap, // output param
-								altToDFAState);            // output param
-
-		// walk each alt with recursion overflow problems and generate error
-		alts = altToTargetToCallSitesMap.keySet();
-		sortedAlts = new ArrayList(alts);
-		Collections.sort(sortedAlts);
-		for (Iterator altsIt = sortedAlts.iterator(); altsIt.hasNext();) {
-			Integer altI = (Integer) altsIt.next();
-			Map targetToCallSiteMap =
-				(Map)altToTargetToCallSitesMap.get(altI);
-			Set targetRules = targetToCallSiteMap.keySet();
-			Collection callSiteStates = targetToCallSiteMap.values();
-			ErrorManager.leftRecursion(this,
-									   altI.intValue(),
-									   targetRules,
-									   callSiteStates);
-		}
-		*/
 	}
 
 	private void computeAltToProblemMaps(Set dfaStatesUnaliased,
@@ -578,9 +575,9 @@ public class DecisionProbe {
 			for (int i = 0; i < configs.size(); i++) {
 				NFAConfiguration c = (NFAConfiguration) configs.get(i);
 				NFAState ruleInvocationState = dfa.nfa.getState(c.state);
-				Transition transition0 = ruleInvocationState.transition(0);
+				Transition transition0 = ruleInvocationState.transition[0];
 				RuleClosureTransition ref = (RuleClosureTransition)transition0;
-				String targetRule = ((NFAState)ref.target).getEnclosingRule();
+				String targetRule = ((NFAState) ref.target).enclosingRule.name;
 				Integer altI = Utils.integer(c.alt);
 				Map targetToCallSiteMap =
 					(Map)altToTargetToCallSitesMap.get(altI);
@@ -625,55 +622,40 @@ public class DecisionProbe {
 		danglingStates.add(d);
 	}
 
-	public void reportEarlyTermination() {
-		terminated = true;
-		dfa.nfa.grammar.setOfDFAWhoseConversionTerminatedEarly.add(dfa);
+	public void reportAnalysisTimeout() {
+		timedOut = true;
+		dfa.nfa.grammar.setOfDFAWhoseAnalysisTimedOut.add(dfa);
 	}
 
 	/** Report that at least 2 alts have recursive constructs.  There is
 	 *  no way to build a DFA so we terminated.
 	 */
 	public void reportNonLLStarDecision(DFA dfa) {
-		//System.out.println("non-LL(*) DFA "+dfa.decisionNumber);
+		/*
+		System.out.println("non-LL(*) DFA "+dfa.decisionNumber+", alts: "+
+						   dfa.recursiveAltSet.toList());
+						   */
 		nonLLStarDecision = true;
 		altsWithProblem.addAll(dfa.recursiveAltSet.toList());
 	}
 
-	public void reportRecursiveOverflow(DFAState d,
-										NFAConfiguration recursiveNFAConfiguration)
+	public void reportRecursionOverflow(DFAState d,
+										NFAConfiguration recursionNFAConfiguration)
 	{
 		// track the state number rather than the state as d will change
 		// out from underneath us; hash wouldn't return any value
-		Integer stateI = Utils.integer(d.stateNumber);
-		List configs = (List)stateToRecursiveOverflowConfigurationsMap.get(stateI);
-		if ( configs==null ) {
-			configs = new ArrayList();
-			configs.add(recursiveNFAConfiguration);
-			stateToRecursiveOverflowConfigurationsMap.put(stateI, configs);
-		}
-		else {
-			configs.add(recursiveNFAConfiguration);
-		}
-	}
 
-	public void reportLeftRecursion(DFAState d,
-									NFAConfiguration leftRecursiveNFAConfiguration)
-	{
-		// track the state number rather than the state as d will change
-		// out from underneath us; hash wouldn't return any value
-		Integer stateI = Utils.integer(d.stateNumber);
-		List configs = (List)stateToLeftRecursiveConfigurationsMap.get(stateI);
-		if ( configs==null ) {
-			configs = new ArrayList();
-			configs.add(leftRecursiveNFAConfiguration);
-			stateToLeftRecursiveConfigurationsMap.put(stateI, configs);
-		}
-		else {
-			configs.add(leftRecursiveNFAConfiguration);
+		// left-recursion is detected in start state.  Since we can't
+		// call resolveNondeterminism() on the start state (it would
+		// not look k=1 to get min single token lookahead), we must
+		// prevent errors derived from this state.  Avoid start state
+		if ( d.stateNumber > 0 ) {
+			Integer stateI = Utils.integer(d.stateNumber);
+			stateToRecursionOverflowConfigurationsMap.map(stateI, recursionNFAConfiguration);
 		}
 	}
 
-	public void reportNondeterminism(DFAState d, Set nondeterministicAlts) {
+	public void reportNondeterminism(DFAState d, Set<Integer> nondeterministicAlts) {
 		altsWithProblem.addAll(nondeterministicAlts); // track overall list
 		statesWithSyntacticallyAmbiguousAltsSet.add(d);
 		dfa.nfa.grammar.setOfNondeterministicDecisionNumbers.add(
@@ -685,12 +667,16 @@ public class DecisionProbe {
 	 *  we don't print out warnings in favor of just picking the first token
 	 *  definition found in the grammar ala lex/flex.
 	 */
-	public void reportLexerRuleNondeterminism(DFAState d, Set nondeterministicAlts) {
+	public void reportLexerRuleNondeterminism(DFAState d, Set<Integer> nondeterministicAlts) {
 		stateToSyntacticallyAmbiguousTokensRuleAltsMap.put(d,nondeterministicAlts);
 	}
 
-	public void reportNondeterminismResolvedWithSemanticPredicate(DFAState d)
-	{
+	public void reportNondeterminismResolvedWithSemanticPredicate(DFAState d) {
+		// First, prevent a recursion warning on this state due to
+		// pred resolution
+		if ( d.abortedDueToRecursionOverflow ) {
+			d.dfa.probe.removeRecursiveOverflowState(d);
+		}
 		statesResolvedWithSemanticPredicatesSet.add(d);
 		//System.out.println("resolved with pred: "+d);
 		dfa.nfa.grammar.setOfNondeterministicDecisionNumbersResolvedWithPredicates.add(
@@ -710,9 +696,9 @@ public class DecisionProbe {
 	}
 
 	public void reportIncompletelyCoveredAlts(DFAState d,
-											  List alts)
+											  Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate)
 	{
-		stateToIncompletelyCoveredAltsMap.put(d, alts);
+		stateToIncompletelyCoveredAltsMap.put(d, altToLocationsReachableWithoutPredicate);
 	}
 
 	// S U P P O R T
@@ -727,13 +713,13 @@ public class DecisionProbe {
 		if ( startState==targetState ) {
 			states.add(targetState);
 			//System.out.println("found target DFA state "+targetState.getStateNumber());
-			stateReachable.put(startState, REACHABLE_YES);
+			stateReachable.put(startState.stateNumber, REACHABLE_YES);
 			return true;
 		}
 
 		DFAState s = startState;
 		// avoid infinite loops
-		stateReachable.put(s, REACHABLE_BUSY);
+		stateReachable.put(s.stateNumber, REACHABLE_BUSY);
 
 		// look for a path to targetState among transitions for this state
 		// stop when you find the first one; I'm pretty sure there is
@@ -741,12 +727,12 @@ public class DecisionProbe {
 		for (int i=0; i<s.getNumberOfTransitions(); i++) {
 			Transition t = s.transition(i);
 			DFAState edgeTarget = (DFAState)t.target;
-			Integer targetStatus = (Integer)stateReachable.get(edgeTarget);
+			Integer targetStatus = stateReachable.get(edgeTarget.stateNumber);
 			if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
 				continue;
 			}
 			if ( targetStatus==REACHABLE_YES ) { // return success!
-				stateReachable.put(s, REACHABLE_YES);
+				stateReachable.put(s.stateNumber, REACHABLE_YES);
 				return true;
 			}
 			if ( targetStatus==REACHABLE_NO ) { // try another transition
@@ -755,41 +741,25 @@ public class DecisionProbe {
 			// if null, target must be REACHABLE_UNKNOWN (i.e., unvisited)
 			if ( reachesState(edgeTarget, targetState, states) ) {
 				states.add(s);
-				stateReachable.put(s, REACHABLE_YES);
+				stateReachable.put(s.stateNumber, REACHABLE_YES);
 				return true;
 			}
 		}
 
-		stateReachable.put(s, REACHABLE_NO);
+		stateReachable.put(s.stateNumber, REACHABLE_NO);
 		return false; // no path to targetState found.
 	}
 
 	protected Set getDFAPathStatesToTarget(DFAState targetState) {
 		Set dfaStates = new HashSet();
 		stateReachable = new HashMap();
+		if ( dfa==null || dfa.startState==null ) {
+			return dfaStates;
+		}
 		boolean reaches = reachesState(dfa.startState, targetState, dfaStates);
 		return dfaStates;
 	}
 
-    /** Given a set of DFA states, return a set of NFA states associated
-	 *  with alt collected from all DFA states.  If alt==0 then collect
-	 *  all NFA states regardless of alt.
-	protected Set getNFAStatesFromDFAStatesForAlt(Set dfaStates, int alt) {
-		Set nfaStates = new LinkedHashSet();
-		for (Iterator it = dfaStates.iterator(); it.hasNext();) {
-			DFAState d = (DFAState) it.next();
-			Set configs = d.getNFAConfigurations();
-			for (Iterator configIter = configs.iterator(); configIter.hasNext();) {
-				NFAConfiguration c = (NFAConfiguration) configIter.next();
-				if ( alt==0 || c.alt==alt ) {
-					nfaStates.add(Utils.integer(c.state));
-				}
-			}
-		}
-		return nfaStates;
-	}
-	 */
-
 	/** Given a start state and a final state, find a list of edge labels
 	 *  between the two ignoring epsilon.  Limit your scan to a set of states
 	 *  passed in.  This is used to show a sample input sequence that is
@@ -802,16 +772,16 @@ public class DecisionProbe {
 	protected void getSampleInputSequenceUsingStateSet(State startState,
 													   State targetState,
 													   Set states,
-													   List labels)
+													   List<Label> labels)
 	{
-		statesVisitedDuringSampleSequence.add(startState);
+		statesVisitedDuringSampleSequence.add(startState.stateNumber);
 
 		// pick the first edge in states as the one to traverse
 		for (int i=0; i<startState.getNumberOfTransitions(); i++) {
 			Transition t = startState.transition(i);
 			DFAState edgeTarget = (DFAState)t.target;
 			if ( states.contains(edgeTarget) &&
-				 !statesVisitedDuringSampleSequence.contains(edgeTarget) )
+				 !statesVisitedDuringSampleSequence.contains(edgeTarget.stateNumber) )
 			{
 				labels.add(t.label); // traverse edge and track label
 				if ( edgeTarget!=targetState ) {
@@ -862,7 +832,7 @@ public class DecisionProbe {
 		// pick the first edge whose target is in states and whose
 		// label is labels[labelIndex]
 		for (int i=0; i<s.getNumberOfTransitions(); i++) {
-			Transition t = s.transition(i);
+			Transition t = s.transition[i];
 			NFAState edgeTarget = (NFAState)t.target;
 			Label label = (Label)labels.get(labelIndex);
 			/*
@@ -871,7 +841,7 @@ public class DecisionProbe {
 							   edgeTarget.stateNumber+" =="+
 							   label.toString(dfa.nfa.grammar)+"?");
 			*/
-			if ( t.label.isEpsilon() ) {
+			if ( t.label.isEpsilon() || t.label.isSemanticPredicate() ) {
 				// nondeterministically backtrack down epsilon edges
 				path.add(edgeTarget);
 				boolean found =
@@ -931,11 +901,15 @@ public class DecisionProbe {
 		NFAState decisionState = dfa.getNFADecisionStartState();
 		NFAState altState =
 			dfa.nfa.grammar.getNFAStateForAltOfDecision(decisionState,alt);
-		NFAState decisionLeft = (NFAState)altState.transition(0).target;
+		NFAState decisionLeft = (NFAState)altState.transition[0].target;
 		RuleClosureTransition ruleCallEdge =
-			(RuleClosureTransition)decisionLeft.transition(0);
+			(RuleClosureTransition)decisionLeft.transition[0];
 		NFAState ruleStartState = (NFAState)ruleCallEdge.target;
 		//System.out.println("alt = "+decisionLeft.getEnclosingRule());
-		return ruleStartState.getEnclosingRule();
+		return ruleStartState.enclosingRule.name;
+	}
+
+	public void reset() {
+		stateToRecursionOverflowConfigurationsMap.clear();
 	}
 }
diff --git a/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java b/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java
new file mode 100644
index 0000000..a8740e8
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java
@@ -0,0 +1,446 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.tool.Rule;
+import org.antlr.grammar.v2.ANTLRParser;
+import org.antlr.tool.Grammar;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.IntSet;
+
+import java.util.*;
+
+/**
+ * Created by IntelliJ IDEA.
+ * User: parrt
+ * Date: Dec 31, 2007
+ * Time: 1:31:16 PM
+ * To change this template use File | Settings | File Templates.
+ */
+public class LL1Analyzer {
+	/**	0	if we hit end of rule and invoker should keep going (epsilon) */
+	public static final int DETECT_PRED_EOR = 0;
+	/**	1	if we found a nonautobacktracking pred */
+	public static final int DETECT_PRED_FOUND = 1;
+	/**	2	if we didn't find such a pred */
+	public static final int DETECT_PRED_NOT_FOUND = 2;
+
+	public Grammar grammar;
+
+	/** Used during LOOK to detect computation cycles */
+	protected Set<NFAState> lookBusy = new HashSet<NFAState>();
+
+	public Map<NFAState, LookaheadSet> FIRSTCache = new HashMap<NFAState, LookaheadSet>();
+	public Map<Rule, LookaheadSet> FOLLOWCache = new HashMap<Rule, LookaheadSet>();
+
+	public LL1Analyzer(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	/*
+	public void computeRuleFIRSTSets() {
+		if ( getNumberOfDecisions()==0 ) {
+			createNFAs();
+		}
+		for (Iterator it = getRules().iterator(); it.hasNext();) {
+			Rule r = (Rule)it.next();
+			if ( r.isSynPred ) {
+				continue;
+			}
+			LookaheadSet s = FIRST(r);
+			System.out.println("FIRST("+r.name+")="+s);
+		}
+	}
+	*/
+
+	/*
+	public Set<String> getOverriddenRulesWithDifferentFIRST() {
+		// walk every rule in this grammar and compare FIRST set with
+		// those in imported grammars.
+		Set<String> rules = new HashSet();
+		for (Iterator it = getRules().iterator(); it.hasNext();) {
+			Rule r = (Rule)it.next();
+			//System.out.println(r.name+" FIRST="+r.FIRST);
+			for (int i = 0; i < delegates.size(); i++) {
+				Grammar g = delegates.get(i);
+				Rule importedRule = g.getRule(r.name);
+				if ( importedRule != null ) { // exists in imported grammar
+					// System.out.println(r.name+" exists in imported grammar: FIRST="+importedRule.FIRST);
+					if ( !r.FIRST.equals(importedRule.FIRST) ) {
+						rules.add(r.name);
+					}
+				}
+			}
+		}
+		return rules;
+	}
+
+	public Set<Rule> getImportedRulesSensitiveToOverriddenRulesDueToLOOK() {
+		Set<String> diffFIRSTs = getOverriddenRulesWithDifferentFIRST();
+		Set<Rule> rules = new HashSet();
+		for (Iterator it = diffFIRSTs.iterator(); it.hasNext();) {
+			String r = (String) it.next();
+			for (int i = 0; i < delegates.size(); i++) {
+				Grammar g = delegates.get(i);
+				Set<Rule> callers = g.ruleSensitivity.get(r);
+				// somebody invokes rule whose FIRST changed in subgrammar?
+				if ( callers!=null ) {
+					rules.addAll(callers);
+					//System.out.println(g.name+" rules "+callers+" sensitive to "+r+"; dup 'em");
+				}
+			}
+		}
+		return rules;
+	}
+*/
+
+	/*
+	public LookaheadSet LOOK(Rule r) {
+		if ( r.FIRST==null ) {
+			r.FIRST = FIRST(r.startState);
+		}
+		return r.FIRST;
+	}
+*/
+
+	/** From an NFA state, s, find the set of all labels reachable from s.
+	 *  Used to compute follow sets for error recovery.  Never computes
+	 *  a FOLLOW operation.  FIRST stops at end of rules, returning EOR, unless
+	 *  invoked from another rule.  I.e., routine properly handles
+	 *
+	 *     a : b A ;
+	 *
+	 *  where b is nullable.
+	 *
+	 *  We record with EOR_TOKEN_TYPE if we hit the end of a rule so we can
+	 *  know at runtime (when these sets are used) to start walking up the
+	 *  follow chain to compute the real, correct follow set (as opposed to
+	 *  the FOLLOW, which is a superset).
+	 *
+	 *  This routine will only be used on parser and tree parser grammars.
+	 */
+	public LookaheadSet FIRST(NFAState s) {
+		//System.out.println("> FIRST("+s.enclosingRule.name+") in rule "+s.enclosingRule);
+		lookBusy.clear();
+		LookaheadSet look = _FIRST(s, false);
+		//System.out.println("< FIRST("+s.enclosingRule.name+") in rule "+s.enclosingRule+"="+look.toString(this.grammar));
+		return look;
+	}
+
+	public LookaheadSet FOLLOW(Rule r) {
+        //System.out.println("> FOLLOW("+r.name+") in rule "+r.startState.enclosingRule);
+		LookaheadSet f = FOLLOWCache.get(r);
+		if ( f!=null ) {
+			return f;
+		}
+		f = _FIRST(r.stopState, true);
+		FOLLOWCache.put(r, f);
+        //System.out.println("< FOLLOW("+r+") in rule "+r.startState.enclosingRule+"="+f.toString(this.grammar));
+		return f;
+	}
+
+	public LookaheadSet LOOK(NFAState s) {
+		if ( NFAToDFAConverter.debug ) {
+			System.out.println("> LOOK("+s+")");
+		}
+		lookBusy.clear();
+		LookaheadSet look = _FIRST(s, true);
+		// FOLLOW makes no sense (at the moment!) for lexical rules.
+		if ( grammar.type!=Grammar.LEXER && look.member(Label.EOR_TOKEN_TYPE) ) {
+			// avoid altering FIRST reset as it is cached
+			LookaheadSet f = FOLLOW(s.enclosingRule);
+			f.orInPlace(look);
+			f.remove(Label.EOR_TOKEN_TYPE);
+			look = f;
+			//look.orInPlace(FOLLOW(s.enclosingRule));
+		}
+		else if ( grammar.type==Grammar.LEXER && look.member(Label.EOT) ) {
+			// if this has EOT, lookahead is all char (all char can follow rule)
+			//look = new LookaheadSet(Label.EOT);
+			look = new LookaheadSet(IntervalSet.COMPLETE_SET);
+		}
+		if ( NFAToDFAConverter.debug ) {
+			System.out.println("< LOOK("+s+")="+look.toString(grammar));
+		}
+		return look;
+	}
+
+	protected LookaheadSet _FIRST(NFAState s, boolean chaseFollowTransitions) {
+		/*
+		System.out.println("_LOOK("+s+") in rule "+s.enclosingRule);
+		if ( s.transition[0] instanceof RuleClosureTransition ) {
+			System.out.println("go to rule "+((NFAState)s.transition[0].target).enclosingRule);
+		}
+		*/
+		if ( !chaseFollowTransitions && s.isAcceptState() ) {
+			if ( grammar.type==Grammar.LEXER ) {
+				// FOLLOW makes no sense (at the moment!) for lexical rules.
+				// assume all char can follow
+				return new LookaheadSet(IntervalSet.COMPLETE_SET);
+			}
+			return new LookaheadSet(Label.EOR_TOKEN_TYPE);
+		}
+
+		if ( lookBusy.contains(s) ) {
+			// return a copy of an empty set; we may modify set inline
+			return new LookaheadSet();
+		}
+		lookBusy.add(s);
+
+		Transition transition0 = s.transition[0];
+		if ( transition0==null ) {
+			return null;
+		}
+
+		if ( transition0.label.isAtom() ) {
+			int atom = transition0.label.getAtom();
+			return new LookaheadSet(atom);
+		}
+		if ( transition0.label.isSet() ) {
+			IntSet sl = transition0.label.getSet();
+			return new LookaheadSet(sl);
+		}
+
+		// compute FIRST of transition 0
+		LookaheadSet tset = null;
+		// if transition 0 is a rule call and we don't want FOLLOW, check cache
+        if ( !chaseFollowTransitions && transition0 instanceof RuleClosureTransition ) {
+			LookaheadSet prev = FIRSTCache.get((NFAState)transition0.target);
+			if ( prev!=null ) {
+				tset = new LookaheadSet(prev);
+			}
+		}
+
+		// if not in cache, must compute
+		if ( tset==null ) {
+			tset = _FIRST((NFAState)transition0.target, chaseFollowTransitions);
+			// save FIRST cache for transition 0 if rule call
+			if ( !chaseFollowTransitions && transition0 instanceof RuleClosureTransition ) {
+				FIRSTCache.put((NFAState)transition0.target, tset);
+			}
+		}
+
+		// did we fall off the end?
+		if ( grammar.type!=Grammar.LEXER && tset.member(Label.EOR_TOKEN_TYPE) ) {
+			if ( transition0 instanceof RuleClosureTransition ) {
+				// we called a rule that found the end of the rule.
+				// That means the rule is nullable and we need to
+				// keep looking at what follows the rule ref.  E.g.,
+				// a : b A ; where b is nullable means that LOOK(a)
+				// should include A.
+				RuleClosureTransition ruleInvocationTrans =
+					(RuleClosureTransition)transition0;
+				// remove the EOR and get what follows
+				//tset.remove(Label.EOR_TOKEN_TYPE);
+				NFAState following = (NFAState) ruleInvocationTrans.followState;
+				LookaheadSet fset =	_FIRST(following, chaseFollowTransitions);
+				fset.orInPlace(tset); // tset cached; or into new set
+				fset.remove(Label.EOR_TOKEN_TYPE);
+				tset = fset;
+			}
+		}
+
+		Transition transition1 = s.transition[1];
+		if ( transition1!=null ) {
+			LookaheadSet tset1 =
+				_FIRST((NFAState)transition1.target, chaseFollowTransitions);
+			tset1.orInPlace(tset); // tset cached; or into new set
+			tset = tset1;
+		}
+
+		return tset;
+	}
+
+	/** Is there a non-syn-pred predicate visible from s that is not in
+	 *  the rule enclosing s?  This accounts for most predicate situations
+	 *  and lets ANTLR do a simple LL(1)+pred computation.
+	 *
+	 *  TODO: what about gated vs regular preds?
+	 */
+	public boolean detectConfoundingPredicates(NFAState s) {
+		lookBusy.clear();
+		Rule r = s.enclosingRule;
+		return _detectConfoundingPredicates(s, r, false) == DETECT_PRED_FOUND;
+	}
+
+	protected int _detectConfoundingPredicates(NFAState s,
+											   Rule enclosingRule,
+											   boolean chaseFollowTransitions)
+	{
+		//System.out.println("_detectNonAutobacktrackPredicates("+s+")");
+		if ( !chaseFollowTransitions && s.isAcceptState() ) {
+			if ( grammar.type==Grammar.LEXER ) {
+				// FOLLOW makes no sense (at the moment!) for lexical rules.
+				// assume all char can follow
+				return DETECT_PRED_NOT_FOUND;
+			}
+			return DETECT_PRED_EOR;
+		}
+
+		if ( lookBusy.contains(s) ) {
+			// return a copy of an empty set; we may modify set inline
+			return DETECT_PRED_NOT_FOUND;
+		}
+		lookBusy.add(s);
+
+		Transition transition0 = s.transition[0];
+		if ( transition0==null ) {
+			return DETECT_PRED_NOT_FOUND;
+		}
+
+		if ( !(transition0.label.isSemanticPredicate()||
+			   transition0.label.isEpsilon()) ) {
+			return DETECT_PRED_NOT_FOUND;
+		}
+
+		if ( transition0.label.isSemanticPredicate() ) {
+			//System.out.println("pred "+transition0.label);
+			SemanticContext ctx = transition0.label.getSemanticContext();
+			SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
+			if ( p.predicateAST.getType() != ANTLRParser.BACKTRACK_SEMPRED ) {
+				return DETECT_PRED_FOUND;
+			}
+		}
+		
+		/*
+		if ( transition0.label.isSemanticPredicate() ) {
+			System.out.println("pred "+transition0.label);
+			SemanticContext ctx = transition0.label.getSemanticContext();
+			SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
+			// if a non-syn-pred found not in enclosingRule, say we found one
+			if ( p.predicateAST.getType() != ANTLRParser.BACKTRACK_SEMPRED &&
+				 !p.predicateAST.enclosingRuleName.equals(enclosingRule.name) )
+			{
+				System.out.println("found pred "+p+" not in "+enclosingRule.name);
+				return DETECT_PRED_FOUND;
+			}
+		}
+		*/
+
+		int result = _detectConfoundingPredicates((NFAState)transition0.target,
+												  enclosingRule,
+												  chaseFollowTransitions);
+		if ( result == DETECT_PRED_FOUND ) {
+			return DETECT_PRED_FOUND;
+		}
+
+		if ( result == DETECT_PRED_EOR ) {
+			if ( transition0 instanceof RuleClosureTransition ) {
+				// we called a rule that found the end of the rule.
+				// That means the rule is nullable and we need to
+				// keep looking at what follows the rule ref.  E.g.,
+				// a : b A ; where b is nullable means that LOOK(a)
+				// should include A.
+				RuleClosureTransition ruleInvocationTrans =
+					(RuleClosureTransition)transition0;
+				NFAState following = (NFAState) ruleInvocationTrans.followState;
+				int afterRuleResult =
+					_detectConfoundingPredicates(following,
+												 enclosingRule,
+												 chaseFollowTransitions);
+				if ( afterRuleResult == DETECT_PRED_FOUND ) {
+					return DETECT_PRED_FOUND;
+				}
+			}
+		}
+
+		Transition transition1 = s.transition[1];
+		if ( transition1!=null ) {
+			int t1Result =
+				_detectConfoundingPredicates((NFAState)transition1.target,
+											 enclosingRule,
+											 chaseFollowTransitions);
+			if ( t1Result == DETECT_PRED_FOUND ) {
+				return DETECT_PRED_FOUND;
+			}
+		}
+
+		return DETECT_PRED_NOT_FOUND;
+	}
+
+	/** Return predicate expression found via epsilon edges from s.  Do
+	 *  not look into other rules for now.  Do something simple.  Include
+	 *  backtracking synpreds.
+	 */
+	public SemanticContext getPredicates(NFAState altStartState) {
+		lookBusy.clear();
+		return _getPredicates(altStartState, altStartState);
+	}
+
+	protected SemanticContext _getPredicates(NFAState s, NFAState altStartState) {
+		//System.out.println("_getPredicates("+s+")");
+		if ( s.isAcceptState() ) {
+			return null;
+		}
+
+		// avoid infinite loops from (..)* etc...
+		if ( lookBusy.contains(s) ) {
+			return null;
+		}
+		lookBusy.add(s);
+
+		Transition transition0 = s.transition[0];
+		// no transitions
+		if ( transition0==null ) {
+			return null;
+		}
+
+		// not a predicate and not even an epsilon
+		if ( !(transition0.label.isSemanticPredicate()||
+			   transition0.label.isEpsilon()) ) {
+			return null;
+		}
+
+		SemanticContext p = null;
+		SemanticContext p0 = null;
+		SemanticContext p1 = null;
+		if ( transition0.label.isSemanticPredicate() ) {
+			//System.out.println("pred "+transition0.label);
+			p = transition0.label.getSemanticContext();
+			// ignore backtracking preds not on left edge for this decision
+			if ( ((SemanticContext.Predicate)p).predicateAST.getType() ==
+				  ANTLRParser.BACKTRACK_SEMPRED  &&
+				 s == altStartState.transition[0].target )
+			{
+				p = null; // don't count
+			}
+		}
+
+		// get preds from beyond this state
+		p0 = _getPredicates((NFAState)transition0.target, altStartState);
+
+		// get preds from other transition
+		Transition transition1 = s.transition[1];
+		if ( transition1!=null ) {
+			p1 = _getPredicates((NFAState)transition1.target, altStartState);
+		}
+
+		// join this&following-right|following-down
+		return SemanticContext.and(p,SemanticContext.or(p0,p1));
+	}
+}
diff --git a/tool/src/main/java/org/antlr/analysis/LL1DFA.java b/tool/src/main/java/org/antlr/analysis/LL1DFA.java
new file mode 100644
index 0000000..191fb9c
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/LL1DFA.java
@@ -0,0 +1,179 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.MultiMap;
+import org.antlr.grammar.v2.ANTLRParser;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Collections;
+
+/** A special DFA that is exactly LL(1) or LL(1) with backtracking mode
+ *  predicates to resolve edge set collisions.
+ */
+public class LL1DFA extends DFA {
+	/** From list of lookahead sets (one per alt in decision), create
+	 *  an LL(1) DFA.  One edge per set.
+	 *
+	 *  s0-{alt1}->:o=>1
+	 *  | \
+	 *  |  -{alt2}->:o=>2
+	 *  |
+	 *  ...
+	 */
+	public LL1DFA(int decisionNumber, NFAState decisionStartState, LookaheadSet[] altLook) {
+		DFAState s0 = newState();
+		startState = s0;
+		nfa = decisionStartState.nfa;
+		nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+		this.decisionNumber = decisionNumber;
+		this.decisionNFAStartState = decisionStartState;
+		initAltRelatedInfo();
+		unreachableAlts = null;
+		for (int alt=1; alt<altLook.length; alt++) {
+			DFAState acceptAltState = newState();
+			acceptAltState.acceptState = true;
+			setAcceptState(alt, acceptAltState);
+			acceptAltState.k = 1;
+			acceptAltState.cachedUniquelyPredicatedAlt = alt;
+			Label e = getLabelForSet(altLook[alt].tokenTypeSet);
+			s0.addTransition(acceptAltState, e);
+		}
+	}
+
+	/** From a set of edgeset->list-of-alts mappings, create a DFA
+	 *  that uses syn preds for all |list-of-alts|>1.
+	 */
+	public LL1DFA(int decisionNumber,
+				  NFAState decisionStartState,
+				  MultiMap<IntervalSet, Integer> edgeMap)
+	{
+		DFAState s0 = newState();
+		startState = s0;
+		nfa = decisionStartState.nfa;
+		nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+		this.decisionNumber = decisionNumber;
+		this.decisionNFAStartState = decisionStartState;
+		initAltRelatedInfo();
+		unreachableAlts = null;
+		for (Iterator it = edgeMap.keySet().iterator(); it.hasNext();) {
+			IntervalSet edge = (IntervalSet)it.next();
+			List<Integer> alts = edgeMap.get(edge);
+			Collections.sort(alts); // make sure alts are attempted in order
+			//System.out.println(edge+" -> "+alts);
+			DFAState s = newState();
+			s.k = 1;
+			Label e = getLabelForSet(edge);
+			s0.addTransition(s, e);
+			if ( alts.size()==1 ) {
+				s.acceptState = true;
+				int alt = alts.get(0);
+				setAcceptState(alt, s);
+				s.cachedUniquelyPredicatedAlt = alt;
+			}
+			else {
+				// resolve with syntactic predicates.  Add edges from
+				// state s that test predicates.
+				s.resolvedWithPredicates = true;
+				for (int i = 0; i < alts.size(); i++) {
+					int alt = (int)alts.get(i);
+					s.cachedUniquelyPredicatedAlt =	NFA.INVALID_ALT_NUMBER;
+					DFAState predDFATarget = getAcceptState(alt);
+					if ( predDFATarget==null ) {
+						predDFATarget = newState(); // create if not there.
+						predDFATarget.acceptState = true;
+						predDFATarget.cachedUniquelyPredicatedAlt =	alt;
+						setAcceptState(alt, predDFATarget);
+					}
+					// add a transition to pred target from d
+					/*
+					int walkAlt =
+						decisionStartState.translateDisplayAltToWalkAlt(alt);
+					NFAState altLeftEdge = nfa.grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+					NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
+					SemanticContext ctx = nfa.grammar.ll1Analyzer.getPredicates(altStartState);
+					System.out.println("sem ctx = "+ctx);
+					if ( ctx == null ) {
+						ctx = new SemanticContext.TruePredicate();
+					}
+					s.addTransition(predDFATarget, new Label(ctx));
+					*/
+					SemanticContext.Predicate synpred =
+						getSynPredForAlt(decisionStartState, alt);
+					if ( synpred == null ) {
+						synpred = new SemanticContext.TruePredicate();
+					}
+					s.addTransition(predDFATarget, new PredicateLabel(synpred));
+				}
+			}
+		}
+		//System.out.println("dfa for preds=\n"+this);
+	}
+
+	protected Label getLabelForSet(IntervalSet edgeSet) {
+		Label e = null;
+		int atom = edgeSet.getSingleElement();
+		if ( atom != Label.INVALID ) {
+			e = new Label(atom);
+		}
+		else {
+			e = new Label(edgeSet);
+		}
+		return e;
+	}
+
+	protected SemanticContext.Predicate getSynPredForAlt(NFAState decisionStartState,
+														 int alt)
+	{
+		int walkAlt =
+			decisionStartState.translateDisplayAltToWalkAlt(alt);
+		NFAState altLeftEdge =
+			nfa.grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+		NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
+		//System.out.println("alt "+alt+" start state = "+altStartState.stateNumber);
+		if ( altStartState.transition[0].isSemanticPredicate() ) {
+			SemanticContext ctx = altStartState.transition[0].label.getSemanticContext();
+			if ( ctx.isSyntacticPredicate() ) {
+				SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
+				if ( p.predicateAST.getType() == ANTLRParser.BACKTRACK_SEMPRED ) {
+					/*
+					System.out.println("syn pred for alt "+walkAlt+" "+
+									   ((SemanticContext.Predicate)altStartState.transition[0].label.getSemanticContext()).predicateAST);
+					*/
+					if ( ctx.isSyntacticPredicate() ) {
+						nfa.grammar.synPredUsedInDFA(this, ctx);
+					}
+					return (SemanticContext.Predicate)altStartState.transition[0].label.getSemanticContext();
+				}
+			}
+		}
+		return null;
+	}
+}
diff --git a/src/org/antlr/analysis/Label.java b/tool/src/main/java/org/antlr/analysis/Label.java
similarity index 88%
rename from src/org/antlr/analysis/Label.java
rename to tool/src/main/java/org/antlr/analysis/Label.java
index 4bb1956..28b8dc9 100644
--- a/src/org/antlr/analysis/Label.java
+++ b/tool/src/main/java/org/antlr/analysis/Label.java
@@ -38,9 +38,11 @@ import org.antlr.misc.IntSet;
  *  (which assumes an epsilon transition) or a tree of predicates (in a DFA).
  */
 public class Label implements Comparable, Cloneable {
-    public static final int INVALID = -6;
+    public static final int INVALID = -7;
 
-    public static final int EPSILON = -5;
+	public static final int ACTION = -6;
+	
+	public static final int EPSILON = -5;
 
     public static final String EPSILON_STR = "<EPSILON>";
 
@@ -86,7 +88,7 @@ public class Label implements Comparable, Cloneable {
 
     // TODO: is 0 a valid unicode char? max is FFFF -1, right?
     public static final int MIN_CHAR_VALUE = '\u0000';
-    public static final int MAX_CHAR_VALUE = '\uFFFE';
+    public static final int MAX_CHAR_VALUE = '\uFFFF';
 
 	/** End of rule token type; imaginary token type used only for
 	 *  local, partial FOLLOW sets to indicate that the local FOLLOW
@@ -115,13 +117,6 @@ public class Label implements Comparable, Cloneable {
     /** The token type or character value; or, signifies special label. */
     protected int label;
 
-    /** A tree of semantic predicates from the grammar AST if label==SEMPRED.
-     *  In the NFA, labels will always be exactly one predicate, but the DFA
-     *  may have to combine a bunch of them as it collects predicates from
-     *  multiple NFA configurations into a single DFA state.
-     */
-    protected SemanticContext semanticContext;
-
     /** A set of token types or character codes if label==SET */
 	// TODO: try IntervalSet for everything
     protected IntSet labelSet;
@@ -130,18 +125,6 @@ public class Label implements Comparable, Cloneable {
         this.label = label;
     }
 
-    /** Make a semantic predicate label */
-    public Label(GrammarAST predicateASTNode) {
-        this(SEMPRED);
-        this.semanticContext = new SemanticContext.Predicate(predicateASTNode);
-    }
-
-    /** Make a semantic predicates label */
-    public Label(SemanticContext semCtx) {
-        this(SEMPRED);
-        this.semanticContext = semCtx;
-    }
-
     /** Make a set label */
     public Label(IntSet labelSet) {
 		if ( labelSet==null ) {
@@ -206,13 +189,18 @@ public class Label implements Comparable, Cloneable {
     public boolean isAtom() {
         return label>=MIN_ATOM_VALUE;
     }
+
     public boolean isEpsilon() {
         return label==EPSILON;
     }
 
-    public boolean isSemanticPredicate() {
-        return label==SEMPRED;
-    }
+	public boolean isSemanticPredicate() {
+		return false;
+	}
+
+	public boolean isAction() {
+		return false;
+	}
 
     public boolean isSet() {
         return label==SET;
@@ -240,7 +228,7 @@ public class Label implements Comparable, Cloneable {
     }
 
     public SemanticContext getSemanticContext() {
-        return semanticContext;
+        return null;
     }
 
 	public boolean matches(int atom) {
@@ -276,20 +264,22 @@ public class Label implements Comparable, Cloneable {
 	}
 
     public int hashCode() {
-        switch (label) {
-            case SET :
-                return labelSet.hashCode();
-            case SEMPRED :
-                return semanticContext.hashCode();
-            default :
-                return label;
-        }
-    }
+        if (label==SET) {
+            return labelSet.hashCode();
+		}
+		else {
+			return label;
+		}
+	}
 
-    public boolean equals(Object o) {
+	// TODO: do we care about comparing set {A} with atom A? Doesn't now.
+	public boolean equals(Object o) {
 		if ( o==null ) {
 			return false;
 		}
+		if ( this == o ) {
+			return true; // equals if same object
+		}
 		// labels must be the same even if epsilon or set or sempred etc...
         if ( label!=((Label)o).label ) {
             return false;
@@ -336,8 +326,6 @@ public class Label implements Comparable, Cloneable {
         switch (label) {
             case SET :
                 return labelSet.toString();
-            case SEMPRED :
-                return "{"+semanticContext+"}?";
             default :
                 return String.valueOf(label);
         }
@@ -347,8 +335,6 @@ public class Label implements Comparable, Cloneable {
         switch (label) {
             case SET :
                 return labelSet.toString(g);
-            case SEMPRED :
-                return "{"+semanticContext+"}?";
             default :
                 return g.getTokenDisplayName(label);
         }
@@ -371,4 +357,26 @@ public class Label implements Comparable, Cloneable {
         return buf.toString();
     }
     */
+
+	public static boolean intersect(Label label, Label edgeLabel) {
+		boolean hasIntersection = false;
+		boolean labelIsSet = label.isSet();
+		boolean edgeIsSet = edgeLabel.isSet();
+		if ( !labelIsSet && !edgeIsSet && edgeLabel.label==label.label ) {
+			hasIntersection = true;
+		}
+		else if ( labelIsSet && edgeIsSet &&
+				  !edgeLabel.getSet().and(label.getSet()).isNil() ) {
+			hasIntersection = true;
+		}
+		else if ( labelIsSet && !edgeIsSet &&
+				  label.getSet().member(edgeLabel.label) ) {
+			hasIntersection = true;
+		}
+		else if ( !labelIsSet && edgeIsSet &&
+				  edgeLabel.getSet().member(label.label) ) {
+			hasIntersection = true;
+		}
+		return hasIntersection;
+	}
 }
diff --git a/src/org/antlr/analysis/LookaheadSet.java b/tool/src/main/java/org/antlr/analysis/LookaheadSet.java
similarity index 72%
copy from src/org/antlr/analysis/LookaheadSet.java
copy to tool/src/main/java/org/antlr/analysis/LookaheadSet.java
index 8239e06..5b3911a 100644
--- a/src/org/antlr/analysis/LookaheadSet.java
+++ b/tool/src/main/java/org/antlr/analysis/LookaheadSet.java
@@ -37,8 +37,7 @@ import org.antlr.tool.Grammar;
  *  reasons in the future to abstract a LookaheadSet over a raw BitSet.
  */
 public class LookaheadSet {
-	public IntSet tokenTypeSet;
-	public boolean hasEOF;
+	public IntervalSet tokenTypeSet;
 
 	public LookaheadSet() {
 		tokenTypeSet = new IntervalSet();
@@ -53,39 +52,57 @@ public class LookaheadSet {
 		tokenTypeSet = IntervalSet.of(atom);
 	}
 
-	public void orInPlace(LookaheadSet other) {
+    public LookaheadSet(LookaheadSet other) {
+        this();
+        this.tokenTypeSet.addAll(other.tokenTypeSet);
+    }
+
+    public void orInPlace(LookaheadSet other) {
 		this.tokenTypeSet.addAll(other.tokenTypeSet);
-		this.hasEOF = this.hasEOF || other.hasEOF;
+	}
+
+	public LookaheadSet or(LookaheadSet other) {
+		return new LookaheadSet(tokenTypeSet.or(other.tokenTypeSet));
+	}
+
+	public LookaheadSet subtract(LookaheadSet other) {
+		return new LookaheadSet(this.tokenTypeSet.subtract(other.tokenTypeSet));
 	}
 
 	public boolean member(int a) {
 		return tokenTypeSet.member(a);
 	}
 
+	public LookaheadSet intersection(LookaheadSet s) {
+		IntSet i = this.tokenTypeSet.and(s.tokenTypeSet);
+		LookaheadSet intersection = new LookaheadSet(i);
+		return intersection;
+	}
+
+	public boolean isNil() {
+		return tokenTypeSet.isNil();
+	}
+
 	public void remove(int a) {
-		tokenTypeSet = tokenTypeSet.subtract(IntervalSet.of(a));
+		tokenTypeSet = (IntervalSet)tokenTypeSet.subtract(IntervalSet.of(a));
+	}
+
+	public int hashCode() {
+		return tokenTypeSet.hashCode();
+	}
+
+	public boolean equals(Object other) {
+		return tokenTypeSet.equals(((LookaheadSet)other).tokenTypeSet);
 	}
 
 	public String toString(Grammar g) {
 		if ( tokenTypeSet==null ) {
-			if ( hasEOF ) {
-				return "EOF";
-			}
 			return "";
 		}
 		String r = tokenTypeSet.toString(g);
-		if ( hasEOF ) {
-			return r+"+EOF";
-		}
 		return r;
 	}
 
-	public static LookaheadSet EOF() {
-		LookaheadSet eof = new LookaheadSet();
-		eof.hasEOF = true;
-		return eof;
-	}
-
 	public String toString() {
 		return toString(null);
 	}
diff --git a/src/org/antlr/analysis/NFA.java b/tool/src/main/java/org/antlr/analysis/NFA.java
similarity index 76%
rename from src/org/antlr/analysis/NFA.java
rename to tool/src/main/java/org/antlr/analysis/NFA.java
index de79eab..426c4ce 100644
--- a/src/org/antlr/analysis/NFA.java
+++ b/tool/src/main/java/org/antlr/analysis/NFA.java
@@ -30,8 +30,6 @@ package org.antlr.analysis;
 import org.antlr.tool.Grammar;
 import org.antlr.tool.NFAFactory;
 
-import java.util.Vector;
-
 /** An NFA (collection of NFAStates) constructed from a grammar.  This
  *  NFA is one big machine for entire grammar.  Decision points are recorded
  *  by the Grammar object so we can, for example, convert to DFA or simulate
@@ -42,28 +40,26 @@ public class NFA {
 
     /** This NFA represents which grammar? */
     public Grammar grammar;
-
-    /** The NFA states in this NFA.  Maps state number to NFAState object.
-     *  This is a Vector instead of a List because I need to be able to grow
-     *  this properly.  After talking to Josh Bloch, Collections guy at Sun,
-     *  I decided this was easiest solution.
-     */
-    protected Vector numberToStateList = new Vector(1000);
-
-    /** Which factory created this NFA? */
+	
+	/** Which factory created this NFA? */
     protected NFAFactory factory = null;
 
-    public NFA(Grammar g) {
+	public boolean complete;
+
+	public NFA(Grammar g) {
         this.grammar = g;
     }
 
-    public void addState(NFAState state) {
-        numberToStateList.setSize(state.stateNumber+1); // make sure we have room
-        numberToStateList.set(state.stateNumber, state);
+	public int getNewNFAStateNumber() {
+		return grammar.composite.getNewNFAStateNumber();
+	}
+
+	public void addState(NFAState state) {
+		grammar.composite.addState(state);
     }
 
     public NFAState getState(int s) {
-        return (NFAState)numberToStateList.get(s);
+		return grammar.composite.getState(s);
     }
 
     public NFAFactory getFactory() {
diff --git a/src/org/antlr/analysis/NFAConfiguration.java b/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
similarity index 94%
rename from src/org/antlr/analysis/NFAConfiguration.java
rename to tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
index fb8a4f9..6cf9734 100644
--- a/src/org/antlr/analysis/NFAConfiguration.java
+++ b/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
@@ -27,6 +27,8 @@
 */
 package org.antlr.analysis;
 
+import org.antlr.misc.Utils;
+
 /** An NFA state, predicted alt, and syntactic/semantic context.
  *  The syntactic context is a pointer into the rule invocation
  *  chain used to arrive at the state.  The semantic context is
@@ -82,7 +84,9 @@ public class NFAConfiguration {
      */
     protected boolean singleAtomTransitionEmanating;
 
-    public NFAConfiguration(int state,
+	//protected boolean addedDuringClosure = true;
+
+	public NFAConfiguration(int state,
                             int alt,
                             NFAContext context,
                             SemanticContext semanticContext)
@@ -134,14 +138,15 @@ public class NFAConfiguration {
         if ( semanticContext!=null &&
              semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
             buf.append("|");
-            buf.append(semanticContext);
+			String escQuote = Utils.replace(semanticContext.toString(), "\"", "\\\"");
+			buf.append(escQuote);
         }
         if ( resolved ) {
             buf.append("|resolved");
         }
-        if ( resolveWithPredicate ) {
-            buf.append("|resolveWithPredicate");
-        }
-        return buf.toString();
+		if ( resolveWithPredicate ) {
+			buf.append("|resolveWithPredicate");
+		}
+		return buf.toString();
     }
 }
diff --git a/src/org/antlr/analysis/NFAContext.java b/tool/src/main/java/org/antlr/analysis/NFAContext.java
similarity index 96%
rename from src/org/antlr/analysis/NFAContext.java
rename to tool/src/main/java/org/antlr/analysis/NFAContext.java
index b56d9d0..9ffec39 100644
--- a/src/org/antlr/analysis/NFAContext.java
+++ b/tool/src/main/java/org/antlr/analysis/NFAContext.java
@@ -61,16 +61,25 @@ public class NFAContext {
 	 *
 	 *  you could chase your tail forever if somebody said "s : e '.' | e ';' ;"
 	 *  This constant prevents new states from being created after a stack gets
-	 *  "too big".
+	 *  "too big".  Actually (12/14/2007) I realize that this example is
+	 *  trapped by the non-LL(*) detector for recursion in > 1 alt.  Here is
+	 *  an example that trips stack overflow:
 	 *
-	 *  Imagine doing a depth-first search on the DFA...as you chase an input
+	 *	  s : a Y | A A A A A X ; // force recursion past m=4
+	 *	  a : A a | Q;
+	 *
+	 *  If that were:
+	 *
+	 *	  s : a Y | A+ X ;
+	 *
+	 *  it could loop forever.
+	 *
+	 *  Imagine doing a depth-first search on the e DFA...as you chase an input
 	 *  sequence you can recurse to same rule such as e above.  You'd have a
 	 *  chain of ((((.  When you get do some point, you have to give up.  The
 	 *  states in the chain will have longer and longer NFA config stacks.
 	 *  Must limit size.
 	 *
-	 *  TODO: i wonder if we can recognize recursive loops and use a simple cycle?
-	 *
 	 *  max=0 implies you cannot ever jump to another rule during closure.
 	 *  max=1 implies you can make as many calls as you want--you just
 	 *        can't ever visit a state that is on your rule invocation stack.
diff --git a/src/org/antlr/codegen/CSharpTarget.java b/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
similarity index 61%
copy from src/org/antlr/codegen/CSharpTarget.java
copy to tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
index e1da9bd..d1d0d92 100644
--- a/src/org/antlr/codegen/CSharpTarget.java
+++ b/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
@@ -1,46 +1,65 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2006 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.tool.Grammar;
-
-public class CSharpTarget extends Target 
-{
-	protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
-													 CodeGenerator generator,
-													 Grammar grammar,
-													 StringTemplate recognizerST,
-													 StringTemplate cyclicDFAST)
-	{
-		return recognizerST;
-	}
-}
-
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.analysis;
+
+import org.antlr.misc.Barrier;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.ErrorManager;
+
+/** Convert all decisions i..j inclusive in a thread */
+public class NFAConversionThread implements Runnable {
+	Grammar grammar;
+	int i, j;
+	Barrier barrier;
+	public NFAConversionThread(Grammar grammar,
+							   Barrier barrier,
+							   int i,
+							   int j)
+	{
+		this.grammar = grammar;
+		this.barrier = barrier;
+		this.i = i;
+		this.j = j;
+	}
+	public void run() {
+		for (int decision=i; decision<=j; decision++) {
+			NFAState decisionStartState = grammar.getDecisionNFAStartState(decision);
+			if ( decisionStartState.getNumberOfTransitions()>1 ) {
+				grammar.createLookaheadDFA(decision,true);
+			}
+		}
+		// now wait for others to finish
+		try {
+			barrier.waitForRelease();
+		}
+		catch(InterruptedException e) {
+			ErrorManager.internalError("what the hell? DFA interruptus", e);
+		}
+	}
+}
+
diff --git a/src/org/antlr/analysis/NFAState.java b/tool/src/main/java/org/antlr/analysis/NFAState.java
similarity index 87%
rename from src/org/antlr/analysis/NFAState.java
rename to tool/src/main/java/org/antlr/analysis/NFAState.java
index d70c06e..80bd534 100644
--- a/src/org/antlr/analysis/NFAState.java
+++ b/tool/src/main/java/org/antlr/analysis/NFAState.java
@@ -28,6 +28,8 @@
 package org.antlr.analysis;
 
 import org.antlr.tool.GrammarAST;
+import org.antlr.tool.Rule;
+import org.antlr.tool.ErrorManager;
 
 /** A state within an NFA. At most 2 transitions emanate from any NFA state. */
 public class NFAState extends State {
@@ -43,7 +45,13 @@ public class NFAState extends State {
 
 	/** How many transitions; 0, 1, or 2 transitions */
 	int numTransitions = 0;
-	Transition[] transition = new Transition[MAX_TRANSITIONS];
+	public Transition[] transition = new Transition[MAX_TRANSITIONS];
+
+	/** For o-A->o type NFA tranitions, record the label that leads to this
+	 *  state.  Useful for creating rich error messages when we find
+	 *  insufficiently (with preds) covered states.
+	 */
+	public Label incidentEdgeLabel;
 
 	/** Which NFA are we in? */
 	public NFA nfa = null;
@@ -71,7 +79,7 @@ public class NFAState extends State {
 	public int decisionStateType;
 
 	/** What rule do we live in? */
-	protected String enclosingRule;
+	public Rule enclosingRule;
 
 	/** During debugging and for nondeterminism warnings, it's useful
 	 *  to know what relationship this node has to the original grammar.
@@ -87,7 +95,7 @@ public class NFAState extends State {
 	 *  report line:col info.  Could also be used to track line:col
 	 *  for elements such as token refs.
 	 */
-	protected GrammarAST associatedASTNode;
+	public GrammarAST associatedASTNode;
 
 	/** Is this state the sole target of an EOT transition? */
 	protected boolean EOTTargetState = false;
@@ -106,12 +114,24 @@ public class NFAState extends State {
 	}
 
 	public void addTransition(Transition e) {
+		if ( e==null ) {
+			throw new IllegalArgumentException("You can't add a null transition");			
+		}
 		if ( numTransitions>transition.length ) {
 			throw new IllegalArgumentException("You can only have "+transition.length+" transitions");
 		}
 		if ( e!=null ) {
 			transition[numTransitions] = e;
 			numTransitions++;
+			// Set the "back pointer" of the target state so that it
+			// knows about the label of the incoming edge.
+			Label label = e.label;
+			if ( label.isAtom() || label.isSet() ) {
+				if ( ((NFAState)e.target).incidentEdgeLabel!=null ) {
+					ErrorManager.internalError("Clobbered incident edge");
+				}
+				((NFAState)e.target).incidentEdgeLabel = e.label;
+			}
 		}
 	}
 
@@ -119,6 +139,9 @@ public class NFAState extends State {
 	 *  transition another state has.
 	 */
 	public void setTransition0(Transition e) {
+		if ( e==null ) {
+			throw new IllegalArgumentException("You can't use a solitary null transition");
+		}
 		transition[0] = e;
 		transition[1] = null;
 		numTransitions = 1;
@@ -154,7 +177,8 @@ public class NFAState extends State {
 	 *
 	 *  Return same alt if we can't translate.
 	 */
-	public int translateDisplayAltToWalkAlt(DFA dfa, int displayAlt) {
+	public int translateDisplayAltToWalkAlt(int displayAlt) {
+		NFAState nfaStart = this;
 		if ( decisionNumber==0 || decisionStateType==0 ) {
 			return displayAlt;
 		}
@@ -168,9 +192,8 @@ public class NFAState extends State {
 			ErrorManager.internalError("can't get DFA for decision "+decisionNumber);
 		}
 		*/
-		NFAState nfaStart = dfa.getNFADecisionStartState();
 		int nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(nfaStart);
-		switch ( decisionStateType ) {
+		switch ( nfaStart.decisionStateType ) {
 			case LOOPBACK :
 				walkAlt = displayAlt % nAlts + 1; // rotate right mod 1..3
 				break;
@@ -200,14 +223,6 @@ public class NFAState extends State {
 		this.associatedASTNode = decisionASTNode;
 	}
 
-	public GrammarAST getAssociatedASTNode() {
-		return associatedASTNode;
-	}
-
-	 public void setAssociatedASTNode(GrammarAST ASTNode) {
-		this.associatedASTNode = ASTNode;
-	}
-
 	public String getDescription() {
 		return description;
 	}
@@ -224,14 +239,6 @@ public class NFAState extends State {
 		this.decisionNumber = decisionNumber;
 	}
 
-	public void setEnclosingRuleName(String rule) {
-		this.enclosingRule = rule;
-	}
-
-	public String getEnclosingRule() {
-		return enclosingRule;
-	}
-
 	public boolean isEOTTargetState() {
 		return EOTTargetState;
 	}
diff --git a/src/org/antlr/analysis/NFAToDFAConverter.java b/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
similarity index 80%
rename from src/org/antlr/analysis/NFAToDFAConverter.java
rename to tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
index 7c23b4a..c0c7de9 100644
--- a/src/org/antlr/analysis/NFAToDFAConverter.java
+++ b/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
@@ -27,13 +27,18 @@
 */
 package org.antlr.analysis;
 
-import org.antlr.misc.IntSet;
 import org.antlr.misc.OrderedHashSet;
 import org.antlr.misc.Utils;
+import org.antlr.tool.ErrorManager;
 
 import java.util.*;
 
-/** Code that embodies the NFA conversion to DFA. */
+import antlr.Token;
+
+/** Code that embodies the NFA conversion to DFA. A new object is needed
+ *  per DFA (also required for thread safety if multiple conversions
+ *  launched).
+ */
 public class NFAToDFAConverter {
 	/** A list of DFA states we still need to process during NFA conversion */
 	protected List work = new LinkedList();
@@ -61,9 +66,10 @@ public class NFAToDFAConverter {
 	 */
 	public static boolean SINGLE_THREADED_NFA_CONVERSION = true;
 
+	protected boolean computingStartState = false;
+
 	public NFAToDFAConverter(DFA dfa) {
 		this.dfa = dfa;
-		NFAState nfaStartState = dfa.getNFADecisionStartState();
 		int nAlts = dfa.getNumberOfAlts();
 		initContextTrees(nAlts);
 	}
@@ -76,19 +82,22 @@ public class NFAToDFAConverter {
 
 		// while more DFA states to check, process them
 		while ( work.size()>0 &&
-			    !dfa.probe.analysisAborted() &&
-				!dfa.probe.nonLLStarDecision &&
 				!dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() )
 		{
 			DFAState d = (DFAState) work.get(0);
-			if ( dfa.nfa.grammar.getWatchNFAConversion() ) {
+			if ( dfa.nfa.grammar.composite.watchNFAConversion ) {
 				System.out.println("convert DFA state "+d.stateNumber+
-								   " ("+d.getNFAConfigurations().size()+" nfa states)");
+								   " ("+d.nfaConfigurations.size()+" nfa states)");
 			}
 			int k = dfa.getUserMaxLookahead();
 			if ( k>0 && k==d.getLookaheadDepth() ) {
 				// we've hit max lookahead, make this a stop state
 				//System.out.println("stop state @k="+k+" (terminated early)");
+				/*
+				List<Label> sampleInputLabels = d.dfa.probe.getSampleNonDeterministicInputSequence(d);
+				String input = d.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
+				System.out.println("sample input: "+input);
+				 */
 				resolveNonDeterminisms(d);
 				// Check to see if we need to add any semantic predicate transitions
 				if ( d.isResolvedWithPredicates() ) {
@@ -104,27 +113,10 @@ public class NFAToDFAConverter {
 			work.remove(0); // done with it; remove from work list
 		}
 
-		// walk all accept states and find the synpreds
-		// I used to do this in the code generator, but that is too late.
-		// This converter tries to avoid computing DFA for decisions in
-		// syntactic predicates that are not ever used such as those
-		// created by autobacktrack mode.
-		int nAlts = dfa.getNumberOfAlts();
-		for (int i=1; i<=nAlts; i++) {
-			DFAState a = dfa.getAcceptState(i);
-			if ( a!=null ) {
-				Set synpreds = a.getSyntacticPredicatesInNFAConfigurations();
-				if ( synpreds!=null ) {
-					// add all the predicates we find (should be just one, right?)
-					for (Iterator it = synpreds.iterator(); it.hasNext();) {
-						SemanticContext semctx = (SemanticContext) it.next();
-						// System.out.println("synpreds: "+semctx);
-						dfa.nfa.grammar.synPredUsedInDFA(dfa, semctx);
-					}
-				}
-			}
-		}
-
+		// Find all manual syn preds (gated).  These are not discovered
+		// in tryToResolveWithSemanticPredicates because they are implicitly
+		// added to every edge by code gen, DOT generation etc...
+		dfa.findAllGatedSynPredsUsedInDFAAcceptStates();
 	}
 
 	/** From this first NFA state of a decision, create a DFA.
@@ -148,6 +140,7 @@ public class NFAToDFAConverter {
 	protected DFAState computeStartState() {
 		NFAState alt = dfa.decisionNFAStartState;
 		DFAState startState = dfa.newState();
+		computingStartState = true;
 		int i = 0;
 		int altNum = 1;
 		while ( alt!=null ) {
@@ -162,32 +155,34 @@ public class NFAToDFAConverter {
 				 dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK )
 			{
 				int numAltsIncludingExitBranch = dfa.nfa.grammar
-						.getNumberOfAltsForDecisionNFA(dfa.decisionNFAStartState);
+					.getNumberOfAltsForDecisionNFA(dfa.decisionNFAStartState);
 				altNum = numAltsIncludingExitBranch;
-				closure((NFAState)alt.transition(0).target,
+				closure((NFAState)alt.transition[0].target,
 						altNum,
 						initialContext,
 						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
 						startState,
-						true);
+						true
+				);
 				altNum = 1; // make next alt the first
 			}
 			else {
-				closure((NFAState)alt.transition(0).target,
+				closure((NFAState)alt.transition[0].target,
 						altNum,
 						initialContext,
 						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
 						startState,
-						true);
+						true
+				);
 				altNum++;
 			}
 			i++;
 
 			// move to next alternative
-			if ( alt.transition(1)==null ) {
+			if ( alt.transition[1] ==null ) {
 				break;
 			}
-			alt = (NFAState)alt.transition(1).target;
+			alt = (NFAState)alt.transition[1].target;
 		}
 
 		// now DFA start state has the complete closure for the decision
@@ -195,6 +190,7 @@ public class NFAToDFAConverter {
 		// NFA states.
 		dfa.addState(startState); // make sure dfa knows about this state
 		work.add(startState);
+		computingStartState = false;
 		return startState;
 	}
 
@@ -206,8 +202,9 @@ public class NFAToDFAConverter {
 	protected void findNewDFAStatesAndAddDFATransitions(DFAState d) {
 		//System.out.println("work on DFA state "+d);
 		OrderedHashSet labels = d.getReachableLabels();
+		//System.out.println("reachable labels="+labels);
+
 		/*
-		System.out.println("reachable="+labels.toString());
 		System.out.println("|reachable|/|nfaconfigs|="+
 				labels.size()+"/"+d.getNFAConfigurations().size()+"="+
 				labels.size()/(float)d.getNFAConfigurations().size());
@@ -238,7 +235,7 @@ public class NFAToDFAConverter {
 		// just reversing the resolution of ambiguity.
 		// TODO: should this be done in the resolveAmbig method?
 		Label EOTLabel = new Label(Label.EOT);
-		boolean containsEOT = labels.contains(EOTLabel);
+		boolean containsEOT = labels!=null && labels.contains(EOTLabel);
 		if ( !dfa.isGreedy() && containsEOT ) {
 			convertToEOTAcceptState(d);
 			return; // no more work to do on this accept state
@@ -270,25 +267,26 @@ public class NFAToDFAConverter {
 		int numberOfEdgesEmanating = 0;
 		Map targetToLabelMap = new HashMap();
 		// for each label that could possibly emanate from NFAStates of d
-		// (abort if we find any closure operation on a configuration of d
-		//  that finds multiple alts with recursion, non-LL(*), as we cannot
-		//  trust any reach operations from d since we are blind to some
-		//  paths.  Leave state a dead-end and try to resolve with preds)
-		for (int i=0; !d.abortedDueToMultipleRecursiveAlts && i<labels.size(); i++) {
+		int numLabels = 0;
+		if ( labels!=null ) {
+			numLabels = labels.size();
+		}
+		for (int i=0; i<numLabels; i++) {
 			Label label = (Label)labels.get(i);
 			DFAState t = reach(d, label);
 			if ( debug ) {
-				System.out.println("DFA state after reach "+d+"-" +
+				System.out.println("DFA state after reach "+label+" "+d+"-" +
 								   label.toString(dfa.nfa.grammar)+"->"+t);
 			}
-            if ( t==null ) {
-                // nothing was reached by label due to conflict resolution
+			if ( t==null ) {
+				// nothing was reached by label due to conflict resolution
 				// EOT also seems to be in here occasionally probably due
 				// to an end-of-rule state seeing it even though we'll pop
 				// an invoking state off the state; don't bother to conflict
 				// as this labels set is a covering approximation only.
-                continue;
+				continue;
 			}
+			//System.out.println("dfa.k="+dfa.getUserMaxLookahead());
 			if ( t.getUniqueAlt()==NFA.INVALID_ALT_NUMBER ) {
 				// Only compute closure if a unique alt number is not known.
 				// If a unique alternative is mentioned among all NFA
@@ -305,39 +303,37 @@ public class NFAToDFAConverter {
 							   "->"+t);
 							   */
 
-			// add if not in DFA yet even if its closure aborts due to non-LL(*);
-			// getting to the state is ok, we just can't see where to go next--it's
-			// a blind alley.
+			// add if not in DFA yet and then make d-label->t
 			DFAState targetState = addDFAStateToWorkList(t);
 
 			numberOfEdgesEmanating +=
 				addTransition(d, label, targetState, targetToLabelMap);
 
 			// lookahead of target must be one larger than d's k
+			// We are possibly setting the depth of a pre-existing state
+			// that is equal to one we just computed...not sure if that's
+			// ok.
 			targetState.setLookaheadDepth(d.getLookaheadDepth() + 1);
-
-			// closure(t) might have aborted, but addDFAStateToWorkList will try
-			// to resolve t with predicates.  If that fails, must give an error
-			// Note: this is tested on the target of d not d.
-			if ( t.abortedDueToMultipleRecursiveAlts && !t.isResolvedWithPredicates() ) {
-				// no predicates to resolve non-LL(*) decision, report
-				t.dfa.probe.reportNonLLStarDecision(t.dfa);
-			}
 		}
 
 		//System.out.println("DFA after reach / closures:\n"+dfa);
 
 		if ( !d.isResolvedWithPredicates() && numberOfEdgesEmanating==0 ) {
+			//System.out.println("dangling DFA state "+d+"\nAfter reach / closures:\n"+dfa);
 			// TODO: can fixed lookahead hit a dangling state case?
 			// TODO: yes, with left recursion
-			// TODO: alter DANGLING err template to have input to that state
 			//System.err.println("dangling state alts: "+d.getAltSet());
 			dfa.probe.reportDanglingState(d);
 			// turn off all configurations except for those associated with
 			// min alt number; somebody has to win else some input will not
 			// predict any alt.
 			int minAlt = resolveByPickingMinAlt(d, null);
-			convertToAcceptState(d, minAlt); // force it to be an accept state
+			// force it to be an accept state
+			// don't call convertToAcceptState() which merges stop states.
+			// other states point at us; don't want them pointing to dead states
+			d.setAcceptState(true); // might be adding new accept state for alt
+			dfa.setAcceptState(minAlt, d);
+			//convertToAcceptState(d, minAlt); // force it to be an accept state
 		}
 
 		// Check to see if we need to add any semantic predicate transitions
@@ -463,21 +459,22 @@ public class NFAToDFAConverter {
 		if ( debug ) {
 			System.out.println("closure("+d+")");
 		}
-		Set configs = new HashSet();
+
+		List<NFAConfiguration> configs = new ArrayList<NFAConfiguration>();
 		// Because we are adding to the configurations in closure
 		// must clone initial list so we know when to stop doing closure
-		// TODO: expensive, try to get around this alloc / copy
-		configs.addAll(d.getNFAConfigurations());
+		configs.addAll(d.nfaConfigurations);
 		// for each NFA configuration in d (abort if we detect non-LL(*) state)
-		Iterator iter = configs.iterator();
-		while (!d.abortedDueToMultipleRecursiveAlts && iter.hasNext() ) {
-			NFAConfiguration c = (NFAConfiguration)iter.next();
+		int numConfigs = configs.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = (NFAConfiguration)configs.get(i);
 			if ( c.singleAtomTransitionEmanating ) {
 				continue; // ignore NFA states w/o epsilon transitions
 			}
 			//System.out.println("go do reach for NFA state "+c.state);
 			// figure out reachable NFA states from each of d's nfa states
-			// via epsilon transitions
+			// via epsilon transitions.
+			// Fill configsInClosure rather than altering d configs inline
 			closure(dfa.nfa.getState(c.state),
 					c.alt,
 					c.context,
@@ -485,6 +482,7 @@ public class NFAToDFAConverter {
 					d,
 					false);
 		}
+		//System.out.println("after closure d="+d);
 		d.closureBusy = null; // wack all that memory used during closure
 	}
 
@@ -599,27 +597,17 @@ public class NFAToDFAConverter {
 						boolean collectPredicates)
 	{
 		if ( debug ){
-			System.out.println("closure at NFA state "+p.stateNumber+"|"+
+			System.out.println("closure at "+p.enclosingRule.name+" state "+p.stateNumber+"|"+
 							   alt+" filling DFA state "+d.stateNumber+" with context "+context
 							   );
 		}
 
-		if ( d.abortedDueToMultipleRecursiveAlts ) {
-			// keep walking back out, we're in the process of terminating
-			// this closure operation on NFAState p contained with DFAState d
-			return;
-		}
-
-		/* NOTE SURE WE NEED THIS FAILSAFE NOW 11/8/2006 and it complicates
-		   MY ALGORITHM TO HAVE TO ABORT ENTIRE DFA CONVERSION
-		   */
 		if ( DFA.MAX_TIME_PER_DFA_CREATION>0 &&
 			 System.currentTimeMillis() - d.dfa.conversionStartTime >=
 			 DFA.MAX_TIME_PER_DFA_CREATION )
 		{
-			// report and back your way out; we've blown up somehow
-			dfa.probe.reportEarlyTermination();
-			return;
+			// bail way out; we've blown up somehow
+			throw new AnalysisTimeoutException(d.dfa);
 		}
 
 		NFAConfiguration proposedNFAConfiguration =
@@ -631,8 +619,9 @@ public class NFAToDFAConverter {
 		// Avoid infinite recursion
 		if ( closureIsBusy(d, proposedNFAConfiguration) ) {
 			if ( debug ) {
-				System.out.println("avoid visiting exact closure computation NFA config: "+proposedNFAConfiguration);
-				System.out.println("state is "+d.dfa.decisionNumber+"."+d);
+				System.out.println("avoid visiting exact closure computation NFA config: "+
+								   proposedNFAConfiguration+" in "+p.enclosingRule.name);
+				System.out.println("state is "+d.dfa.decisionNumber+"."+d.stateNumber);
 			}
 			return;
 		}
@@ -644,18 +633,17 @@ public class NFAToDFAConverter {
 		d.addNFAConfiguration(p, proposedNFAConfiguration);
 
 		// Case 1: are we a reference to another rule?
-		Transition transition0 = p.transition(0);
+		Transition transition0 = p.transition[0];
 		if ( transition0 instanceof RuleClosureTransition ) {
 			int depth = context.recursionDepthEmanatingFromState(p.stateNumber);
 			// Detect recursion by more than a single alt, which indicates
 			// that the decision's lookahead language is non-regular; terminate
 			if ( depth == 1 && d.dfa.getUserMaxLookahead()==0 ) { // k=* only
-			//if ( depth >= NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK ) {
 				d.dfa.recursiveAltSet.add(alt); // indicate that this alt is recursive
 				if ( d.dfa.recursiveAltSet.size()>1 ) {
 					//System.out.println("recursive alts: "+d.dfa.recursiveAltSet.toString());
 					d.abortedDueToMultipleRecursiveAlts = true;
-					return;
+					throw new NonLLStarDecisionException(d.dfa);
 				}
 				/*
 				System.out.println("alt "+alt+" in rule "+p.enclosingRule+" dec "+d.dfa.decisionNumber+
@@ -671,8 +659,11 @@ public class NFAToDFAConverter {
 				System.out.println("OVF state "+d);
 				System.out.println("proposed "+proposedNFAConfiguration);
 				*/
-				d.dfa.probe.reportRecursiveOverflow(d, proposedNFAConfiguration);
 				d.abortedDueToRecursionOverflow = true;
+				d.dfa.probe.reportRecursionOverflow(d, proposedNFAConfiguration);
+				if ( debug ) {
+					System.out.println("analysis overflow in closure("+d.stateNumber+")");
+				}
 				return;
 			}
 
@@ -685,7 +676,7 @@ public class NFAToDFAConverter {
 			// pushed on the stack).
 			// Reset the context to reflect the fact we invoked rule
 			NFAContext newContext = new NFAContext(context, p);
-			// System.out.print("invoking rule "+nfa.getGrammar().getRuleName(ref.getRuleIndex()));
+			//System.out.println("invoking rule "+ref.rule.name);
 			// System.out.println(" context="+context);
 			// traverse epsilon edge to new rule
 			NFAState ruleTarget = (NFAState)ref.target;
@@ -695,53 +686,56 @@ public class NFAToDFAConverter {
 		else if ( p.isAcceptState() && context.parent!=null ) {
 			NFAState whichStateInvokedRule = context.invokingState;
 			RuleClosureTransition edgeToRule =
-				(RuleClosureTransition)whichStateInvokedRule.transition(0);
-			NFAState continueState = edgeToRule.getFollowState();
+				(RuleClosureTransition)whichStateInvokedRule.transition[0];
+			NFAState continueState = edgeToRule.followState;
 			NFAContext newContext = context.parent; // "pop" invoking state
 			closure(continueState, alt, newContext, semanticContext, d, collectPredicates);
 		}
-		/*
-		11/27/2005: I tried adding this but it highlighted that
-		lexer rules needed to be called from Tokens not just ref'd directly
-		so their contexts are different for F : I '.' ;  I : '0' ;  otherwise
-		we get an ambiguity.  The context of state following '0' has same
-		NFA state with [6 $] and [$] hence they conflict.  We need to get
-		the other stack call in there.
-		else if ( dfa.nfa.grammar.type == Grammar.LEXER &&
-			      p.isAcceptState() &&
-			context.invokingState.enclosingRule.equals("Tokens") )
-		{
-			// hit the end of a lexer rule when no one has invoked that rule
-			// (this will be the case if Tokens rule analysis reaches the
-			// stop state of a token in its alt list).
-			// Must not follow the FOLLOW links; must return
-			return;
-		}
-		*/
 		// Case 3: end of rule state, nobody invoked this rule (no context)
 		//    Fall thru to be handled by case 4 automagically.
 		// Case 4: ordinary NFA->DFA conversion case: simple epsilon transition
 		else {
 			// recurse down any epsilon transitions
 			if ( transition0!=null && transition0.isEpsilon() ) {
+				boolean collectPredicatesAfterAction = collectPredicates;
+				if ( transition0.isAction() && collectPredicates ) {
+					collectPredicatesAfterAction = false;
+					/*
+					if ( computingStartState ) {
+						System.out.println("found action during prediction closure "+((ActionLabel)transition0.label).actionAST.token);
+					}
+					 */
+				}
 				closure((NFAState)transition0.target,
 						alt,
 						context,
 						semanticContext,
 						d,
-						collectPredicates);
+						collectPredicatesAfterAction
+				);
 			}
 			else if ( transition0!=null && transition0.isSemanticPredicate() ) {
-				// continue closure here too, but add the sem pred to ctx
-				SemanticContext newSemanticContext = semanticContext;
-				if ( collectPredicates ) {
-					// AND the previous semantic context with new pred
-					SemanticContext labelContext =
-						transition0.label.getSemanticContext();
-					// do not hoist syn preds from other rules; only get if in
-					// starting state's rule (i.e., context is empty)
-					int walkAlt =
-						dfa.decisionNFAStartState.translateDisplayAltToWalkAlt(dfa, alt);
+                SemanticContext labelContext = transition0.label.getSemanticContext();
+                if ( computingStartState ) {
+                    if ( collectPredicates ) {
+                        // only indicate we can see a predicate if we're collecting preds
+                        // Could be computing start state & seen an action before this.
+                        dfa.predicateVisible = true;
+                    }
+                    else {
+                        // this state has a pred, but we can't see it.
+                        dfa.hasPredicateBlockedByAction = true;
+                        // System.out.println("found pred during prediction but blocked by action found previously");
+                    }
+                }
+                // continue closure here too, but add the sem pred to ctx
+                SemanticContext newSemanticContext = semanticContext;
+                if ( collectPredicates ) {
+                    // AND the previous semantic context with new pred
+                    // do not hoist syn preds from other rules; only get if in
+                    // starting state's rule (i.e., context is empty)
+                    int walkAlt =
+						dfa.decisionNFAStartState.translateDisplayAltToWalkAlt(alt);
 					NFAState altLeftEdge =
 						dfa.nfa.grammar.getNFAStateForAltOfDecision(dfa.decisionNFAStartState,walkAlt);
 					/*
@@ -752,7 +746,7 @@ public class NFAToDFAConverter {
 						altLeftEdge.transition(0).target.stateNumber);
 					*/
 					if ( !labelContext.isSyntacticPredicate() ||
-						 p==altLeftEdge.transition(0).target )
+						 p==altLeftEdge.transition[0].target )
 					{
 						//System.out.println("&"+labelContext+" enclosingRule="+p.enclosingRule);
 						newSemanticContext =
@@ -766,7 +760,7 @@ public class NFAToDFAConverter {
 						d,
 						collectPredicates);
 			}
-			Transition transition1 = p.transition(1);
+			Transition transition1 = p.transition[1];
 			if ( transition1!=null && transition1.isEpsilon() ) {
 				closure((NFAState)transition1.target,
 						alt,
@@ -789,51 +783,44 @@ public class NFAToDFAConverter {
 	 *  then clearly the exact same computation is proposed.  If a context
 	 *  is a suffix of the other, then again the computation is in an
 	 *  identical context.  ?$ and ??$ are considered the same stack.
-	 *  We have to walk configurations linearly doing the comparison instead
-	 *  of a set for exact matches.
-	 *
-	 *  We cannot use a set hash table for this lookup as contexts that are
-	 *  suffixes could be !equal() but their hashCode()s would be different;
-	 *  that's a problem for a HashSet.  This costs a lot actually, it
-	 *  takes about 490ms vs 355ms for Java grammar's analysis phase when
-	 *  I moved away from hash lookup.  Argh!  Still it's small.  For newbie
-	 *  generated grammars though this really speeds things up because it
-	 *  avoids chasing its tail during closure operations on highly left-
-	 *  recursive grammars.
-	 *
-	 *  Ok, backing this out to use exact match again for speed.  We will
+	 *  We could walk configurations linearly doing the comparison instead
+	 *  of a set for exact matches but it's much slower because you can't
+	 *  do a Set lookup.  I use exact match as ANTLR
 	 *  always detect the conflict later when checking for context suffixes...
-	 *  I was just trying to prevent unnecessary closures for random crap
-	 *  submitted by newbies.  Instead now I check for left-recursive stuff
-	 *  and terminate before analysis obviates the need to do this more
-	 *  expensive computation.
-	 *
-	 *  If the semantic context is different, then allow new computation.
+	 *  I check for left-recursive stuff and terminate before analysis to
+	 *  avoid need to do this more expensive computation.
+	 *
+	 *  12-31-2007: I had to use the loop again rather than simple
+	 *  closureBusy.contains(proposedNFAConfiguration) lookup.  The
+	 *  semantic context should not be considered when determining if
+	 *  a closure operation is busy.  I saw a FOLLOW closure operation
+	 *  spin until time out because the predicate context kept increasing
+	 *  in size even though it's same boolean value.  This seems faster also
+	 *  because I'm not doing String.equals on the preds all the time.
+	 *
+	 *  05-05-2008: Hmm...well, i think it was a mistake to remove the sem
+	 *  ctx check below...adding back in.  Coincides with report of ANTLR
+	 *  getting super slow: http://www.antlr.org:8888/browse/ANTLR-235
+	 *  This could be because it doesn't properly compute then resolve
+	 *  a predicate expression.  Seems to fix unit test:
+	 *  TestSemanticPredicates.testSemanticContextPreventsEarlyTerminationOfClosure()
+	 *  Changing back to Set from List.  Changed a large grammar from 8 minutes
+	 *  to 11 seconds.  Cool.  Closing ANTLR-235.
 	 */
 	public static boolean closureIsBusy(DFAState d,
 										NFAConfiguration proposedNFAConfiguration)
 	{
-		// Check epsilon cycle (same state, same alt, same context)
 		return d.closureBusy.contains(proposedNFAConfiguration);
-		/*
-		// Uncomment to get all conflicts not just exact context matches
-		for (int i = 0; i < d.closureBusy.size(); i++) {
+/*
+		int numConfigs = d.closureBusy.size();
+		// Check epsilon cycle (same state, same alt, same context)
+		for (int i = 0; i < numConfigs; i++) {
 			NFAConfiguration c = (NFAConfiguration) d.closureBusy.get(i);
 			if ( proposedNFAConfiguration.state==c.state &&
 				 proposedNFAConfiguration.alt==c.alt &&
 				 proposedNFAConfiguration.semanticContext.equals(c.semanticContext) &&
 				 proposedNFAConfiguration.context.suffix(c.context) )
 			{
-				// if computing closure of start state, we tried to
-				// recompute a closure, must be left recursion.  We got back
-				// to the same computation.  After having consumed no input,
-				// we're back.  Only track rule invocation states
-				if ( (dfa.startState==null ||
-					  d.stateNumber==dfa.startState.stateNumber) &&
-					 p.transition(0) instanceof RuleClosureTransition )
-				{
-					d.dfa.probe.reportLeftRecursion(d, proposedNFAConfiguration);
-				}
 				return true;
 			}
 		}
@@ -860,20 +847,24 @@ public class NFAToDFAConverter {
 	 *  accept states if the rule was invoked by somebody.
 	 */
 	public DFAState reach(DFAState d, Label label) {
+		//System.out.println("reach "+label.toString(dfa.nfa.grammar)+" from "+d.stateNumber);
 		DFAState labelDFATarget = dfa.newState();
-		// for each NFA state in d, add in target states for label
-		int intLabel = label.getAtom();
-		IntSet setLabel = label.getSet();
-		Iterator iter = d.getNFAConfigurations().iterator();
-		while ( iter.hasNext() ) {
-			NFAConfiguration c = (NFAConfiguration)iter.next();
+
+		// for each NFA state in d with a labeled edge,
+		// add in target states for label
+		//System.out.println("size(d.state="+d.stateNumber+")="+d.nfaConfigurations.size());
+		//System.out.println("size(labeled edge states)="+d.configurationsWithLabeledEdges.size());
+		List<NFAConfiguration> configs = d.configurationsWithLabeledEdges;
+		int numConfigs = configs.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = configs.get(i);
 			if ( c.resolved || c.resolveWithPredicate ) {
 				continue; // the conflict resolver indicates we must leave alone
 			}
 			NFAState p = dfa.nfa.getState(c.state);
 			// by design of the grammar->NFA conversion, only transition 0
 			// may have a non-epsilon edge.
-			Transition edge = p.transition(0);
+			Transition edge = p.transition[0];
 			if ( edge==null || !c.singleAtomTransitionEmanating ) {
 				continue;
 			}
@@ -886,34 +877,28 @@ public class NFAToDFAConverter {
 			// to this rule in the invoking rule.  In other words, if
 			// somebody called this rule, don't see the EOT emanating from
 			// this accept state.
-			if ( c.context.parent!=null &&
-				 edgeLabel.isAtom() &&
-				 edgeLabel.getAtom()==Label.EOT )
-			{
+			if ( c.context.parent!=null && edgeLabel.label==Label.EOT )	{
 				continue;
 			}
 
 			// Labels not unique at this point (not until addReachableLabels)
 			// so try simple int label match before general set intersection
 			//System.out.println("comparing "+edgeLabel+" with "+label);
-			boolean matched =
-				(!label.isSet()&&edgeLabel.getAtom()==intLabel)||
-				(!edgeLabel.getSet().and(setLabel).isNil());
-			if ( matched ) {
+			if ( Label.intersect(label, edgeLabel) ) {
 				// found a transition with label;
 				// add NFA target to (potentially) new DFA state
-                labelDFATarget.addNFAConfiguration(
+				NFAConfiguration newC = labelDFATarget.addNFAConfiguration(
 					(NFAState)edge.target,
 					c.alt,
 					c.context,
 					c.semanticContext);
 			}
 		}
-        if ( labelDFATarget.getNFAConfigurations().size()==0 ) {
-            // kill; it's empty
-            dfa.setState(labelDFATarget.stateNumber, null);
-            labelDFATarget = null;
-        }
+		if ( labelDFATarget.nfaConfigurations.size()==0 ) {
+			// kill; it's empty
+			dfa.setState(labelDFATarget.stateNumber, null);
+			labelDFATarget = null;
+		}
         return labelDFATarget;
 	}
 
@@ -930,21 +915,20 @@ public class NFAToDFAConverter {
 	 */
 	protected void convertToEOTAcceptState(DFAState d) {
 		Label eot = new Label(Label.EOT);
-		Iterator iter = d.getNFAConfigurations().iterator();
-		while ( iter.hasNext() ) {
-			NFAConfiguration c =
-					(NFAConfiguration)iter.next();
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = (NFAConfiguration)d.nfaConfigurations.get(i);
 			if ( c.resolved || c.resolveWithPredicate ) {
 				continue; // the conflict resolver indicates we must leave alone
 			}
 			NFAState p = dfa.nfa.getState(c.state);
-			Transition edge = p.transition(0);
+			Transition edge = p.transition[0];
 			Label edgeLabel = edge.label;
 			if ( edgeLabel.equals(eot) ) {
 				//System.out.println("config with EOT: "+c);
 				d.setAcceptState(true);
 				//System.out.println("d goes from "+d);
-				d.getNFAConfigurations().clear();
+				d.nfaConfigurations.clear();
 				d.addNFAConfiguration(p,c.alt,c.context,c.semanticContext);
 				//System.out.println("to "+d);
 				return; // assume only one EOT transition
@@ -968,7 +952,7 @@ public class NFAToDFAConverter {
 			// infinite recursion on a state before it knows
 			// whether or not the state will already be
 			// found after closure on it finishes.  It could be
-			// refer to a state that will ultimately not make it
+			// referring to a state that will ultimately not make it
 			// into the reachable state space and the error
 			// reporting must be able to compute the path from
 			// start to the error state with infinite recursion
@@ -1035,9 +1019,6 @@ public class NFAToDFAConverter {
 				}
 				// else consider it a new accept state; fall through.
 			}
-			d.setAcceptState(true); // new accept state for alt
-			dfa.setAcceptState(alt, d);
-			return d;
 		}
 		d.setAcceptState(true); // new accept state for alt
 		dfa.setAcceptState(alt, d);
@@ -1200,10 +1181,9 @@ public class NFAToDFAConverter {
 		// transition on EOT to get to this DFA state as well so all
 		// states in d must be targets of EOT.  These are the end states
 		// created in NFAFactory.build_EOFState
-		NFAConfiguration anyConfig;
-		Iterator itr = d.nfaConfigurations.iterator();
-        anyConfig = (NFAConfiguration)itr.next();
+		NFAConfiguration anyConfig = d.nfaConfigurations.get(0);
 		NFAState anyState = dfa.nfa.getState(anyConfig.state);
+
 		// if d is target of EOT and more than one predicted alt
 		// indicate that d is nondeterministic on all alts otherwise
 		// it looks like state has no problem
@@ -1234,24 +1214,15 @@ public class NFAToDFAConverter {
 			// TODO: how to turn off when it's only the FOLLOW that is
 			// conflicting.  This used to shut off even alts i,j < n
 			// conflict warnings. :(
-			/*
-			if ( dfa.isGreedy() ) {
-				// if nongreedy then they have said to let it fall out of loop
-				// don't report the problem
-				dfa.probe.reportNondeterminism(d);
-			}
-			else {
-				// TODO: remove when sure it's cool
-				dfa.probe.reportNondeterminism(d);
-				System.out.println("temp warning: warning suppressed for nongreedy loop");
-			}
-			*/
 		}
 
 		// ATTEMPT TO RESOLVE WITH SEMANTIC PREDICATES
 		boolean resolved =
 			tryToResolveWithSemanticPredicates(d, nondeterministicAlts);
 		if ( resolved ) {
+			if ( debug ) {
+				System.out.println("resolved DFA state "+d.stateNumber+" with pred");
+			}
 			d.resolvedWithPredicates = true;
 			dfa.probe.reportNondeterminismResolvedWithSemanticPredicate(d);
 			return;
@@ -1306,8 +1277,7 @@ public class NFAToDFAConverter {
 			min = getMinAlt(nondeterministicAlts);
 		}
 		else {
-			// else walk the actual configurations to find the min
-			min = getMinAlt(d);
+			min = d.minAltInConfigurations;
 		}
 
 		turnOffOtherAlts(d, min, nondeterministicAlts);
@@ -1327,11 +1297,10 @@ public class NFAToDFAConverter {
 	/** turn off all states associated with alts other than the good one
 	 *  (as long as they are one of the nondeterministic ones)
 	 */
-	protected static void turnOffOtherAlts(DFAState d, int min, Set nondeterministicAlts) {
-		Iterator iter = d.nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+	protected static void turnOffOtherAlts(DFAState d, int min, Set<Integer> nondeterministicAlts) {
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
 			if ( configuration.alt!=min ) {
 				if ( nondeterministicAlts==null ||
 					 nondeterministicAlts.contains(Utils.integer(configuration.alt)) )
@@ -1342,24 +1311,9 @@ public class NFAToDFAConverter {
 		}
 	}
 
-	protected static int getMinAlt(DFAState d) {
-		int min = Integer.MAX_VALUE;
-		Iterator iter = d.nfaConfigurations.iterator();
-		NFAConfiguration configuration;
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
-			if ( configuration.alt<min ) {
-				min = configuration.alt;
-			}
-		}
-		return min;
-	}
-
-	protected static int getMinAlt(Set nondeterministicAlts) {
+	protected static int getMinAlt(Set<Integer> nondeterministicAlts) {
 		int min = Integer.MAX_VALUE;
-		Iterator iter = nondeterministicAlts.iterator();
-		while (iter.hasNext()) {
-			Integer altI = (Integer) iter.next();
+		for (Integer altI : nondeterministicAlts) {
 			int alt = altI.intValue();
 			if ( alt < min ) {
 				min = alt;
@@ -1399,7 +1353,7 @@ public class NFAToDFAConverter {
 	protected boolean tryToResolveWithSemanticPredicates(DFAState d,
 														 Set nondeterministicAlts)
 	{
-		Map altToPredMap =
+		Map<Integer, SemanticContext> altToPredMap =
 				getPredicatesPerNonDeterministicAlt(d, nondeterministicAlts);
 
 		if ( altToPredMap.size()==0 ) {
@@ -1457,10 +1411,9 @@ public class NFAToDFAConverter {
 
 			altToPredMap.put(Utils.integer(nakedAlt), nakedAltPred);
 			// set all config with alt=nakedAlt to have the computed predicate
-			Iterator iter = d.nfaConfigurations.iterator();
-			NFAConfiguration configuration;
-			while (iter.hasNext()) {
-				configuration = (NFAConfiguration) iter.next();
+			int numConfigs = d.nfaConfigurations.size();
+			for (int i = 0; i < numConfigs; i++) {
+				NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
 				if ( configuration.alt == nakedAlt ) {
 					configuration.semanticContext = nakedAltPred;
 				}
@@ -1475,10 +1428,9 @@ public class NFAToDFAConverter {
 			if ( d.abortedDueToRecursionOverflow ) {
 				d.dfa.probe.removeRecursiveOverflowState(d);
 			}
-			Iterator iter = d.nfaConfigurations.iterator();
-			NFAConfiguration configuration;
-			while (iter.hasNext()) {
-				configuration = (NFAConfiguration) iter.next();
+			int numConfigs = d.nfaConfigurations.size();
+			for (int i = 0; i < numConfigs; i++) {
+				NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
 				SemanticContext semCtx = (SemanticContext)
 						altToPredMap.get(Utils.integer(configuration.alt));
 				if ( semCtx!=null ) {
@@ -1487,6 +1439,7 @@ public class NFAToDFAConverter {
 					configuration.resolveWithPredicate = true;
 					configuration.semanticContext = semCtx; // reset to combined
 					altToPredMap.remove(Utils.integer(configuration.alt));
+
 					// notify grammar that we've used the preds contained in semCtx
 					if ( semCtx.isSyntacticPredicate() ) {
 						dfa.nfa.grammar.synPredUsedInDFA(dfa, semCtx);
@@ -1518,26 +1471,42 @@ public class NFAToDFAConverter {
 	 *  not tell us how to resolve anything.  So, if any NFA configuration
 	 *  in this DFA state does not have a semantic context, the alt cannot
 	 *  be resolved with a predicate.
+	 *
+	 *  If nonnull, incidentEdgeLabel tells us what NFA transition label
+	 *  we did a reach on to compute state d.  d may have insufficient
+	 *  preds, so we really want this for the error message.
 	 */
-	protected Map getPredicatesPerNonDeterministicAlt(DFAState d,
-													  Set nondeterministicAlts)
+	protected Map<Integer, SemanticContext> getPredicatesPerNonDeterministicAlt(DFAState d,
+																				Set nondeterministicAlts)
 	{
 		// map alt to combined SemanticContext
-		Map altToPredicateContextMap = new HashMap();
+		Map<Integer, SemanticContext> altToPredicateContextMap =
+			new HashMap<Integer, SemanticContext>();
 		// init the alt to predicate set map
-		Map altToSetOfContextsMap = new HashMap();
+		Map<Integer, Set<SemanticContext>> altToSetOfContextsMap =
+			new HashMap<Integer, Set<SemanticContext>>();
 		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
 			Integer altI = (Integer) it.next();
-			altToSetOfContextsMap.put(altI, new HashSet());
+			altToSetOfContextsMap.put(altI, new HashSet<SemanticContext>());
 		}
-		Set altToIncompletePredicateContextSet = new HashSet();
-		Iterator iter = d.nfaConfigurations.iterator();
-		NFAConfiguration configuration;
+
+		/*
+		List<Label> sampleInputLabels = d.dfa.probe.getSampleNonDeterministicInputSequence(d);
+		String input = d.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
+		System.out.println("sample input: "+input);
+		*/
+
 		// for each configuration, create a unique set of predicates
 		// Also, track the alts with at least one uncovered configuration
 		// (one w/o a predicate); tracks tautologies like p1||true
-		while (iter.hasNext()) {
-			configuration = (NFAConfiguration) iter.next();
+		Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate = new HashMap<Integer, Set<Token>>();
+		Set<Integer> nondetAltsWithUncoveredConfiguration = new HashSet<Integer>();
+		//System.out.println("configs="+d.nfaConfigurations);
+		//System.out.println("configs with preds?"+d.atLeastOneConfigurationHasAPredicate);
+		//System.out.println("configs with preds="+d.configurationsWithPredicateEdges);
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
 			Integer altI = Utils.integer(configuration.alt);
 			// if alt is nondeterministic, combine its predicates
 			if ( nondeterministicAlts.contains(altI) ) {
@@ -1545,39 +1514,33 @@ public class NFAToDFAConverter {
 				if ( configuration.semanticContext !=
 					 SemanticContext.EMPTY_SEMANTIC_CONTEXT )
 				{
-					/*
-					SemanticContext altsExistingPred =(SemanticContext)
-							altToPredicateContextMap.get(Utils.integer(configuration.alt));
-					if ( altsExistingPred!=null ) {
-						// must merge all predicates from configs with same alt
-						SemanticContext combinedContext =
-								SemanticContext.or(
-										altsExistingPred,
-										configuration.semanticContext);
-						System.out.println(altsExistingPred+" OR "+
-										   configuration.semanticContext+
-										   "="+combinedContext);
-						altToPredicateContextMap.put(
-								Utils.integer(configuration.alt),
-								combinedContext
-						);
-					}
-					else {
-						// not seen before, just add it
-						altToPredicateContextMap.put(
-								Utils.integer(configuration.alt),
-								configuration.semanticContext
-						);
-					}
-					*/
-					Set predSet = (Set)altToSetOfContextsMap.get(altI);
+					Set<SemanticContext> predSet = altToSetOfContextsMap.get(altI);
 					predSet.add(configuration.semanticContext);
 				}
 				else {
 					// if no predicate, but it's part of nondeterministic alt
 					// then at least one path exists not covered by a predicate.
 					// must remove predicate for this alt; track incomplete alts
-					altToIncompletePredicateContextSet.add(altI);
+					nondetAltsWithUncoveredConfiguration.add(altI);
+					/*
+					NFAState s = dfa.nfa.getState(configuration.state);
+					System.out.println("###\ndec "+dfa.decisionNumber+" alt "+configuration.alt+
+									   " enclosing rule for nfa state not covered "+
+									   s.enclosingRule);
+					if ( s.associatedASTNode!=null ) {
+						System.out.println("token="+s.associatedASTNode.token);
+					}
+					System.out.println("nfa state="+s);
+
+					if ( s.incidentEdgeLabel!=null && Label.intersect(incidentEdgeLabel, s.incidentEdgeLabel) ) {
+						Set<Token> locations = altToLocationsReachableWithoutPredicate.get(altI);
+						if ( locations==null ) {
+							locations = new HashSet<Token>();
+							altToLocationsReachableWithoutPredicate.put(altI, locations);
+						}
+						locations.add(s.associatedASTNode.token);
+					}
+					*/
 				}
 			}
 		}
@@ -1587,20 +1550,18 @@ public class NFAToDFAConverter {
 		// Also, track the list of incompletely covered alts: those alts
 		// with at least 1 predicate and at least one configuration w/o a
 		// predicate. We want this in order to report to the decision probe.
-		List incompletelyCoveredAlts = new ArrayList();
+		List<Integer> incompletelyCoveredAlts = new ArrayList<Integer>();
 		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
 			Integer altI = (Integer) it.next();
-			Set predSet = (Set)altToSetOfContextsMap.get(altI);
-			if ( altToIncompletePredicateContextSet.contains(altI) ) {
-				SemanticContext insufficientPred =(SemanticContext)
-						altToPredicateContextMap.get(altI);
-				if ( predSet.size()>0 ) {
-					incompletelyCoveredAlts.add(altI);
+			Set<SemanticContext> contextsForThisAlt = altToSetOfContextsMap.get(altI);
+			if ( nondetAltsWithUncoveredConfiguration.contains(altI) ) { // >= 1 config has no ctx
+				if ( contextsForThisAlt.size()>0 ) {    // && at least one pred
+					incompletelyCoveredAlts.add(altI);  // this alt incompleted covered
 				}
-				continue;
+				continue; // don't include at least 1 config has no ctx
 			}
 			SemanticContext combinedContext = null;
-			for (Iterator itrSet = predSet.iterator(); itrSet.hasNext();) {
+			for (Iterator itrSet = contextsForThisAlt.iterator(); itrSet.hasNext();) {
 				SemanticContext ctx = (SemanticContext) itrSet.next();
 				combinedContext =
 						SemanticContext.or(combinedContext,ctx);
@@ -1608,24 +1569,52 @@ public class NFAToDFAConverter {
 			altToPredicateContextMap.put(altI, combinedContext);
 		}
 
-		// remove any predicates from incompletely covered alts
-		/*
-		iter = altToIncompletePredicateContextSet.iterator();
-		List incompletelyCoveredAlts = new ArrayList();
-		while (iter.hasNext()) {
-			Integer alt = (Integer) iter.next();
-			SemanticContext insufficientPred =(SemanticContext)
-					altToPredicateContextMap.get(alt);
-			if ( insufficientPred!=null ) {
-				incompletelyCoveredAlts.add(alt);
-			}
-			altToPredicateContextMap.remove(alt);
-		}
-		*/
-
 		if ( incompletelyCoveredAlts.size()>0 ) {
+			/*
+			System.out.println("prob in dec "+dfa.decisionNumber+" state="+d);
+			FASerializer serializer = new FASerializer(dfa.nfa.grammar);
+			String result = serializer.serialize(dfa.startState);
+			System.out.println("dfa: "+result);
+			System.out.println("incomplete alts: "+incompletelyCoveredAlts);
+			System.out.println("nondet="+nondeterministicAlts);
+			System.out.println("nondetAltsWithUncoveredConfiguration="+ nondetAltsWithUncoveredConfiguration);
+			System.out.println("altToCtxMap="+altToSetOfContextsMap);
+			System.out.println("altToPredicateContextMap="+altToPredicateContextMap);
+			*/
+			for (int i = 0; i < numConfigs; i++) {
+				NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
+				Integer altI = Utils.integer(configuration.alt);
+				if ( incompletelyCoveredAlts.contains(altI) &&
+					 configuration.semanticContext == SemanticContext.EMPTY_SEMANTIC_CONTEXT )
+				{
+					NFAState s = dfa.nfa.getState(configuration.state);
+					/*
+					System.out.print("nondet config w/o context "+configuration+
+									 " incident "+(s.incidentEdgeLabel!=null?s.incidentEdgeLabel.toString(dfa.nfa.grammar):null));
+					if ( s.associatedASTNode!=null ) {
+						System.out.print(" token="+s.associatedASTNode.token);
+					}
+					else System.out.println();
+					*/
+                    // We want to report getting to an NFA state with an
+                    // incoming label, unless it's EOF, w/o a predicate.
+                    if ( s.incidentEdgeLabel!=null && s.incidentEdgeLabel.label != Label.EOF ) {
+                        if ( s.associatedASTNode==null || s.associatedASTNode.token==null ) {
+							ErrorManager.internalError("no AST/token for nonepsilon target w/o predicate");
+						}
+						else {
+							Set<Token> locations = altToLocationsReachableWithoutPredicate.get(altI);
+							if ( locations==null ) {
+								locations = new HashSet<Token>();
+								altToLocationsReachableWithoutPredicate.put(altI, locations);
+							}
+							locations.add(s.associatedASTNode.token);
+						}
+					}
+				}
+			}
 			dfa.probe.reportIncompletelyCoveredAlts(d,
-													incompletelyCoveredAlts);
+													altToLocationsReachableWithoutPredicate);
 		}
 
 		return altToPredicateContextMap;
@@ -1663,9 +1652,9 @@ public class NFAToDFAConverter {
 	protected void addPredicateTransitions(DFAState d) {
 		List configsWithPreds = new ArrayList();
 		// get a list of all configs with predicates
-		Iterator iter = d.getNFAConfigurations().iterator();
-		while ( iter.hasNext() ) {
-			NFAConfiguration c = (NFAConfiguration)iter.next();
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = (NFAConfiguration)d.nfaConfigurations.get(i);
 			if ( c.resolveWithPredicate ) {
 				configsWithPreds.add(c);
 			}
@@ -1694,6 +1683,7 @@ public class NFAToDFAConverter {
 												  c.context,
 												  c.semanticContext);
 				predDFATarget.setAcceptState(true);
+				dfa.setAcceptState(c.alt, predDFATarget);
 				DFAState existingState = dfa.addState(predDFATarget);
 				if ( predDFATarget != existingState ) {
 					// already there...use/return the existing DFA state that
@@ -1704,7 +1694,7 @@ public class NFAToDFAConverter {
 				}
 			}
 			// add a transition to pred target from d
-			d.addTransition(predDFATarget, new Label(c.semanticContext));
+			d.addTransition(predDFATarget, new PredicateLabel(c.semanticContext));
 		}
 	}
 
diff --git a/src/org/antlr/analysis/StateCluster.java b/tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java
similarity index 80%
copy from src/org/antlr/analysis/StateCluster.java
copy to tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java
index c31e9e2..885bdd9 100644
--- a/src/org/antlr/analysis/StateCluster.java
+++ b/tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,15 +27,12 @@
 */
 package org.antlr.analysis;
 
-/** A Cluster object points to the left/right (start and end) states of a
- *  state machine.  Used to build NFAs.
+/** Used to abort DFA construction when we find non-LL(*) decision; i.e.,
+ *  a decision that has recursion in more than a single alt.
  */
-public class StateCluster {
-    public NFAState left;
-    public NFAState right;
-
-    public StateCluster(NFAState left, NFAState right) {
-        this.left = left;
-        this.right = right;
-    }
+public class NonLLStarDecisionException extends RuntimeException {
+	public DFA abortedDFA;
+	public NonLLStarDecisionException(DFA abortedDFA) {
+		this.abortedDFA = abortedDFA;
+	}
 }
diff --git a/src/org/antlr/analysis/LookaheadSet.java b/tool/src/main/java/org/antlr/analysis/PredicateLabel.java
similarity index 54%
rename from src/org/antlr/analysis/LookaheadSet.java
rename to tool/src/main/java/org/antlr/analysis/PredicateLabel.java
index 8239e06..47595ed 100644
--- a/src/org/antlr/analysis/LookaheadSet.java
+++ b/tool/src/main/java/org/antlr/analysis/PredicateLabel.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,66 +27,59 @@
 */
 package org.antlr.analysis;
 
-import org.antlr.misc.IntervalSet;
-import org.antlr.misc.IntSet;
+import org.antlr.tool.GrammarAST;
 import org.antlr.tool.Grammar;
 
-/** An LL(1) lookahead set; contains a set of token types and a "hasEOF"
- *  condition when the set contains EOF.  Since EOF is -1 everywhere and -1
- *  cannot be stored in my BitSet, I set a condition here.  There may be other
- *  reasons in the future to abstract a LookaheadSet over a raw BitSet.
- */
-public class LookaheadSet {
-	public IntSet tokenTypeSet;
-	public boolean hasEOF;
-
-	public LookaheadSet() {
-		tokenTypeSet = new IntervalSet();
-	}
-
-	public LookaheadSet(IntSet s) {
-		this();
-		tokenTypeSet.addAll(s);
+public class PredicateLabel extends Label {
+	/** A tree of semantic predicates from the grammar AST if label==SEMPRED.
+	 *  In the NFA, labels will always be exactly one predicate, but the DFA
+	 *  may have to combine a bunch of them as it collects predicates from
+	 *  multiple NFA configurations into a single DFA state.
+	 */
+	protected SemanticContext semanticContext;
+	
+	/** Make a semantic predicate label */
+	public PredicateLabel(GrammarAST predicateASTNode) {
+		super(SEMPRED);
+		this.semanticContext = new SemanticContext.Predicate(predicateASTNode);
 	}
 
-	public LookaheadSet(int atom) {
-		tokenTypeSet = IntervalSet.of(atom);
+	/** Make a semantic predicates label */
+	public PredicateLabel(SemanticContext semCtx) {
+		super(SEMPRED);
+		this.semanticContext = semCtx;
 	}
 
-	public void orInPlace(LookaheadSet other) {
-		this.tokenTypeSet.addAll(other.tokenTypeSet);
-		this.hasEOF = this.hasEOF || other.hasEOF;
+	public int hashCode() {
+		return semanticContext.hashCode();
 	}
 
-	public boolean member(int a) {
-		return tokenTypeSet.member(a);
+	public boolean equals(Object o) {
+		if ( o==null ) {
+			return false;
+		}
+		if ( this == o ) {
+			return true; // equals if same object
+		}
+		if ( !(o instanceof PredicateLabel) ) {
+			return false;
+		}
+		return semanticContext.equals(((PredicateLabel)o).semanticContext);
 	}
 
-	public void remove(int a) {
-		tokenTypeSet = tokenTypeSet.subtract(IntervalSet.of(a));
+	public boolean isSemanticPredicate() {
+		return true;
 	}
 
-	public String toString(Grammar g) {
-		if ( tokenTypeSet==null ) {
-			if ( hasEOF ) {
-				return "EOF";
-			}
-			return "";
-		}
-		String r = tokenTypeSet.toString(g);
-		if ( hasEOF ) {
-			return r+"+EOF";
-		}
-		return r;
+	public SemanticContext getSemanticContext() {
+		return semanticContext;
 	}
 
-	public static LookaheadSet EOF() {
-		LookaheadSet eof = new LookaheadSet();
-		eof.hasEOF = true;
-		return eof;
+	public String toString() {
+		return "{"+semanticContext+"}?";
 	}
 
-	public String toString() {
-		return toString(null);
+	public String toString(Grammar g) {
+		return toString();
 	}
 }
diff --git a/src/org/antlr/analysis/RuleClosureTransition.java b/tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java
similarity index 79%
rename from src/org/antlr/analysis/RuleClosureTransition.java
rename to tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java
index 2edc164..16fd26b 100644
--- a/src/org/antlr/analysis/RuleClosureTransition.java
+++ b/tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java
@@ -27,6 +27,9 @@
 */
 package org.antlr.analysis;
 
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Rule;
+
 /** A transition used to reference another rule.  It tracks two targets
  *  really: the actual transition target and the state following the
  *  state that refers to the other rule.  Conversion of an NFA that
@@ -34,27 +37,19 @@ package org.antlr.analysis;
  *  that rule because of these special transitions.
  */
 public class RuleClosureTransition extends Transition {
-    /** Index of rule targeted by this transition */
-    protected int ruleIndex;
+	/** Ptr to the rule definition object for this rule ref */
+	public Rule rule;
 
-    /** What node to begin computations following ref to rule */
-    protected NFAState followState;
+	/** What node to begin computations following ref to rule */
+    public NFAState followState;
 
-    public RuleClosureTransition(int ruleIndex,
-                             NFAState ruleStart,
-                             NFAState followState)
+    public RuleClosureTransition(Rule rule,
+								 NFAState ruleStart,
+								 NFAState followState)
     {
         super(Label.EPSILON, ruleStart);
-        this.ruleIndex = ruleIndex;
+        this.rule = rule;
         this.followState = followState;
-    }
-
-    public NFAState getFollowState() {
-        return followState;
-    }
-
-    public int getRuleIndex() {
-        return ruleIndex;
-    }
+	}
 }
 
diff --git a/src/org/antlr/analysis/SemanticContext.java b/tool/src/main/java/org/antlr/analysis/SemanticContext.java
similarity index 94%
rename from src/org/antlr/analysis/SemanticContext.java
rename to tool/src/main/java/org/antlr/analysis/SemanticContext.java
index 4468c17..3b9c2ae 100644
--- a/src/org/antlr/analysis/SemanticContext.java
+++ b/tool/src/main/java/org/antlr/analysis/SemanticContext.java
@@ -30,7 +30,7 @@ package org.antlr.analysis;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.stringtemplate.StringTemplateGroup;
 import org.antlr.codegen.CodeGenerator;
-import org.antlr.tool.ANTLRParser;
+import org.antlr.grammar.v2.ANTLRParser;
 import org.antlr.tool.GrammarAST;
 import org.antlr.tool.Grammar;
 import java.util.Set;
@@ -86,7 +86,7 @@ public abstract class SemanticContext {
 
 	public static class Predicate extends SemanticContext {
 		/** The AST node in tree created from the grammar holding the predicate */
-		protected GrammarAST predicate;
+		public GrammarAST predicateAST;
 
 		/** Is this a {...}?=> gating predicate or a normal disambiguating {..}?
 		 *  If any predicate in expression is gated, then expression is considered
@@ -113,12 +113,12 @@ public abstract class SemanticContext {
 		protected int constantValue = INVALID_PRED_VALUE;
 
 		public Predicate() {
-			predicate = new GrammarAST();
+			predicateAST = new GrammarAST();
 			this.gated=false;
 		}
 
 		public Predicate(GrammarAST predicate) {
-			this.predicate = predicate;
+			this.predicateAST = predicate;
 			this.gated =
 				predicate.getType()==ANTLRParser.GATED_SEMPRED ||
 				predicate.getType()==ANTLRParser.SYN_SEMPRED ;
@@ -128,7 +128,7 @@ public abstract class SemanticContext {
 		}
 
 		public Predicate(Predicate p) {
-			this.predicate = p.predicate;
+			this.predicateAST = p.predicateAST;
 			this.gated = p.gated;
 			this.synpred = p.synpred;
 			this.constantValue = p.constantValue;
@@ -143,14 +143,14 @@ public abstract class SemanticContext {
 			if ( !(o instanceof Predicate) ) {
 				return false;
 			}
-			return predicate.getText().equals(((Predicate)o).predicate.getText());
+			return predicateAST.getText().equals(((Predicate)o).predicateAST.getText());
 		}
 
 		public int hashCode() {
-			if ( predicate==null ) {
+			if ( predicateAST ==null ) {
 				return 0;
 			}
-			return predicate.getText().hashCode();
+			return predicateAST.getText().hashCode();
 		}
 
 		public StringTemplate genExpr(CodeGenerator generator,
@@ -166,7 +166,7 @@ public abstract class SemanticContext {
 					eST = templates.getInstanceOf("evalPredicate");
 					generator.grammar.decisionsWhoseDFAsUsesSemPreds.add(dfa);
 				}
-				String predEnclosingRuleName = predicate.getEnclosingRule();
+				String predEnclosingRuleName = predicateAST.enclosingRuleName;
 				/*
 				String decisionEnclosingRuleName =
 					dfa.getNFADecisionStartState().getEnclosingRule();
@@ -177,7 +177,7 @@ public abstract class SemanticContext {
 				//eST.setAttribute("pred", this.toString());
 				if ( generator!=null ) {
 					eST.setAttribute("pred",
-									 generator.translateAction(predEnclosingRuleName,predicate));
+									 generator.translateAction(predEnclosingRuleName,predicateAST));
 				}
 			}
 			else {
@@ -201,22 +201,22 @@ public abstract class SemanticContext {
 		}
 
 		public boolean isSyntacticPredicate() {
-			return predicate!=null &&
-				( predicate.getType()==ANTLRParser.SYN_SEMPRED ||
-				  predicate.getType()==ANTLRParser.BACKTRACK_SEMPRED );
+			return predicateAST !=null &&
+				( predicateAST.getType()==ANTLRParser.SYN_SEMPRED ||
+				  predicateAST.getType()==ANTLRParser.BACKTRACK_SEMPRED );
 		}
 
 		public void trackUseOfSyntacticPredicates(Grammar g) {
 			if ( synpred ) {
-				g.synPredNamesUsedInDFA.add(predicate.getText());
+				g.synPredNamesUsedInDFA.add(predicateAST.getText());
 			}
 		}
 
 		public String toString() {
-			if ( predicate==null ) {
+			if ( predicateAST ==null ) {
 				return "<nopred>";
 			}
-			return predicate.getText();
+			return predicateAST.getText();
 		}
 	}
 
@@ -430,6 +430,7 @@ public abstract class SemanticContext {
 	}
 
 	public static SemanticContext and(SemanticContext a, SemanticContext b) {
+		//System.out.println("AND: "+a+"&&"+b);
 		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null ) {
 			return b;
 		}
@@ -439,10 +440,12 @@ public abstract class SemanticContext {
 		if ( a.equals(b) ) {
 			return a; // if same, just return left one
 		}
+		//System.out.println("## have to AND");
 		return new AND(a,b);
 	}
 
 	public static SemanticContext or(SemanticContext a, SemanticContext b) {
+		//System.out.println("OR: "+a+"||"+b);
 		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null ) {
 			return b;
 		}
@@ -472,6 +475,7 @@ public abstract class SemanticContext {
 		else if ( a.equals(b) ) {
 			return a;
 		}
+		//System.out.println("## have to OR");
 		return new OR(a,b);
 	}
 
diff --git a/src/org/antlr/analysis/State.java b/tool/src/main/java/org/antlr/analysis/State.java
similarity index 100%
rename from src/org/antlr/analysis/State.java
rename to tool/src/main/java/org/antlr/analysis/State.java
diff --git a/src/org/antlr/analysis/StateCluster.java b/tool/src/main/java/org/antlr/analysis/StateCluster.java
similarity index 100%
rename from src/org/antlr/analysis/StateCluster.java
rename to tool/src/main/java/org/antlr/analysis/StateCluster.java
diff --git a/src/org/antlr/analysis/Transition.java b/tool/src/main/java/org/antlr/analysis/Transition.java
similarity index 96%
rename from src/org/antlr/analysis/Transition.java
rename to tool/src/main/java/org/antlr/analysis/Transition.java
index 041f2e5..bc74ecf 100644
--- a/src/org/antlr/analysis/Transition.java
+++ b/tool/src/main/java/org/antlr/analysis/Transition.java
@@ -51,9 +51,13 @@ public class Transition implements Comparable {
         this.target = target;
     }
 
-    public boolean isEpsilon() {
-        return label.isEpsilon();
-    }
+	public boolean isEpsilon() {
+		return label.isEpsilon();
+	}
+
+	public boolean isAction() {
+		return label.isAction();
+	}
 
     public boolean isSemanticPredicate() {
         return label.isSemanticPredicate();
diff --git a/src/org/antlr/codegen/ACyclicDFACodeGenerator.java b/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
similarity index 93%
rename from src/org/antlr/codegen/ACyclicDFACodeGenerator.java
rename to tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
index ee58c9a..2a198a4 100644
--- a/src/org/antlr/codegen/ACyclicDFACodeGenerator.java
+++ b/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
@@ -53,6 +53,7 @@ public class ACyclicDFACodeGenerator {
 			DFAState s,
 			int k)
 	{
+		//System.out.println("walk "+s.stateNumber+" in dfa for decision "+dfa.decisionNumber);
 		if ( s.isAcceptState() ) {
 			StringTemplate dfaST = templates.getInstanceOf("dfaAcceptState");
 			dfaST.setAttribute("alt", Utils.integer(s.getUniquelyPredictedAlt()));
@@ -82,19 +83,21 @@ public class ACyclicDFACodeGenerator {
 		dfaST.setAttribute("k", Utils.integer(k));
 		dfaST.setAttribute("stateNumber", Utils.integer(s.stateNumber));
 		dfaST.setAttribute("semPredState",
-							Boolean.valueOf(s.isResolvedWithPredicates()));
+						   Boolean.valueOf(s.isResolvedWithPredicates()));
+		/*
 		String description = dfa.getNFADecisionStartState().getDescription();
 		description = parentGenerator.target.getTargetStringLiteralFromString(description);
-		//System.out.println("DFA: "+description+" associated with AST "+decisionASTNode);
+		//System.out.println("DFA: "+description+" associated with AST "+dfa.getNFADecisionStartState());
 		if ( description!=null ) {
 			dfaST.setAttribute("description", description);
 		}
+		*/
 		int EOTPredicts = NFA.INVALID_ALT_NUMBER;
 		DFAState EOTTarget = null;
 		//System.out.println("DFA state "+s.stateNumber);
 		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
 			Transition edge = (Transition) s.transition(i);
-			//System.out.println("edge label "+edge.label.toString());
+			//System.out.println("edge "+s.stateNumber+"-"+edge.label.toString()+"->"+edge.target.stateNumber);
 			if ( edge.label.getAtom()==Label.EOT ) {
 				// don't generate a real edge for EOT; track alt EOT predicts
 				// generate that prediction in the else clause as default case
@@ -121,7 +124,7 @@ public class ACyclicDFACodeGenerator {
 			}
 			else { // else create an expression to evaluate (the general case)
 				edgeST.setAttribute("labelExpr",
-								parentGenerator.genLabelExpr(templates,edge,k));
+									parentGenerator.genLabelExpr(templates,edge,k));
 			}
 
 			// stick in any gated predicates for any edge if not already a pred
@@ -134,7 +137,7 @@ public class ACyclicDFACodeGenerator {
 					StringTemplate predST = preds.genExpr(parentGenerator,
 														  parentGenerator.getTemplates(),
 														  dfa);
-					edgeST.setAttribute("predicates", predST.toString());
+					edgeST.setAttribute("predicates", predST);
 				}
 			}
 
@@ -169,8 +172,9 @@ public class ACyclicDFACodeGenerator {
 				Transition predEdge = (Transition)EOTTarget.transition(i);
 				StringTemplate edgeST = templates.getInstanceOf(dfaEdgeName);
 				edgeST.setAttribute("labelExpr",
-							parentGenerator.genSemanticPredicateExpr(templates,predEdge));
+									parentGenerator.genSemanticPredicateExpr(templates,predEdge));
 				// the target must be an accept state
+				//System.out.println("EOT edge");
 				StringTemplate targetST =
 					walkFixedDFAGeneratingStateMachine(templates,
 													   dfa,
diff --git a/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java b/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java
new file mode 100644
index 0000000..f521e5f
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java
@@ -0,0 +1,134 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
+public class ActionScriptTarget extends Target {
+
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+
+        int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+        return String.valueOf(c);
+    }
+
+    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
+                                            int ttype) {
+        // use ints for predefined types;
+        // <invalid> <EOR> <DOWN> <UP>
+        if (ttype >= 0 && ttype <= 3) {
+            return String.valueOf(ttype);
+        }
+
+        String name = generator.grammar.getTokenDisplayName(ttype);
+
+        // If name is a literal, return the token type instead
+        if (name.charAt(0) == '\'') {
+            return String.valueOf(ttype);
+        }
+
+        return name;
+    }
+
+    /**
+     * ActionScript doesn't support Unicode String literals that are considered "illegal"
+     * or are in the surrogate pair ranges.  For example "/uffff" will not encode properly
+     * nor will "/ud800".  To keep things as compact as possible we use the following encoding
+     * if the int is below 255, we encode as hex literal
+     * If the int is between 255 and 0x7fff we use a single unicode literal with the value
+     * If the int is above 0x7fff, we use a unicode literal of 0x80hh, where hh is the high-order
+     * bits followed by \xll where ll is the lower order bits of a 16-bit number.
+     *
+     * Ideally this should be improved at a future date.  The most optimal way to encode this
+     * may be a compressed AMF encoding that is embedded using an Embed tag in ActionScript.
+     *
+     * @param v
+     * @return
+     */
+    public String encodeIntAsCharEscape(int v) {
+        // encode as hex
+        if ( v<=255 ) {
+			return "\\x"+ Integer.toHexString(v|0x100).substring(1,3);
+		}
+        if (v <= 0x7fff) {
+            String hex = Integer.toHexString(v|0x10000).substring(1,5);
+		    return "\\u"+hex;
+        }
+        if (v > 0xffff) {
+            System.err.println("Warning: character literal out of range for ActionScript target " + v);
+            return "";
+        }
+        StringBuffer buf = new StringBuffer("\\u80");
+        buf.append(Integer.toHexString((v >> 8) | 0x100).substring(1, 3)); // high - order bits
+        buf.append("\\x");
+        buf.append(Integer.toHexString((v & 0xff) | 0x100).substring(1, 3)); // low -order bits
+        return buf.toString();
+    }
+
+    /** Convert long to two 32-bit numbers separted by a comma.
+     *  ActionScript does not support 64-bit numbers, so we need to break
+     *  the number into two 32-bit literals to give to the Bit.  A number like
+     *  0xHHHHHHHHLLLLLLLL is broken into the following string:
+     *  "0xLLLLLLLL, 0xHHHHHHHH"
+	 *  Note that the low order bits are first, followed by the high order bits.
+     *  This is to match how the BitSet constructor works, where the bits are
+     *  passed in in 32-bit chunks with low-order bits coming first.
+	 */
+	public String getTarget64BitStringFromValue(long word) {
+		StringBuffer buf = new StringBuffer(22); // enough for the two "0x", "," and " "
+		buf.append("0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word & 0x00000000ffffffffL)));
+        buf.append(", 0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word >> 32)));
+
+        return buf.toString();
+	}
+
+    private void writeHexWithPadding(StringBuffer buf, String digits) {
+       digits = digits.toUpperCase();
+		int padding = 8 - digits.length();
+		// pad left with zeros
+		for (int i=1; i<=padding; i++) {
+			buf.append('0');
+		}
+		buf.append(digits);
+    }
+
+    protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
+                                                     CodeGenerator generator,
+                                                     Grammar grammar,
+                                                     StringTemplate recognizerST,
+                                                     StringTemplate cyclicDFAST) {
+        return recognizerST;
+    }
+}
+
diff --git a/src/org/antlr/codegen/CPPTarget.java b/tool/src/main/java/org/antlr/codegen/CPPTarget.java
similarity index 100%
rename from src/org/antlr/codegen/CPPTarget.java
rename to tool/src/main/java/org/antlr/codegen/CPPTarget.java
diff --git a/src/org/antlr/codegen/CSharpTarget.java b/tool/src/main/java/org/antlr/codegen/CSharp2Target.java
similarity index 86%
copy from src/org/antlr/codegen/CSharpTarget.java
copy to tool/src/main/java/org/antlr/codegen/CSharp2Target.java
index e1da9bd..05e4fd8 100644
--- a/src/org/antlr/codegen/CSharpTarget.java
+++ b/tool/src/main/java/org/antlr/codegen/CSharp2Target.java
@@ -1,46 +1,57 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2006 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.tool.Grammar;
-
-public class CSharpTarget extends Target 
-{
-	protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
-													 CodeGenerator generator,
-													 Grammar grammar,
-													 StringTemplate recognizerST,
-													 StringTemplate cyclicDFAST)
-	{
-		return recognizerST;
-	}
-}
-
+/*
+ [The "BSD licence"]
+ Copyright (c) 2006 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
+public class CSharp2Target extends Target 
+{
+	protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
+													 CodeGenerator generator,
+													 Grammar grammar,
+													 StringTemplate recognizerST,
+													 StringTemplate cyclicDFAST)
+	{
+		return recognizerST;
+	}
+
+	public String encodeIntAsCharEscape(int v)
+	{
+		if (v <= 127)
+		{
+			String hex1 = Integer.toHexString(v | 0x10000).substring(3, 5);
+			return "\\x" + hex1;
+		}
+		String hex = Integer.toHexString(v | 0x10000).substring(1, 5);
+		return "\\u" + hex;
+	}
+}
+
diff --git a/src/org/antlr/codegen/CSharpTarget.java b/tool/src/main/java/org/antlr/codegen/CSharpTarget.java
similarity index 85%
rename from src/org/antlr/codegen/CSharpTarget.java
rename to tool/src/main/java/org/antlr/codegen/CSharpTarget.java
index e1da9bd..ffcf2d9 100644
--- a/src/org/antlr/codegen/CSharpTarget.java
+++ b/tool/src/main/java/org/antlr/codegen/CSharpTarget.java
@@ -42,5 +42,16 @@ public class CSharpTarget extends Target
 	{
 		return recognizerST;
 	}
+
+	public String encodeIntAsCharEscape(int v)
+	{
+		if (v <= 127)
+		{
+			String hex1 = Integer.toHexString(v | 0x10000).substring(3, 5);
+			return "\\x" + hex1;
+		}
+		String hex = Integer.toHexString(v | 0x10000).substring(1, 5);
+		return "\\u" + hex;
+	}
 }
 
diff --git a/tool/src/main/java/org/antlr/codegen/CTarget.java b/tool/src/main/java/org/antlr/codegen/CTarget.java
new file mode 100644
index 0000000..f89d2f9
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/CTarget.java
@@ -0,0 +1,329 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+public class CTarget extends Target {
+
+    ArrayList strings = new ArrayList();
+
+    @Override
+    protected void genRecognizerFile(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            StringTemplate outputFileST)
+            throws IOException {
+
+        // Before we write this, and cause it to generate its string,
+        // we need to add all the string literals that we are going to match
+        //
+        outputFileST.setAttribute("literals", strings);
+        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+        generator.write(outputFileST, fileName);
+    }
+
+    @Override
+    protected void genRecognizerHeaderFile(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            StringTemplate headerFileST,
+            String extName)
+            throws IOException {
+        // Pick up the file name we are generating. This method will return
+        // a file suffixed with .c, so we must substring and add the extName
+        // to it as we cannot assign into strings in Java.
+        ///
+        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+        fileName = fileName.substring(0, fileName.length() - 2) + extName;
+
+        generator.write(headerFileST, fileName);
+    }
+
+    protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            StringTemplate recognizerST,
+            StringTemplate cyclicDFAST) {
+        return recognizerST;
+    }
+
+    /** Is scope in @scope::name {action} valid for this kind of grammar?
+     *  Targets like C++ may want to allow new scopes like headerfile or
+     *  some such.  The action names themselves are not policed at the
+     *  moment so targets can add template actions w/o having to recompile
+     *  ANTLR.
+     */
+    @Override
+    public boolean isValidActionScope(int grammarType, String scope) {
+        switch (grammarType) {
+            case Grammar.LEXER:
+                if (scope.equals("lexer")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+            case Grammar.PARSER:
+                if (scope.equals("parser")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+            case Grammar.COMBINED:
+                if (scope.equals("parser")) {
+                    return true;
+                }
+                if (scope.equals("lexer")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+            case Grammar.TREE_PARSER:
+                if (scope.equals("treeparser")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+        }
+        return false;
+    }
+
+    @Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+
+        if (literal.startsWith("'\\u")) {
+            literal = "0x" + literal.substring(3, 7);
+        } else {
+            int c = literal.charAt(1);
+
+            if (c < 32 || c > 127) {
+                literal = "0x" + Integer.toHexString(c);
+            }
+        }
+
+        return literal;
+    }
+
+    /** Convert from an ANTLR string literal found in a grammar file to
+     *  an equivalent string literal in the C target.
+     *  Because we must support Unicode character sets and have chosen
+     *  to have the lexer match UTF32 characters, then we must encode
+     *  string matches to use 32 bit character arrays. Here then we
+     *  must produce the C array and cater for the case where the 
+     *  lexer has been encoded with a string such as 'xyz\n'.
+     */
+    @Override
+    public String getTargetStringLiteralFromANTLRStringLiteral(
+            CodeGenerator generator,
+            String literal) {
+        int index;
+        String bytes;
+        StringBuffer buf = new StringBuffer();
+
+        buf.append("{ ");
+
+        // We need to lose any escaped characters of the form \x and just
+        // replace them with their actual values as well as lose the surrounding
+        // quote marks.
+        //
+        for (int i = 1; i < literal.length() - 1; i++) {
+            buf.append("0x");
+
+            if (literal.charAt(i) == '\\') {
+                i++; // Assume that there is a next character, this will just yield
+                // invalid strings if not, which is what the input would be of course - invalid
+                switch (literal.charAt(i)) {
+                    case 'u':
+                    case 'U':
+                        buf.append(literal.substring(i + 1, i + 5));  // Already a hex string
+                        i = i + 5;                                // Move to next string/char/escape
+                        break;
+
+                    case 'n':
+                    case 'N':
+
+                        buf.append("0A");
+                        break;
+
+                    case 'r':
+                    case 'R':
+
+                        buf.append("0D");
+                        break;
+
+                    case 't':
+                    case 'T':
+
+                        buf.append("09");
+                        break;
+
+                    case 'b':
+                    case 'B':
+
+                        buf.append("08");
+                        break;
+
+                    case 'f':
+                    case 'F':
+
+                        buf.append("0C");
+                        break;
+
+                    default:
+
+                        // Anything else is what it is!
+                        //
+                        buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
+                        break;
+                }
+            } else {
+                buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
+            }
+            buf.append(", ");
+        }
+        buf.append(" ANTLR3_STRING_TERMINATOR}");
+
+        bytes = buf.toString();
+        index = strings.indexOf(bytes);
+
+        if (index == -1) {
+            strings.add(bytes);
+            index = strings.indexOf(bytes);
+        }
+
+        String strref = "lit_" + String.valueOf(index + 1);
+
+        return strref;
+    }
+
+    /**
+     * Overrides the standard grammar analysis so we can prepare the analyser
+     * a little differently from the other targets.
+     *
+     * In particular we want to influence the way the code generator makes assumptions about
+     * switches vs ifs, vs table driven DFAs. In general, C code should be generated that
+     * has the minimum use of tables, and the maximum use of large switch statements. This
+     * allows the optimizers to generate very efficient code, it can reduce object code size
+     * by about 30% and give about a 20% performance improvement over not doing this. Hence,
+     * for the C target only, we change the defaults here, but only if they are still set to the
+     * defaults.
+     *
+     * @param generator An instance of the generic code generator class.
+     * @param grammar The grammar that we are currently analyzing
+     */
+    @Override
+    protected void performGrammarAnalysis(CodeGenerator generator, Grammar grammar) {
+
+        // Check to see if the maximum inline DFA states is still set to
+        // the default size. If it is then whack it all the way up to the maximum that
+        // we can sensibly get away with.
+        //
+        if (CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE == CodeGenerator.MADSI_DEFAULT) {
+
+            CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = 65535;
+        }
+
+        // Check to see if the maximum switch size is still set to the default
+        // and bring it up much higher if it is. Modern C compilers can handle
+        // much bigger switch statements than say Java can and if anyone finds a compiler
+        // that cannot deal with such big switches, all they need do is generate the
+        // code with a reduced -Xmaxswitchcaselabels nnn
+        //
+        if  (CodeGenerator.MAX_SWITCH_CASE_LABELS == CodeGenerator.MSCL_DEFAULT) {
+
+            CodeGenerator.MAX_SWITCH_CASE_LABELS = 3000;
+        }
+
+        // Check to see if the number of transitions considered a minimum for using
+        // a switch is still at the default. Because a switch is still generally faster than
+        // an if even with small sets, and given that the optimizer will do the best thing with it
+        // anyway, then we simply want to generate a switch for any number of states.
+        //
+        if (CodeGenerator.MIN_SWITCH_ALTS == CodeGenerator.MSA_DEFAULT) {
+
+            CodeGenerator.MIN_SWITCH_ALTS = 1;
+        }
+
+        // Now we allow the superclass implementation to do whatever it feels it
+        // must do.
+        //
+        super.performGrammarAnalysis(generator, grammar);
+    }
+}
+
diff --git a/src/org/antlr/codegen/CodeGenerator.java b/tool/src/main/java/org/antlr/codegen/CodeGenerator.java
similarity index 82%
rename from src/org/antlr/codegen/CodeGenerator.java
rename to tool/src/main/java/org/antlr/codegen/CodeGenerator.java
index 3e3ed51..8839b58 100644
--- a/src/org/antlr/codegen/CodeGenerator.java
+++ b/tool/src/main/java/org/antlr/codegen/CodeGenerator.java
@@ -1,6 +1,6 @@
 /*
 [The "BSD licence"]
-Copyright (c) 2005-2006 Terence Parr
+Copyright (c) 2005-2007 Terence Parr
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -27,12 +27,13 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 package org.antlr.codegen;
 
+
+import antlr.ANTLRLexer;
 import antlr.RecognitionException;
 import antlr.TokenStreamRewriteEngine;
 import antlr.collections.AST;
 import org.antlr.Tool;
 import org.antlr.analysis.*;
-import org.antlr.misc.BitSet;
 import org.antlr.misc.*;
 import org.antlr.stringtemplate.*;
 import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
@@ -41,7 +42,14 @@ import org.antlr.tool.*;
 import java.io.IOException;
 import java.io.StringReader;
 import java.io.Writer;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.antlr.grammar.v2.*;
+import org.antlr.grammar.v3.ActionTranslator;
 
 /** ANTLR's code generator.
  *
@@ -73,11 +81,15 @@ public class CodeGenerator {
 	 *  limit will be hit only for lexers where wildcard in a UNICODE
 	 *  vocabulary environment would generate a SWITCH with 65000 labels.
 	 */
-	public int MAX_SWITCH_CASE_LABELS = 300;
-	public int MIN_SWITCH_ALTS = 3;
+        public final static int MSCL_DEFAULT = 300;
+	public static int MAX_SWITCH_CASE_LABELS = MSCL_DEFAULT;
+        public final static int MSA_DEFAULT = 3;
+	public static int MIN_SWITCH_ALTS = MSA_DEFAULT;
 	public boolean GENERATE_SWITCHES_WHEN_POSSIBLE = true;
-	public static boolean GEN_ACYCLIC_DFA_INLINE = true;
+	//public static boolean GEN_ACYCLIC_DFA_INLINE = true;
 	public static boolean EMIT_TEMPLATE_DELIMITERS = false;
+        public final static int MADSI_DEFAULT = 10;
+	public static int MAX_ACYCLIC_DFA_STATES_INLINE = 10;
 
 	public String classpathTemplateRootDirectoryName =
 		"org/antlr/codegen/templates";
@@ -196,7 +208,7 @@ public class CodeGenerator {
 		}
 
 		// dynamically add subgroups that act like filters to apply to
-		// their supergroup.  E.g., Java:Dbg:AST:ASTDbg.
+		// their supergroup.  E.g., Java:Dbg:AST:ASTParser::ASTDbg.
 		String outputOption = (String)grammar.getOption("output");
 		if ( outputOption!=null && outputOption.equals("AST") ) {
 			if ( debug && grammar.type!=Grammar.LEXER ) {
@@ -205,12 +217,36 @@ public class CodeGenerator {
 				baseTemplates = dbgTemplates;
 				StringTemplateGroup astTemplates =
 					StringTemplateGroup.loadGroup("AST",dbgTemplates);
+				StringTemplateGroup astParserTemplates = astTemplates;
+				//if ( !grammar.rewriteMode() ) {
+					if ( grammar.type==Grammar.TREE_PARSER ) {
+						astParserTemplates =
+							StringTemplateGroup.loadGroup("ASTTreeParser", astTemplates);
+					}
+					else {
+						astParserTemplates =
+							StringTemplateGroup.loadGroup("ASTParser", astTemplates);
+					}
+				//}
 				StringTemplateGroup astDbgTemplates =
-					StringTemplateGroup.loadGroup("ASTDbg", astTemplates);
+					StringTemplateGroup.loadGroup("ASTDbg", astParserTemplates);
 				templates = astDbgTemplates;
 			}
 			else {
-				templates = StringTemplateGroup.loadGroup("AST", coreTemplates);
+				StringTemplateGroup astTemplates =
+					StringTemplateGroup.loadGroup("AST", coreTemplates);
+				StringTemplateGroup astParserTemplates = astTemplates;
+				//if ( !grammar.rewriteMode() ) {
+					if ( grammar.type==Grammar.TREE_PARSER ) {
+						astParserTemplates =
+							StringTemplateGroup.loadGroup("ASTTreeParser", astTemplates);
+					}
+					else {
+						astParserTemplates =
+							StringTemplateGroup.loadGroup("ASTParser", astTemplates);
+					}
+				//}
+				templates = astParserTemplates;
 			}
 		}
 		else if ( outputOption!=null && outputOption.equals("template") ) {
@@ -220,10 +256,6 @@ public class CodeGenerator {
 				baseTemplates = dbgTemplates;
 				StringTemplateGroup stTemplates =
 					StringTemplateGroup.loadGroup("ST",dbgTemplates);
-				/*
-				StringTemplateGroup astDbgTemplates =
-					StringTemplateGroup.loadGroup("STDbg", astTemplates);
-				*/
 				templates = stTemplates;
 			}
 			else {
@@ -258,23 +290,20 @@ public class CodeGenerator {
 	 *  The target, such as JavaTarget, dictates which files get written.
 	 */
 	public StringTemplate genRecognizer() {
+		//System.out.println("### generate "+grammar.name+" recognizer");
 		// LOAD OUTPUT TEMPLATES
 		loadTemplates(language);
 		if ( templates==null ) {
 			return null;
 		}
 
-		// CHECK FOR LEFT RECURSION; Make sure we can actually do analysis
-		grammar.checkAllRulesForLeftRecursion();
-
-		// was there a severe problem while reading in grammar?
+		// CREATE NFA FROM GRAMMAR, CREATE DFA FROM NFA
 		if ( ErrorManager.doNotAttemptAnalysis() ) {
 			return null;
 		}
-
-		// CREATE NFA FROM GRAMMAR, CREATE DFA FROM NFA
 		target.performGrammarAnalysis(this, grammar);
 
+
 		// some grammar analysis errors will not yield reliable DFA
 		if ( ErrorManager.doNotAttemptCodeGen() ) {
 			return null;
@@ -299,10 +328,11 @@ public class CodeGenerator {
 
 		boolean filterMode = grammar.getOption("filter")!=null &&
 							  grammar.getOption("filter").equals("true");
-		boolean canBacktrack = grammar.getSyntacticPredicates()!=null ||
-							   filterMode;
+        boolean canBacktrack = grammar.getSyntacticPredicates()!=null ||
+                               grammar.composite.getRootGrammar().atLeastOneBacktrackOption ||
+                               filterMode;
 
-		// TODO: move this down further because generating the recognizer
+        // TODO: move this down further because generating the recognizer
 		// alters the model with info on who uses predefined properties etc...
 		// The actions here might refer to something.
 
@@ -313,24 +343,16 @@ public class CodeGenerator {
 		verifyActionScopesOkForTarget(actions);
 		// translate $x::y references
 		translateActionAttributeReferences(actions);
-		Map actionsForGrammarScope =
-			(Map)actions.get(grammar.getDefaultActionScope(grammar.type));
-		if ( filterMode &&
-			 (actionsForGrammarScope==null ||
-			 !actionsForGrammarScope.containsKey(Grammar.SYNPREDGATE_ACTION_NAME)) )
-		{
-			// if filtering, we need to set actions to execute at backtracking
-			// level 1 not 0.  Don't set this action if a user has though
-			StringTemplate gateST = templates.getInstanceOf("filteringActionGate");
-			if ( actionsForGrammarScope==null ) {
-				actionsForGrammarScope=new HashMap();
-				actions.put(grammar.getDefaultActionScope(grammar.type),
-							actionsForGrammarScope);
-			}
-			actionsForGrammarScope.put(Grammar.SYNPREDGATE_ACTION_NAME,
-									   gateST);
-		}
-		headerFileST.setAttribute("actions", actions);
+
+        StringTemplate gateST = templates.getInstanceOf("actionGate");
+        if ( filterMode ) {
+            // if filtering, we need to set actions to execute at backtracking
+            // level 1 not 0.
+            gateST = templates.getInstanceOf("filteringActionGate");
+        }
+        grammar.setSynPredGateIfNotAlready(gateST);
+
+        headerFileST.setAttribute("actions", actions);
 		outputFileST.setAttribute("actions", actions);
 
 		headerFileST.setAttribute("buildTemplate", new Boolean(grammar.buildTemplate()));
@@ -338,20 +360,22 @@ public class CodeGenerator {
 		headerFileST.setAttribute("buildAST", new Boolean(grammar.buildAST()));
 		outputFileST.setAttribute("buildAST", new Boolean(grammar.buildAST()));
 
-		String rewrite = (String)grammar.getOption("rewrite");
-		outputFileST.setAttribute("rewrite",
-								  Boolean.valueOf(rewrite!=null&&rewrite.equals("true")));
-		headerFileST.setAttribute("rewrite",
-								  Boolean.valueOf(rewrite!=null&&rewrite.equals("true")));
+		outputFileST.setAttribute("rewriteMode", Boolean.valueOf(grammar.rewriteMode()));
+		headerFileST.setAttribute("rewriteMode", Boolean.valueOf(grammar.rewriteMode()));
 
 		outputFileST.setAttribute("backtracking", Boolean.valueOf(canBacktrack));
 		headerFileST.setAttribute("backtracking", Boolean.valueOf(canBacktrack));
+		// turn on memoize attribute at grammar level so we can create ruleMemo.
+		// each rule has memoize attr that hides this one, indicating whether
+		// it needs to save results
 		String memoize = (String)grammar.getOption("memoize");
 		outputFileST.setAttribute("memoize",
-								  Boolean.valueOf(memoize!=null&&memoize.equals("true")&&
+								  (grammar.atLeastOneRuleMemoizes||
+								  Boolean.valueOf(memoize!=null&&memoize.equals("true"))&&
 									          canBacktrack));
 		headerFileST.setAttribute("memoize",
-								  Boolean.valueOf(memoize!=null&&memoize.equals("true")&&
+								  (grammar.atLeastOneRuleMemoizes||
+								  Boolean.valueOf(memoize!=null&&memoize.equals("true"))&&
 									          canBacktrack));
 
 
@@ -380,6 +404,8 @@ public class CodeGenerator {
 			recognizerST = templates.getInstanceOf("treeParser");
 			outputFileST.setAttribute("TREE_PARSER", Boolean.valueOf(true));
 			headerFileST.setAttribute("TREE_PARSER", Boolean.valueOf(true));
+            recognizerST.setAttribute("filterMode",
+                                      Boolean.valueOf(filterMode));
 		}
 		outputFileST.setAttribute("recognizer", recognizerST);
 		headerFileST.setAttribute("recognizer", recognizerST);
@@ -392,8 +418,8 @@ public class CodeGenerator {
 			target.getTargetStringLiteralFromString(grammar.getFileName());
 		outputFileST.setAttribute("fileName", targetAppropriateFileNameString);
 		headerFileST.setAttribute("fileName", targetAppropriateFileNameString);
-		outputFileST.setAttribute("ANTLRVersion", Tool.VERSION);
-		headerFileST.setAttribute("ANTLRVersion", Tool.VERSION);
+		outputFileST.setAttribute("ANTLRVersion", tool.VERSION);
+		headerFileST.setAttribute("ANTLRVersion", tool.VERSION);
 		outputFileST.setAttribute("generatedTimestamp", Tool.getCurrentTimeStamp());
 		headerFileST.setAttribute("generatedTimestamp", Tool.getCurrentTimeStamp());
 
@@ -413,6 +439,7 @@ public class CodeGenerator {
 			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
 							   re);
 		}
+
 		genTokenTypeConstants(recognizerST);
 		genTokenTypeConstants(outputFileST);
 		genTokenTypeConstants(headerFileST);
@@ -479,7 +506,7 @@ public class CodeGenerator {
 				ErrorManager.grammarError(
 					ErrorManager.MSG_INVALID_ACTION_SCOPE,grammar,
 					actionAST.getToken(),scope,
-					Grammar.grammarTypeToString[grammar.type]);
+					grammar.getGrammarTypeString());
 			}
 		}
 	}
@@ -497,7 +524,7 @@ public class CodeGenerator {
 	}
 
 	/** Use for translating rule @init{...} actions that have no scope */
-	protected void translateActionAttributeReferencesForSingleScope(
+	public void translateActionAttributeReferencesForSingleScope(
 		Rule r,
 		Map scopeActions)
 	{
@@ -537,22 +564,30 @@ public class CodeGenerator {
 									String enclosingRuleName,
 									int elementIndex)
 	{
-		NFAState followingNFAState = referencedElementNode.followingNFAState;
-/*
-		System.out.print("compute FOLLOW "+referencedElementNode.toString()+
+		/*
+		System.out.println("compute FOLLOW "+grammar.name+"."+referencedElementNode.toString()+
 						 " for "+referencedElementName+"#"+elementIndex +" in "+
 						 enclosingRuleName+
 						 " line="+referencedElementNode.getLine());
-*/
+						 */
+		NFAState followingNFAState = referencedElementNode.followingNFAState;
 		LookaheadSet follow = null;
 		if ( followingNFAState!=null ) {
-			follow = grammar.LOOK(followingNFAState);
+			// compute follow for this element and, as side-effect, track
+			// the rule LOOK sensitivity.
+			follow = grammar.FIRST(followingNFAState);
 		}
 
 		if ( follow==null ) {
 			ErrorManager.internalError("no follow state or cannot compute follow");
 			follow = new LookaheadSet();
 		}
+		if ( follow.member(Label.EOF) ) {
+			// TODO: can we just remove?  Seems needed here:
+			// compilation_unit : global_statement* EOF
+			// Actually i guess we resync to EOF regardless
+			follow.remove(Label.EOF);
+		}
 		//System.out.println(" "+follow);
 
         List tokenTypeList = null;
@@ -866,41 +901,143 @@ public class CodeGenerator {
 		if ( actionTree.getType()==ANTLRParser.ARG_ACTION ) {
 			return translateArgAction(ruleName, actionTree);
 		}
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(this,ruleName,actionTree);
+		ActionTranslator translator = new ActionTranslator(this,ruleName,actionTree);
 		List chunks = translator.translateToChunks();
 		chunks = target.postProcessAction(chunks, actionTree.token);
 		return chunks;
 	}
 
 	/** Translate an action like [3,"foo",a[3]] and return a List of the
-	 *  translated actions.  Because actions are translated to a list of
-	 *  chunks, this returns List<List<String|StringTemplate>>.
-	 *
-	 *  Simple ',' separator is assumed.
+	 *  translated actions.  Because actions are themselves translated to a list
+	 *  of chunks, must cat together into a StringTemplate>.  Don't translate
+	 *  to strings early as we need to eval templates in context.
 	 */
-	public List translateArgAction(String ruleName,
-								   GrammarAST actionTree)
+	public List<StringTemplate> translateArgAction(String ruleName,
+										   GrammarAST actionTree)
 	{
 		String actionText = actionTree.token.getText();
-		StringTokenizer argTokens = new StringTokenizer(actionText, ",");
-		List args = new ArrayList();
-		while ( argTokens.hasMoreTokens() ) {
-			String arg = (String)argTokens.nextToken();
-			antlr.Token actionToken = new antlr.CommonToken(ANTLRParser.ACTION,arg);
-			ActionTranslatorLexer translator =
-				new ActionTranslatorLexer(this,ruleName,
-										  actionToken,
-										  actionTree.outerAltNum);
-			List chunks = translator.translateToChunks();
-			chunks = target.postProcessAction(chunks, actionToken);
-			args.add(chunks);
+		List<String> args = getListOfArgumentsFromAction(actionText,',');
+		List<StringTemplate> translatedArgs = new ArrayList<StringTemplate>();
+		for (String arg : args) {
+			if ( arg!=null ) {
+				antlr.Token actionToken =
+					new antlr.CommonToken(ANTLRParser.ACTION,arg);
+				ActionTranslator translator =
+					new ActionTranslator(this,ruleName,
+											  actionToken,
+											  actionTree.outerAltNum);
+				List chunks = translator.translateToChunks();
+				chunks = target.postProcessAction(chunks, actionToken);
+				StringTemplate catST = new StringTemplate(templates, "<chunks>");
+				catST.setAttribute("chunks", chunks);
+				templates.createStringTemplate();
+				translatedArgs.add(catST);
+			}
 		}
-		if ( args.size()==0 ) {
+		if ( translatedArgs.size()==0 ) {
 			return null;
 		}
+		return translatedArgs;
+	}
+
+	public static List<String> getListOfArgumentsFromAction(String actionText,
+															int separatorChar)
+	{
+		List<String> args = new ArrayList<String>();
+		getListOfArgumentsFromAction(actionText, 0, -1, separatorChar, args);
 		return args;
 	}
 
+	/** Given an arg action like
+	 *
+	 *  [x, (*a).foo(21,33), 3.2+1, '\n',
+	 *  "a,oo\nick", {bl, "fdkj"eck}, ["cat\n,", x, 43]]
+	 *
+	 *  convert to a list of arguments.  Allow nested square brackets etc...
+	 *  Set separatorChar to ';' or ',' or whatever you want.
+	 */
+	public static int getListOfArgumentsFromAction(String actionText,
+												   int start,
+												   int targetChar,
+												   int separatorChar,
+												   List<String> args)
+	{
+		if ( actionText==null ) {
+			return -1;
+		}
+		actionText = actionText.replaceAll("//.*\n", "");
+		int n = actionText.length();
+		//System.out.println("actionText@"+start+"->"+(char)targetChar+"="+actionText.substring(start,n));
+		int p = start;
+		int last = p;
+		while ( p<n && actionText.charAt(p)!=targetChar ) {
+			int c = actionText.charAt(p);
+			switch ( c ) {
+				case '\'' :
+					p++;
+					while ( p<n && actionText.charAt(p)!='\'' ) {
+						if ( actionText.charAt(p)=='\\' && (p+1)<n &&
+							 actionText.charAt(p+1)=='\'' )
+						{
+							p++; // skip escaped quote
+						}
+						p++;
+					}
+					p++;
+					break;
+				case '"' :
+					p++;
+					while ( p<n && actionText.charAt(p)!='\"' ) {
+						if ( actionText.charAt(p)=='\\' && (p+1)<n &&
+							 actionText.charAt(p+1)=='\"' )
+						{
+							p++; // skip escaped quote
+						}
+						p++;
+					}
+					p++;
+					break;
+				case '(' :
+					p = getListOfArgumentsFromAction(actionText,p+1,')',separatorChar,args);
+					break;
+				case '{' :
+					p = getListOfArgumentsFromAction(actionText,p+1,'}',separatorChar,args);
+					break;
+				case '<' :
+					if ( actionText.indexOf('>',p+1)>=p ) {
+						// do we see a matching '>' ahead?  if so, hope it's a generic
+						// and not less followed by expr with greater than
+						p = getListOfArgumentsFromAction(actionText,p+1,'>',separatorChar,args);
+					}
+					else {
+						p++; // treat as normal char
+					}
+					break;
+				case '[' :
+					p = getListOfArgumentsFromAction(actionText,p+1,']',separatorChar,args);
+					break;
+				default :
+					if ( c==separatorChar && targetChar==-1 ) {
+						String arg = actionText.substring(last, p);
+						//System.out.println("arg="+arg);
+						args.add(arg.trim());
+						last = p+1;
+					}
+					p++;
+					break;
+			}
+		}
+		if ( targetChar==-1 && p<=n ) {
+			String arg = actionText.substring(last, p).trim();
+			//System.out.println("arg="+arg);
+			if ( arg.length()>0 ) {
+				args.add(arg.trim());
+			}
+		}
+		p++;
+		return p;
+	}
+
 	/** Given a template constructor action like %foo(a={...}) in
 	 *  an action, translate it to the appropriate template constructor
 	 *  from the templateLib. This translates a *piece* of the action.
@@ -912,7 +1049,7 @@ public class CodeGenerator {
 	{
 		// first, parse with antlr.g
 		//System.out.println("translate template: "+templateActionText);
-		ANTLRLexer lexer = new ANTLRLexer(new StringReader(templateActionText));
+		org.antlr.grammar.v2.ANTLRLexer lexer = new org.antlr.grammar.v2.ANTLRLexer(new StringReader(templateActionText));
 		lexer.setFilename(grammar.getFileName());
 		lexer.setTokenObjectClass("antlr.TokenWithIndex");
 		TokenStreamRewriteEngine tokenBuffer = new TokenStreamRewriteEngine(lexer);
@@ -940,8 +1077,8 @@ public class CodeGenerator {
 		// then translate via codegen.g
 		CodeGenTreeWalker gen = new CodeGenTreeWalker();
 		gen.init(grammar);
-		gen.currentRuleName = ruleName;
-		gen.outerAltNum = outerAltNum;
+		gen.setCurrentRuleName(ruleName);
+		gen.setOuterAltNum(outerAltNum);
 		StringTemplate st = null;
 		try {
 			st = gen.rewrite_template((AST)rewriteTree);
@@ -1107,10 +1244,22 @@ public class CodeGenerator {
 		return outputFileST;
 	}
 
+	/** Generate TParser.java and TLexer.java from T.g if combined, else
+	 *  just use T.java as output regardless of type.
+	 */
 	public String getRecognizerFileName(String name, int type) {
 		StringTemplate extST = templates.getInstanceOf("codeFileExtension");
-		String suffix = Grammar.grammarTypeToFileNameSuffix[type];
+		String recognizerName = grammar.getRecognizerName();
+		return recognizerName+extST.toString();
+		/*
+		String suffix = "";
+		if ( type==Grammar.COMBINED ||
+			 (type==Grammar.LEXER && !grammar.implicitLexer) )
+		{
+			suffix = Grammar.grammarTypeToFileNameSuffix[type];
+		}
 		return name+suffix+extST.toString();
+		*/
 	}
 
 	/** What is the name of the vocab file generated for this grammar?
diff --git a/tool/src/main/java/org/antlr/codegen/DelphiTarget.java b/tool/src/main/java/org/antlr/codegen/DelphiTarget.java
new file mode 100644
index 0000000..efe1755
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/DelphiTarget.java
@@ -0,0 +1,147 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2008 Erik van Bilsen
+ Copyright (c) 2006 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.tool.Grammar;
+import org.antlr.misc.Utils; 
+import org.antlr.analysis.Label;
+
+public class DelphiTarget extends Target 
+{
+  public DelphiTarget() { 
+    targetCharValueEscape['\n'] = "'#10'";    
+    targetCharValueEscape['\r'] = "'#13'";    
+    targetCharValueEscape['\t'] = "'#9'";   
+    targetCharValueEscape['\b'] = "\\b";    
+    targetCharValueEscape['\f'] = "\\f";    
+    targetCharValueEscape['\\'] = "\\";   
+    targetCharValueEscape['\''] = "''";   
+    targetCharValueEscape['"'] = "'";
+  } 
+
+  protected StringTemplate chooseWhereCyclicDFAsGo(Tool tool,
+                           CodeGenerator generator,
+                           Grammar grammar,
+                           StringTemplate recognizerST,
+                           StringTemplate cyclicDFAST)
+  {
+    return recognizerST;
+  }
+
+  public String encodeIntAsCharEscape(int v)
+  {
+    if (v <= 127)
+    {
+      String hex1 = Integer.toHexString(v | 0x10000).substring(3, 5);
+      return "'#$" + hex1 + "'";
+    }
+    String hex = Integer.toHexString(v | 0x10000).substring(1, 5);
+    return "'#$" + hex + "'";
+  }
+  
+  public String getTargetCharLiteralFromANTLRCharLiteral(
+    CodeGenerator generator,
+    String literal)
+  {
+    StringBuffer buf = new StringBuffer();
+    int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+    if ( c<Label.MIN_CHAR_VALUE ) {
+      return "0";
+    }
+    // normal char
+    buf.append(c);
+
+    return buf.toString();
+  } 
+
+  public String getTargetStringLiteralFromString(String s, boolean quoted) {
+    if ( s==null ) {
+      return null;
+    }
+    StringBuffer buf = new StringBuffer();
+    if ( quoted ) {
+      buf.append('\'');
+    }
+    for (int i=0; i<s.length(); i++) {
+      int c = s.charAt(i);
+      if ( c!='"' && // don't escape double quotes in strings for Delphi
+         c<targetCharValueEscape.length &&
+         targetCharValueEscape[c]!=null )
+      {
+        buf.append(targetCharValueEscape[c]);
+      }
+      else {
+        buf.append((char)c);
+      }
+      if ((i & 127) == 127)
+      {
+        // Concatenate string literals because Delphi doesn't support literals over 255 characters,
+        // and the code editor doesn't support lines over 1023 characters
+        buf.append("\' + \r\n  \'");
+      }
+    }
+    if ( quoted ) {
+      buf.append('\'');
+    }
+    return buf.toString();
+  }
+
+  public String getTargetStringLiteralFromANTLRStringLiteral(
+    CodeGenerator generator,
+    String literal)
+  {
+    literal = Utils.replace(literal,"\\\'","''"); // \' to ' to normalize
+    literal = Utils.replace(literal,"\\r\\n","'#13#10'"); 
+    literal = Utils.replace(literal,"\\r","'#13'"); 
+    literal = Utils.replace(literal,"\\n","'#10'"); 
+    StringBuffer buf = new StringBuffer(literal);
+    buf.setCharAt(0,'\'');
+    buf.setCharAt(literal.length()-1,'\'');
+    return buf.toString();
+  }
+   
+  public String getTarget64BitStringFromValue(long word) {
+    int numHexDigits = 8*2;
+    StringBuffer buf = new StringBuffer(numHexDigits+2);
+    buf.append("$");
+    String digits = Long.toHexString(word);
+    digits = digits.toUpperCase();
+    int padding = numHexDigits - digits.length();
+    // pad left with zeros
+    for (int i=1; i<=padding; i++) {
+      buf.append('0');
+    }
+    buf.append(digits);
+    return buf.toString();
+  }
+
+}
\ No newline at end of file
diff --git a/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java b/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java
new file mode 100644
index 0000000..7b770fc
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java
@@ -0,0 +1,47 @@
+package org.antlr.codegen;
+import java.util.*;
+
+public class JavaScriptTarget extends Target {
+    /** Convert an int to a JavaScript Unicode character literal.
+     *
+     *  The current JavaScript spec (ECMA-262) doesn't provide for octal
+     *  notation in String literals, although some implementations support it.
+     *  This method overrides the parent class so that characters will always
+     *  be encoded as Unicode literals (e.g. \u0011).
+     */
+    public String encodeIntAsCharEscape(int v) {
+        String hex = Integer.toHexString(v|0x10000).substring(1,5);
+        return "\\u"+hex;
+    }
+
+    /** Convert long to two 32-bit numbers separted by a comma.
+     *  JavaScript does not support 64-bit numbers, so we need to break
+     *  the number into two 32-bit literals to give to the Bit.  A number like
+     *  0xHHHHHHHHLLLLLLLL is broken into the following string:
+     *  "0xLLLLLLLL, 0xHHHHHHHH"
+     *  Note that the low order bits are first, followed by the high order bits.
+     *  This is to match how the BitSet constructor works, where the bits are
+     *  passed in in 32-bit chunks with low-order bits coming first.
+     *
+     *  Note: stole the following two methods from the ActionScript target.
+     */
+    public String getTarget64BitStringFromValue(long word) {
+        StringBuffer buf = new StringBuffer(22); // enough for the two "0x", "," and " "
+        buf.append("0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word & 0x00000000ffffffffL)));
+        buf.append(", 0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word >> 32)));
+
+        return buf.toString();
+    }
+
+    private void writeHexWithPadding(StringBuffer buf, String digits) {
+        digits = digits.toUpperCase();
+        int padding = 8 - digits.length();
+        // pad left with zeros
+        for (int i=1; i<=padding; i++) {
+            buf.append('0');
+        }
+        buf.append(digits);
+    }
+}
diff --git a/src/org/antlr/codegen/JavaTarget.java b/tool/src/main/java/org/antlr/codegen/JavaTarget.java
similarity index 100%
rename from src/org/antlr/codegen/JavaTarget.java
rename to tool/src/main/java/org/antlr/codegen/JavaTarget.java
diff --git a/src/org/antlr/codegen/ObjCTarget.java b/tool/src/main/java/org/antlr/codegen/ObjCTarget.java
similarity index 100%
rename from src/org/antlr/codegen/ObjCTarget.java
rename to tool/src/main/java/org/antlr/codegen/ObjCTarget.java
diff --git a/tool/src/main/java/org/antlr/codegen/Perl5Target.java b/tool/src/main/java/org/antlr/codegen/Perl5Target.java
new file mode 100644
index 0000000..e466f82
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/Perl5Target.java
@@ -0,0 +1,92 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007 Ronald Blaschke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.codegen;
+
+import org.antlr.analysis.Label;
+import org.antlr.tool.AttributeScope;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.RuleLabelScope;
+
+public class Perl5Target extends Target {
+    public Perl5Target() {
+        targetCharValueEscape['$'] = "\\$";
+        targetCharValueEscape['@'] = "\\@";
+        targetCharValueEscape['%'] = "\\%";
+        AttributeScope.tokenScope.addAttribute("self", null);
+        RuleLabelScope.predefinedLexerRulePropertiesScope.addAttribute("self", null);
+    }
+
+    public String getTargetCharLiteralFromANTLRCharLiteral(final CodeGenerator generator,
+                                                           final String literal) {
+        final StringBuffer buf = new StringBuffer(10);
+
+        final int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+        if (c < Label.MIN_CHAR_VALUE) {
+            buf.append("\\x{0000}");
+        } else if (c < targetCharValueEscape.length &&
+                targetCharValueEscape[c] != null) {
+            buf.append(targetCharValueEscape[c]);
+        } else if (Character.UnicodeBlock.of((char) c) ==
+                Character.UnicodeBlock.BASIC_LATIN &&
+                !Character.isISOControl((char) c)) {
+            // normal char
+            buf.append((char) c);
+        } else {
+            // must be something unprintable...use \\uXXXX
+            // turn on the bit above max "\\uFFFF" value so that we pad with zeros
+            // then only take last 4 digits
+            String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
+            buf.append("\\x{");
+            buf.append(hex);
+            buf.append("}");
+        }
+
+        if (buf.indexOf("\\") == -1) {
+            // no need for interpolation, use single quotes
+            buf.insert(0, '\'');
+            buf.append('\'');
+        } else {
+            // need string interpolation
+            buf.insert(0, '\"');
+            buf.append('\"');
+        }
+
+        return buf.toString();
+    }
+
+    public String encodeIntAsCharEscape(final int v) {
+        final int intValue;
+        if ((v & 0x8000) == 0) {
+            intValue = v;
+        } else {
+            intValue = -(0x10000 - v);
+        }
+
+        return String.valueOf(intValue);
+    }
+}
diff --git a/src/org/antlr/codegen/PythonTarget.java b/tool/src/main/java/org/antlr/codegen/PythonTarget.java
similarity index 98%
rename from src/org/antlr/codegen/PythonTarget.java
rename to tool/src/main/java/org/antlr/codegen/PythonTarget.java
index 2d095f6..c2a3ffb 100644
--- a/src/org/antlr/codegen/PythonTarget.java
+++ b/tool/src/main/java/org/antlr/codegen/PythonTarget.java
@@ -34,6 +34,7 @@ full of WTFs - though IMHO Java is the Real WTF(TM) here...
  */
 
 package org.antlr.codegen;
+import org.antlr.tool.Grammar;
 import java.util.*;
 
 public class PythonTarget extends Target {
@@ -59,7 +60,8 @@ public class PythonTarget extends Target {
     public String getTargetCharLiteralFromANTLRCharLiteral(
             CodeGenerator generator,
             String literal) {
-	return "u" + literal;
+	int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+	return String.valueOf(c);
     }
 
     private List splitLines(String text) {
diff --git a/src/org/antlr/codegen/RubyTarget.java b/tool/src/main/java/org/antlr/codegen/RubyTarget.java
similarity index 100%
rename from src/org/antlr/codegen/RubyTarget.java
rename to tool/src/main/java/org/antlr/codegen/RubyTarget.java
diff --git a/src/org/antlr/codegen/Target.java b/tool/src/main/java/org/antlr/codegen/Target.java
similarity index 79%
rename from src/org/antlr/codegen/Target.java
rename to tool/src/main/java/org/antlr/codegen/Target.java
index 2901f4e..9255e6c 100644
--- a/src/org/antlr/codegen/Target.java
+++ b/tool/src/main/java/org/antlr/codegen/Target.java
@@ -108,7 +108,7 @@ public class Target {
 										  Grammar grammar)
 	{
 		// Build NFAs from the grammar AST
-		grammar.createNFAs();
+		grammar.buildNFA();
 
 		// Create the DFA predictors for each decision
 		grammar.createLookaheadDFAs();
@@ -198,16 +198,73 @@ public class Target {
 	 *  is the translation 'a\n"' -> "a\n\"".  Expect single quotes
 	 *  around the incoming literal.  Just flip the quotes and replace
 	 *  double quotes with \"
+     * 
+     *  Note that we have decided to allow poeple to use '\"' without
+     *  penalty, so we must build the target string in a loop as Utils.replae
+     *  cannot handle both \" and " without a lot of messing around.
+     * 
 	 */
 	public String getTargetStringLiteralFromANTLRStringLiteral(
 		CodeGenerator generator,
 		String literal)
 	{
-		literal = Utils.replace(literal,"\"","\\\"");
-		StringBuffer buf = new StringBuffer(literal);
-		buf.setCharAt(0,'"');
-		buf.setCharAt(literal.length()-1,'"');
-		return buf.toString();
+        StringBuilder sb = new StringBuilder();
+        StringBuffer is = new StringBuffer(literal);
+        
+        // Opening quote
+        //
+        sb.append('"');
+        
+        for (int i = 1; i < is.length() -1; i++) {
+            if  (is.charAt(i) == '\\') {
+                // Anything escaped is what it is! We assume that
+                // people know how to escape characters correctly. However
+                // we catch anything that does not need an escape in Java (which
+                // is what the default implementation is dealing with and remove 
+                // the escape. The C target does this for instance.
+                //
+                switch (is.charAt(i+1)) {
+                    // Pass through any escapes that Java also needs
+                    //
+                    case    '"':
+                    case    'n':
+                    case    'r':
+                    case    't':
+                    case    'b':
+                    case    'f':
+                    case    '\\':
+                    case    'u':    // Assume unnnn
+                        sb.append('\\');    // Pass the escape through
+                        break;
+                    default:
+                        // Remove the escape by virtue of not adding it here
+                        // Thus \' becomes ' and so on
+                        //
+                        break;
+                }
+                
+                // Go past the \ character
+                //
+                i++;
+            } else {
+                // Chracters that don't need \ in ANTLR 'strings' but do in Java
+                //
+                if (is.charAt(i) == '"') {
+                    // We need to escape " in Java
+                    //
+                    sb.append('\\');
+                }
+            }
+            // Add in the next character, which may have been escaped
+            //
+            sb.append(is.charAt(i));   
+        }
+        
+        // Append closing " and return
+        //
+        sb.append('"');
+        
+		return sb.toString();
 	}
 
 	/** Given a random string of Java unicode chars, return a new string with
@@ -233,6 +290,7 @@ public class Target {
 		if ( s==null ) {
 			return null;
 		}
+
 		StringBuffer buf = new StringBuffer();
 		if ( quoted ) {
 			buf.append('"');
@@ -277,6 +335,14 @@ public class Target {
 		return buf.toString();
 	}
 
+	public String encodeIntAsCharEscape(int v) {
+		if ( v<=127 ) {
+			return "\\"+Integer.toOctalString(v);
+		}
+		String hex = Integer.toHexString(v|0x10000).substring(1,5);
+		return "\\u"+hex;
+	}
+
 	/** Some targets only support ASCII or 8-bit chars/strings.  For example,
 	 *  C++ will probably want to return 0xFF here.
 	 */
diff --git a/runtime/Java/src/org/antlr/runtime/Parser.java b/tool/src/main/java/org/antlr/misc/Barrier.java
similarity index 59%
rename from runtime/Java/src/org/antlr/runtime/Parser.java
rename to tool/src/main/java/org/antlr/misc/Barrier.java
index 1000a52..bfcdbb3 100644
--- a/runtime/Java/src/org/antlr/runtime/Parser.java
+++ b/tool/src/main/java/org/antlr/misc/Barrier.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,41 +25,38 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.misc;
 
-/** A parser for TokenStreams.  "parser grammars" result in a subclass
- *  of this.
+/**A very simple barrier wait.  Once a thread has requested a
+ * wait on the barrier with waitForRelease, it cannot fool the
+ * barrier into releasing by "hitting" the barrier multiple times--
+ * the thread is blocked on the wait().
  */
-public class Parser extends BaseRecognizer {
-    protected TokenStream input;
+public class Barrier {
+    protected int threshold;
+    protected int count = 0;
 
-	public Parser(TokenStream input) {
-        setTokenStream(input);
+    public Barrier(int t) {
+        threshold = t;
     }
 
-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
-	}
-
-	/** Set the token stream and reset the parser */
-	public void setTokenStream(TokenStream input) {
-		this.input = null;
-		reset();
-		this.input = input;
-	}
-
-    public TokenStream getTokenStream() {
-		return input;
-	}
-
-	public void traceIn(String ruleName, int ruleIndex)  {
-		super.traceIn(ruleName, ruleIndex, input.LT(1));
-	}
+    public synchronized void waitForRelease()
+        throws InterruptedException
+    {
+        count++;
+        // The final thread to reach barrier resets barrier and
+        // releases all threads
+        if ( count==threshold ) {
+            // notify blocked threads that threshold has been reached
+            action(); // perform the requested operation
+            notifyAll();
+        }
+        else while ( count<threshold ) {
+            wait();
+        }
+    }
 
-	public void traceOut(String ruleName, int ruleIndex)  {
-		super.traceOut(ruleName, ruleIndex, input.LT(1));
-	}
+    /** What to do when everyone reaches barrier */
+    public void action() {
+    }
 }
diff --git a/src/org/antlr/misc/BitSet.java b/tool/src/main/java/org/antlr/misc/BitSet.java
similarity index 97%
rename from src/org/antlr/misc/BitSet.java
rename to tool/src/main/java/org/antlr/misc/BitSet.java
index 2414d26..a8626ad 100644
--- a/src/org/antlr/misc/BitSet.java
+++ b/tool/src/main/java/org/antlr/misc/BitSet.java
@@ -122,11 +122,22 @@ public class BitSet implements IntSet, Cloneable {
 		}
 	}
 
-	public void addAll(List elements) {
+	public void addAll(Iterable elements) {
 		if ( elements==null ) {
 			return;
 		}
-		for (int i = 0; i < elements.size(); i++) {
+		Iterator it = elements.iterator();
+		while (it.hasNext()) {
+			Object o = (Object) it.next();
+			if ( !(o instanceof Integer) ) {
+				throw new IllegalArgumentException();
+			}
+			Integer eI = (Integer)o;
+			add(eI.intValue());
+		}
+		/*
+		int n = elements.size();
+		for (int i = 0; i < n; i++) {
 			Object o = elements.get(i);
 			if ( !(o instanceof Integer) ) {
 				throw new IllegalArgumentException();
@@ -134,6 +145,7 @@ public class BitSet implements IntSet, Cloneable {
 			Integer eI = (Integer)o;
 			add(eI.intValue());
 		}
+		 */
 	}
 
     public IntSet and(IntSet a) {
diff --git a/tool/src/main/java/org/antlr/misc/Graph.java b/tool/src/main/java/org/antlr/misc/Graph.java
new file mode 100644
index 0000000..0f2a71e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/Graph.java
@@ -0,0 +1,107 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import java.util.*;
+
+/** A generic graph with edges; Each node as a single Object payload.
+ *  This is only used to topologically sort a list of file dependencies
+ *  at the moment.
+ */
+public class Graph {
+
+    public static class Node {
+        Object payload;
+        List<Node> edges; // points at which nodes?
+
+        public Node(Object payload) { this.payload = payload; }
+
+        public void addEdge(Node n) {
+            if ( edges==null ) edges = new ArrayList<Node>();
+            if ( !edges.contains(n) ) edges.add(n);
+        }
+
+        public String toString() { return payload.toString(); }
+    }
+
+    /** Map from node payload to node containing it */
+    protected Map<Object,Node> nodes = new HashMap<Object,Node>();
+
+    public void addEdge(Object a, Object b) {
+        //System.out.println("add edge "+a+" to "+b);
+        Node a_node = getNode(a);
+        Node b_node = getNode(b);
+        a_node.addEdge(b_node);
+    }
+
+    protected Node getNode(Object a) {
+        Node existing = nodes.get(a);
+        if ( existing!=null ) return existing;
+        Node n = new Node(a);
+        nodes.put(a, n);
+        return n;
+    }
+
+    /** DFS-based topological sort.  A valid sort is the reverse of
+     *  the post-order DFA traversal.  Amazingly simple but true.
+     *  For sorting, I'm not following convention here since ANTLR
+     *  needs the opposite.  Here's what I assume for sorting:
+     *
+     *    If there exists an edge u -> v then u depends on v and v
+     *    must happen before u.
+     *
+     *  So if this gives nonreversed postorder traversal, I get the order
+     *  I want.
+     */
+    public List<Object> sort() {
+        Set<Node> visited = new HashSet<Node>();
+        ArrayList<Object> sorted = new ArrayList<Object>();
+        while ( visited.size() < nodes.size() ) {
+            // pick any unvisited node, n
+            Node n = null;
+            for (Iterator it = nodes.values().iterator(); it.hasNext();) {
+                n = (Node)it.next();
+                if ( !visited.contains(n) ) break;
+            }
+            DFS(n, visited, sorted);
+        }
+        return sorted;
+    }
+
+    public void DFS(Node n, Set<Node> visited, ArrayList<Object> sorted) {
+        if ( visited.contains(n) ) return;
+        visited.add(n);
+        if ( n.edges!=null ) {
+            for (Iterator it = n.edges.iterator(); it.hasNext();) {
+                Node target = (Node) it.next();
+                DFS(target, visited, sorted);
+            }
+        }
+        sorted.add(n.payload);
+    }
+}
\ No newline at end of file
diff --git a/src/org/antlr/misc/IntArrayList.java b/tool/src/main/java/org/antlr/misc/IntArrayList.java
similarity index 100%
rename from src/org/antlr/misc/IntArrayList.java
rename to tool/src/main/java/org/antlr/misc/IntArrayList.java
diff --git a/src/org/antlr/misc/IntSet.java b/tool/src/main/java/org/antlr/misc/IntSet.java
similarity index 100%
rename from src/org/antlr/misc/IntSet.java
rename to tool/src/main/java/org/antlr/misc/IntSet.java
diff --git a/tool/src/main/java/org/antlr/misc/Interval.java b/tool/src/main/java/org/antlr/misc/Interval.java
new file mode 100644
index 0000000..cad0cde
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/Interval.java
@@ -0,0 +1,142 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.misc;
+
+/** An immutable inclusive interval a..b */
+public class Interval {
+	public static final int INTERVAL_POOL_MAX_VALUE = 1000;
+
+	static Interval[] cache = new Interval[INTERVAL_POOL_MAX_VALUE+1];
+
+	public int a;
+	public int b;
+
+	public static int creates = 0;
+	public static int misses = 0;
+	public static int hits = 0;
+	public static int outOfRange = 0;
+
+	public Interval(int a, int b) { this.a=a; this.b=b; }
+
+	/** Interval objects are used readonly so share all with the
+	 *  same single value a==b up to some max size.  Use an array as a perfect hash.
+	 *  Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new
+	 *  Interval object with a..a in it.  On Java.g, 218623 IntervalSets
+	 *  have a..a (set with 1 element).
+	 */
+	public static Interval create(int a, int b) {
+		//return new Interval(a,b);
+		// cache just a..a
+		if ( a!=b || a<0 || a>INTERVAL_POOL_MAX_VALUE ) {
+			return new Interval(a,b);
+		}
+		if ( cache[a]==null ) {
+			cache[a] = new Interval(a,a);
+		}
+		return cache[a];
+	}
+
+	public boolean equals(Object o) {
+		if ( o==null ) {
+			return false;
+		}
+		Interval other = (Interval)o;
+		return this.a==other.a && this.b==other.b;
+	}
+
+	/** Does this start completely before other? Disjoint */
+	public boolean startsBeforeDisjoint(Interval other) {
+		return this.a<other.a && this.b<other.a;
+	}
+
+	/** Does this start at or before other? Nondisjoint */
+	public boolean startsBeforeNonDisjoint(Interval other) {
+		return this.a<=other.a && this.b>=other.a;
+	}
+
+	/** Does this.a start after other.b? May or may not be disjoint */
+	public boolean startsAfter(Interval other) { return this.a>other.a; }
+
+	/** Does this start completely after other? Disjoint */
+	public boolean startsAfterDisjoint(Interval other) {
+		return this.a>other.b;
+	}
+
+	/** Does this start after other? NonDisjoint */
+	public boolean startsAfterNonDisjoint(Interval other) {
+		return this.a>other.a && this.a<=other.b; // this.b>=other.b implied
+	}
+
+	/** Are both ranges disjoint? I.e., no overlap? */
+	public boolean disjoint(Interval other) {
+		return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
+	}
+
+	/** Are two intervals adjacent such as 0..41 and 42..42? */
+	public boolean adjacent(Interval other) {
+		return this.a == other.b+1 || this.b == other.a-1;
+	}
+
+	public boolean properlyContains(Interval other) {
+		return other.a >= this.a && other.b <= this.b;
+	}
+
+	/** Return the interval computed from combining this and other */
+	public Interval union(Interval other) {
+		return Interval.create(Math.min(a,other.a), Math.max(b,other.b));
+	}
+
+	/** Return the interval in common between this and o */
+	public Interval intersection(Interval other) {
+		return Interval.create(Math.max(a,other.a), Math.min(b,other.b));
+	}
+
+	/** Return the interval with elements from this not in other;
+	 *  other must not be totally enclosed (properly contained)
+	 *  within this, which would result in two disjoint intervals
+	 *  instead of the single one returned by this method.
+	 */
+	public Interval differenceNotProperlyContained(Interval other) {
+		Interval diff = null;
+		// other.a to left of this.a (or same)
+		if ( other.startsBeforeNonDisjoint(this) ) {
+			diff = Interval.create(Math.max(this.a,other.b+1),
+								   this.b);
+		}
+
+		// other.a to right of this.a
+		else if ( other.startsAfterNonDisjoint(this) ) {
+			diff = Interval.create(this.a, other.a-1);
+		}
+		return diff;
+	}
+
+	public String toString() {
+		return a+".."+b;
+	}
+}
diff --git a/src/org/antlr/misc/IntervalSet.java b/tool/src/main/java/org/antlr/misc/IntervalSet.java
similarity index 88%
rename from src/org/antlr/misc/IntervalSet.java
rename to tool/src/main/java/org/antlr/misc/IntervalSet.java
index 27d1770..aca92b2 100644
--- a/src/org/antlr/misc/IntervalSet.java
+++ b/tool/src/main/java/org/antlr/misc/IntervalSet.java
@@ -47,17 +47,23 @@ import java.util.*;
  *  The ranges are ordered and disjoint so that 2..6 appears before 101..103.
  */
 public class IntervalSet implements IntSet {
+	public static final IntervalSet COMPLETE_SET = IntervalSet.of(0,Label.MAX_CHAR_VALUE);
+
 	/** The list of sorted, disjoint intervals. */
-    protected List intervals;
+    protected List<Interval> intervals;
 
-    /** Create a set with no elements */
+	/** Create a set with no elements */
     public IntervalSet() {
-        intervals = new ArrayList(2); // most sets are 1 or 2 elements
+        intervals = new ArrayList<Interval>(2); // most sets are 1 or 2 elements
     }
 
-    /** Create a set with a single element, el. */
+	public IntervalSet(List<Interval> intervals) {
+		this.intervals = intervals;
+	}
+
+	/** Create a set with a single element, el. */
     public static IntervalSet of(int a) {
-        IntervalSet s = new IntervalSet();
+		IntervalSet s = new IntervalSet();
         s.add(a);
         return s;
     }
@@ -87,6 +93,7 @@ public class IntervalSet implements IntSet {
         add(Interval.create(a,b));
     }
 
+	// copy on write so we can cache a..a intervals and sets of that
 	protected void add(Interval addition) {
 		//System.out.println("add "+addition+" to "+intervals.toString());
 		if ( addition.b<addition.a ) {
@@ -185,10 +192,11 @@ public class IntervalSet implements IntSet {
         }
         IntervalSet other = (IntervalSet)set;
         // walk set and add each interval
-        for (Iterator iter = other.intervals.iterator(); iter.hasNext();) {
-            Interval I = (Interval) iter.next();
-            this.add(I.a,I.b);
-        }
+		int n = other.intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = (Interval) other.intervals.get(i);
+			this.add(I.a,I.b);
+		}
     }
 
     public IntSet complement(int minElement, int maxElement) {
@@ -213,7 +221,8 @@ public class IntervalSet implements IntSet {
 		int maxElement = vocabularyIS.getMaxElement();
 
 		IntervalSet compl = new IntervalSet();
-		if ( intervals.size()==0 ) {
+		int n = intervals.size();
+		if ( n ==0 ) {
 			return compl;
 		}
 		Interval first = (Interval)intervals.get(0);
@@ -223,14 +232,14 @@ public class IntervalSet implements IntSet {
 			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
 			compl.addAll(a);
 		}
-		for (int i=1; i<intervals.size(); i++) { // from 2nd interval .. nth
+		for (int i=1; i<n; i++) { // from 2nd interval .. nth
 			Interval previous = (Interval)intervals.get(i-1);
 			Interval current = (Interval)intervals.get(i);
 			IntervalSet s = IntervalSet.of(previous.b+1, current.a-1);
 			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
 			compl.addAll(a);
 		}
-		Interval last = (Interval)intervals.get(intervals.size()-1);
+		Interval last = (Interval)intervals.get(n -1);
 		// add a range from last.b to maxElement constrained to vocab
 		if ( last.b < maxElement ) {
 			IntervalSet s = IntervalSet.of(last.b+1, maxElement);
@@ -252,10 +261,10 @@ public class IntervalSet implements IntSet {
 		// will be empty.  The only problem would be when this' set max value
 		// goes beyond MAX_CHAR_VALUE, but hopefully the constant MAX_CHAR_VALUE
 		// will prevent this.
-		return this.and(((IntervalSet)other).complement(0,Label.MAX_CHAR_VALUE));
+		return this.and(((IntervalSet)other).complement(COMPLETE_SET));
 	}
 
-    /** return a new set containing all elements in this but not in other.
+	/** return a new set containing all elements in this but not in other.
      *  Intervals may have to be broken up when ranges in this overlap
      *  with ranges in other.  other is assumed to be a subset of this;
      *  anything that is in other but not in this will be ignored.
@@ -376,8 +385,12 @@ public class IntervalSet implements IntSet {
 
     /** TODO: implement this! */
 	public IntSet or(IntSet a) {
-		throw new NoSuchMethodError();
-    }
+		IntervalSet o = new IntervalSet();
+		o.addAll(this);
+		o.addAll(a);
+		//throw new NoSuchMethodError();
+		return o;
+	}
 
     /** Return a new set with the intersection of this set with other.  Because
      *  the intervals are sorted, we can use an iterator for each list and
@@ -454,7 +467,21 @@ public class IntervalSet implements IntSet {
 
     /** Is el in any range of this set? */
     public boolean member(int el) {
-        for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = (Interval) intervals.get(i);
+			int a = I.a;
+			int b = I.b;
+			if ( el<a ) {
+				break; // list is sorted and el is before this interval; not here
+			}
+			if ( el>=a && el<=b ) {
+				return true; // found in this interval
+			}
+		}
+		return false;
+/*
+		for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
             Interval I = (Interval) iter.next();
             if ( el<I.a ) {
                 break; // list is sorted and el is before this interval; not here
@@ -464,6 +491,7 @@ public class IntervalSet implements IntSet {
             }
         }
         return false;
+        */
     }
 
     /** return true if this set has no members */
@@ -495,9 +523,9 @@ public class IntervalSet implements IntSet {
 		if ( isNil() ) {
 			return Label.INVALID;
 		}
-		Iterator iter = this.intervals.iterator();
-		while (iter.hasNext()) {
-			Interval I = (Interval) iter.next();
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = (Interval) intervals.get(i);
 			int a = I.a;
 			int b = I.b;
 			for (int v=a; v<=b; v++) {
@@ -508,7 +536,7 @@ public class IntervalSet implements IntSet {
 	}
 
     /** Return a list of Interval objects. */
-    public List getIntervals() {
+    public List<Interval> getIntervals() {
         return intervals;
     }
 
@@ -570,21 +598,23 @@ public class IntervalSet implements IntSet {
 
     public int size() {
 		int n = 0;
-		Iterator iter = this.intervals.iterator();
-		while (iter.hasNext()) {
-			Interval I = (Interval) iter.next();
-			int a = I.a;
-			int b = I.b;
-			n += (b-a+1);
+		int numIntervals = intervals.size();
+		if ( numIntervals==1 ) {
+			Interval firstInterval = this.intervals.get(0);
+			return firstInterval.b-firstInterval.a+1;
+		}
+		for (int i = 0; i < numIntervals; i++) {
+			Interval I = (Interval) intervals.get(i);
+			n += (I.b-I.a+1);
 		}
 		return n;
     }
 
     public List toList() {
 		List values = new ArrayList();
-		Iterator iter = this.intervals.iterator();
-		while (iter.hasNext()) {
-			Interval I = (Interval) iter.next();
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = (Interval) intervals.get(i);
 			int a = I.a;
 			int b = I.b;
 			for (int v=a; v<=b; v++) {
@@ -594,17 +624,38 @@ public class IntervalSet implements IntSet {
 		return values;
     }
 
+	/** Get the ith element of ordered set.  Used only by RandomPhrase so
+	 *  don't bother to implement if you're not doing that for a new
+	 *  ANTLR code gen target.
+	 */
+	public int get(int i) {
+		int n = intervals.size();
+		int index = 0;
+		for (int j = 0; j < n; j++) {
+			Interval I = (Interval) intervals.get(j);
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				if ( index==i ) {
+					return v;
+				}
+				index++;
+			}
+		}
+		return -1;
+	}
+
 	public int[] toArray() {
 		int[] values = new int[size()];
-		Iterator iter = this.intervals.iterator();
-		int i = 0;
-		while (iter.hasNext()) {
-			Interval I = (Interval) iter.next();
+		int n = intervals.size();
+		int j = 0;
+		for (int i = 0; i < n; i++) {
+			Interval I = (Interval) intervals.get(i);
 			int a = I.a;
 			int b = I.b;
 			for (int v=a; v<=b; v++) {
-				values[i] = v;
-				i++;
+				values[j] = v;
+				j++;
 			}
 		}
 		return values;
@@ -613,15 +664,13 @@ public class IntervalSet implements IntSet {
 	public org.antlr.runtime.BitSet toRuntimeBitSet() {
 		org.antlr.runtime.BitSet s =
 			new org.antlr.runtime.BitSet(getMaxElement()+1);
-		Iterator iter = this.intervals.iterator();
-		int i = 0;
-		while (iter.hasNext()) {
-			Interval I = (Interval) iter.next();
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = (Interval) intervals.get(i);
 			int a = I.a;
 			int b = I.b;
 			for (int v=a; v<=b; v++) {
 				s.add(v);
-				i++;
 			}
 		}
 		return s;
diff --git a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java b/tool/src/main/java/org/antlr/misc/MultiMap.java
similarity index 74%
rename from runtime/Java/src/org/antlr/runtime/EarlyExitException.java
rename to tool/src/main/java/org/antlr/misc/MultiMap.java
index 29f0865..7f40408 100644
--- a/runtime/Java/src/org/antlr/runtime/EarlyExitException.java
+++ b/tool/src/main/java/org/antlr/misc/MultiMap.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,17 +25,20 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.misc;
 
-/**  The recognizer did not match anything for a (..)+ loop. */
-public class EarlyExitException extends RecognitionException {
-	public int decisionNumber;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
 
-	/** Used for remote debugger deserialization */
-	public EarlyExitException() {;}
-	
-	public EarlyExitException(int decisionNumber, IntStream input) {
-		super(input);
-		this.decisionNumber = decisionNumber;
+/** A hash table that maps a key to a list of elements not just a single. */
+public class MultiMap<K, V> extends HashMap<K, List<V>> {
+	public void map(K key, V value) {
+		List<V> elementsForKey = get(key);
+		if ( elementsForKey==null ) {
+			elementsForKey = new ArrayList<V>();
+			super.put(key, elementsForKey);
+		}
+		elementsForKey.add(value);
 	}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java b/tool/src/main/java/org/antlr/misc/MutableInteger.java
similarity index 78%
rename from runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java
rename to tool/src/main/java/org/antlr/misc/MutableInteger.java
index b048aaf..bd977dc 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedRangeException.java
+++ b/tool/src/main/java/org/antlr/misc/MutableInteger.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,18 +25,18 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.misc;
 
-public class MismatchedRangeException extends RecognitionException {
-	public int a,b;
-
-	public MismatchedRangeException(int a, int b, IntStream input) {
-		super(input);
-		this.a = a;
-		this.b = b;
+/** Java won't let you modify an Integer; not sure how that's more
+ *  efficient, but...here's one that let's you modify it.
+ *  Frightening I have to implement this myself. Blech.
+ */
+public class MutableInteger {
+	public int value;
+	public MutableInteger() {
+		this(0);
 	}
-
-	public String toString() {
-		return "MismatchedNotSetException("+getUnexpectedType()+" not in ["+a+","+b+"])";
+	public MutableInteger(int value) {
+		this.value = value;
 	}
 }
diff --git a/src/org/antlr/misc/OrderedHashSet.java b/tool/src/main/java/org/antlr/misc/OrderedHashSet.java
similarity index 88%
rename from src/org/antlr/misc/OrderedHashSet.java
rename to tool/src/main/java/org/antlr/misc/OrderedHashSet.java
index 408a70f..0a021a5 100644
--- a/src/org/antlr/misc/OrderedHashSet.java
+++ b/tool/src/main/java/org/antlr/misc/OrderedHashSet.java
@@ -27,28 +27,28 @@
 */
 package org.antlr.misc;
 
-import org.antlr.tool.ErrorManager;
-
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
 
 /** A HashMap that remembers the order that the elements were added.
  *  You can alter the ith element with set(i,value) too :)  Unique list.
  *  I need the replace/set-element-i functionality so I'm subclassing
  *  OrderedHashSet.
  */
-public class OrderedHashSet extends HashSet {
+public class OrderedHashSet<T> extends HashSet {
     /** Track the elements as they are added to the set */
-    protected List elements = new ArrayList();
+    protected List<T> elements = new ArrayList<T>();
 
-    public Object get(int i) {
+    public T get(int i) {
         return elements.get(i);
     }
 
     /** Replace an existing value with a new value; updates the element
      *  list and the hash table, but not the key as that has not changed.
      */
-    public Object set(int i, Object value) {
-        Object oldElement = elements.get(i);
+    public T set(int i, T value) {
+        T oldElement = elements.get(i);
         elements.set(i,value); // update list
         super.remove(oldElement); // now update the set: remove/add
         super.add(value);
@@ -62,7 +62,7 @@ public class OrderedHashSet extends HashSet {
     public boolean add(Object value) {
         boolean result = super.add(value);
 		if ( result ) {  // only track if new element not in set
-			elements.add(value);
+			elements.add((T)value);
 		}
 		return result;
     }
@@ -83,15 +83,17 @@ public class OrderedHashSet extends HashSet {
     /** Return the List holding list of table elements.  Note that you are
      *  NOT getting a copy so don't write to the list.
      */
-    public List elements() {
+    public List<T> elements() {
         return elements;
     }
 
     public int size() {
-        if ( elements.size()!=super.size() ) {
+		/*
+		if ( elements.size()!=super.size() ) {
 			ErrorManager.internalError("OrderedHashSet: elements and set size differs; "+
 									   elements.size()+"!="+super.size());
         }
+        */
         return elements.size();
     }
 
diff --git a/src/org/antlr/misc/Utils.java b/tool/src/main/java/org/antlr/misc/Utils.java
similarity index 100%
rename from src/org/antlr/misc/Utils.java
rename to tool/src/main/java/org/antlr/misc/Utils.java
diff --git a/src/org/antlr/tool/ANTLRErrorListener.java b/tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java
similarity index 98%
rename from src/org/antlr/tool/ANTLRErrorListener.java
rename to tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java
index 32237a3..e067ee8 100644
--- a/src/org/antlr/tool/ANTLRErrorListener.java
+++ b/tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java b/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java
new file mode 100644
index 0000000..8ad2f66
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java
@@ -0,0 +1,311 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.Label;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+import org.antlr.grammar.v2.AssignTokenTypesWalker;
+
+/** Move all of the functionality from assign.types.g grammar file. */
+public class AssignTokenTypesBehavior extends AssignTokenTypesWalker {
+	protected static final Integer UNASSIGNED = Utils.integer(-1);
+	protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
+
+	protected Map<String,Integer> stringLiterals = new LinkedHashMap();
+	protected Map<String,Integer> tokens = new LinkedHashMap();
+	protected Map<String,String> aliases = new LinkedHashMap();
+	protected Map<String,String> aliasesReverseIndex = new HashMap<String,String>();
+
+	/** Track actual lexer rule defs so we don't get repeated token defs in
+	 *  generated lexer.
+	 */
+	protected Set<String> tokenRuleDefs = new HashSet();
+
+    @Override
+	protected void init(Grammar g) {
+		this.grammar = g;
+		currentRuleName = null;
+		if ( stringAlias==null ) {
+			// only init once; can't statically init since we need astFactory
+			initASTPatterns();
+		}
+	}
+
+	/** Track string literals (could be in tokens{} section) */
+    @Override
+	protected void trackString(GrammarAST t) {
+		// if lexer, don't allow aliasing in tokens section
+		if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
+			ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
+									  grammar,
+									  t.token,
+									  t.getText());
+			return;
+		}
+		// in a plain parser grammar rule, cannot reference literals
+		// (unless defined previously via tokenVocab option)
+		// don't warn until we hit root grammar as may be defined there.
+		if ( grammar.getGrammarIsRoot() &&
+			 grammar.type==Grammar.PARSER &&
+			 grammar.getTokenType(t.getText())== Label.INVALID )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
+									  grammar,
+									  t.token,
+									  t.getText());
+		}
+		// Don't record literals for lexers, they are things to match not tokens
+		if ( grammar.type==Grammar.LEXER ) {
+			return;
+		}
+		// otherwise add literal to token types if referenced from parser rule
+		// or in the tokens{} section
+		if ( (currentRuleName==null ||
+			  Character.isLowerCase(currentRuleName.charAt(0))) &&
+																grammar.getTokenType(t.getText())==Label.INVALID )
+		{
+			stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
+		}
+	}
+
+    @Override
+	protected void trackToken(GrammarAST t) {
+		// imported token names might exist, only add if new
+		// Might have ';'=4 in vocab import and SEMI=';'. Avoid
+		// setting to UNASSIGNED if we have loaded ';'/SEMI
+		if ( grammar.getTokenType(t.getText())==Label.INVALID &&
+			 tokens.get(t.getText())==null )
+		{
+			tokens.put(t.getText(), UNASSIGNED);
+		}
+	}
+
+    @Override
+	protected void trackTokenRule(GrammarAST t,
+								  GrammarAST modifier,
+								  GrammarAST block)
+	{
+		// imported token names might exist, only add if new
+		if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
+			if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
+				return;
+			}
+			if ( t.getText().equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
+				// don't add Tokens rule
+				return;
+			}
+
+			// track all lexer rules so we can look for token refs w/o
+			// associated lexer rules.
+			grammar.composite.lexerRules.add(t.getText());
+
+			int existing = grammar.getTokenType(t.getText());
+			if ( existing==Label.INVALID ) {
+				tokens.put(t.getText(), UNASSIGNED);
+			}
+			// look for "<TOKEN> : <literal> ;" pattern
+			// (can have optional action last)
+			if ( block.hasSameTreeStructure(charAlias) ||
+				 block.hasSameTreeStructure(stringAlias) ||
+				 block.hasSameTreeStructure(charAlias2) ||
+				 block.hasSameTreeStructure(stringAlias2) )
+			{
+				tokenRuleDefs.add(t.getText());
+				/*
+			Grammar parent = grammar.composite.getDelegator(grammar);
+			boolean importedByParserOrCombined =
+				parent!=null &&
+				(parent.type==Grammar.LEXER||parent.type==Grammar.PARSER);
+				*/
+				if ( grammar.type==Grammar.COMBINED || grammar.type==Grammar.LEXER ) {
+					// only call this rule an alias if combined or lexer
+					alias(t, (GrammarAST)block.getFirstChild().getFirstChild());
+				}
+			}
+		}
+		// else error
+	}
+
+    @Override
+	protected void alias(GrammarAST t, GrammarAST s) {
+		String tokenID = t.getText();
+		String literal = s.getText();
+		String prevAliasLiteralID = aliasesReverseIndex.get(literal);
+		if ( prevAliasLiteralID!=null ) { // we've seen this literal before
+			if ( tokenID.equals(prevAliasLiteralID) ) {
+				// duplicate but identical alias; might be tokens {A='a'} and
+				// lexer rule A : 'a' ;  Is ok, just return
+				return;
+			}
+
+			// give error unless both are rules (ok if one is in tokens section)
+			if ( !(tokenRuleDefs.contains(tokenID) && tokenRuleDefs.contains(prevAliasLiteralID)) )
+			{
+				// don't allow alias if A='a' in tokens section and B : 'a'; is rule.
+				// Allow if both are rules.  Will get DFA nondeterminism error later.
+				ErrorManager.grammarError(ErrorManager.MSG_TOKEN_ALIAS_CONFLICT,
+										  grammar,
+										  t.token,
+										  tokenID+"="+literal,
+										  prevAliasLiteralID);
+			}
+			return; // don't do the alias
+		}
+		int existingLiteralType = grammar.getTokenType(literal);
+		if ( existingLiteralType !=Label.INVALID ) {
+			// we've seen this before from a tokenVocab most likely
+			// don't assign a new token type; use existingLiteralType.
+			tokens.put(tokenID, existingLiteralType);
+		}
+		String prevAliasTokenID = aliases.get(tokenID);
+		if ( prevAliasTokenID!=null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT,
+									  grammar,
+									  t.token,
+									  tokenID+"="+literal,
+									  prevAliasTokenID);
+			return; // don't do the alias
+		}
+		aliases.put(tokenID, literal);
+		aliasesReverseIndex.put(literal, tokenID);
+	}
+
+    @Override
+	public void defineTokens(Grammar root) {
+/*
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+	System.out.println("aliasesReverseIndex="+aliasesReverseIndex);
+*/
+
+		assignTokenIDTypes(root);
+
+		aliasTokenIDsAndLiterals(root);
+
+		assignStringTypes(root);
+
+/*
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+*/
+		defineTokenNamesAndLiteralsInGrammar(root);
+	}
+
+/*
+protected void defineStringLiteralsFromDelegates() {
+	 if ( grammar.getGrammarIsMaster() && grammar.type==Grammar.COMBINED ) {
+		 List<Grammar> delegates = grammar.getDelegates();
+		 System.out.println("delegates in master combined: "+delegates);
+		 for (int i = 0; i < delegates.size(); i++) {
+			 Grammar d = (Grammar) delegates.get(i);
+			 Set<String> literals = d.getStringLiterals();
+			 for (Iterator it = literals.iterator(); it.hasNext();) {
+				 String literal = (String) it.next();
+				 System.out.println("literal "+literal);
+				 int ttype = grammar.getTokenType(literal);
+				 grammar.defineLexerRuleForStringLiteral(literal, ttype);
+			 }
+		 }
+	 }
+}
+*/
+
+    @Override
+	protected void assignStringTypes(Grammar root) {
+		// walk string literals assigning types to unassigned ones
+		Set s = stringLiterals.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String lit = (String) it.next();
+			Integer oldTypeI = (Integer)stringLiterals.get(lit);
+			int oldType = oldTypeI.intValue();
+			if ( oldType<Label.MIN_TOKEN_TYPE ) {
+				Integer typeI = Utils.integer(root.getNewTokenType());
+				stringLiterals.put(lit, typeI);
+				// if string referenced in combined grammar parser rule,
+				// automatically define in the generated lexer
+				root.defineLexerRuleForStringLiteral(lit, typeI.intValue());
+			}
+		}
+	}
+
+    @Override
+	protected void aliasTokenIDsAndLiterals(Grammar root) {
+		if ( root.type==Grammar.LEXER ) {
+			return; // strings/chars are never token types in LEXER
+		}
+		// walk aliases if any and assign types to aliased literals if literal
+		// was referenced
+		Set s = aliases.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			String literal = (String)aliases.get(tokenID);
+			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
+				stringLiterals.put(literal, tokens.get(tokenID));
+				// an alias still means you need a lexer rule for it
+				Integer typeI = (Integer)tokens.get(tokenID);
+				if ( !tokenRuleDefs.contains(tokenID) ) {
+					root.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI.intValue());
+				}
+			}
+		}
+	}
+
+    @Override
+	protected void assignTokenIDTypes(Grammar root) {
+		// walk token names, assigning values if unassigned
+		Set s = tokens.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			if ( tokens.get(tokenID)==UNASSIGNED ) {
+				tokens.put(tokenID, Utils.integer(root.getNewTokenType()));
+			}
+		}
+	}
+
+    @Override
+	protected void defineTokenNamesAndLiteralsInGrammar(Grammar root) {
+		Set s = tokens.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String tokenID = (String) it.next();
+			int ttype = ((Integer)tokens.get(tokenID)).intValue();
+			root.defineToken(tokenID, ttype);
+		}
+		s = stringLiterals.keySet();
+		for (Iterator it = s.iterator(); it.hasNext();) {
+			String lit = (String) it.next();
+			int ttype = ((Integer)stringLiterals.get(lit)).intValue();
+			root.defineToken(lit, ttype);
+		}
+	}
+
+}
diff --git a/src/org/antlr/tool/Attribute.java b/tool/src/main/java/org/antlr/tool/Attribute.java
similarity index 99%
rename from src/org/antlr/tool/Attribute.java
rename to tool/src/main/java/org/antlr/tool/Attribute.java
index 001b684..9356973 100644
--- a/src/org/antlr/tool/Attribute.java
+++ b/tool/src/main/java/org/antlr/tool/Attribute.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/src/org/antlr/tool/AttributeScope.java b/tool/src/main/java/org/antlr/tool/AttributeScope.java
similarity index 90%
rename from src/org/antlr/tool/AttributeScope.java
rename to tool/src/main/java/org/antlr/tool/AttributeScope.java
index fbc98a2..071acde 100644
--- a/src/org/antlr/tool/AttributeScope.java
+++ b/tool/src/main/java/org/antlr/tool/AttributeScope.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -28,6 +28,7 @@
 package org.antlr.tool;
 
 import antlr.Token;
+import org.antlr.codegen.CodeGenerator;
 
 import java.util.*;
 
@@ -52,6 +53,7 @@ public class AttributeScope {
 		tokenScope.addAttribute("pos", null);
 		tokenScope.addAttribute("channel", null);
 		tokenScope.addAttribute("tree", null);
+		tokenScope.addAttribute("int", null);
 	}
 
 	/** This scope is associated with which input token (for error handling)? */
@@ -75,10 +77,10 @@ public class AttributeScope {
 	public boolean isPredefinedRuleScope;
 
 	public boolean isPredefinedLexerRuleScope;
-	
+
 	/** The list of Attribute objects */
 
-	protected LinkedHashMap attributes = new LinkedHashMap();
+	protected LinkedHashMap<String,Attribute> attributes = new LinkedHashMap();
 
 	public AttributeScope(String name, Token derivedFromToken) {
 		this(null,name,derivedFromToken);
@@ -113,15 +115,11 @@ public class AttributeScope {
 	 *  would pass in definitions equal to the text in between {...} and
 	 *  separator=';'.  It results in two Attribute objects.
 	 */
-	public void addAttributes(String definitions, String separator) {
-        StringTokenizer st = new StringTokenizer(definitions,separator);
-		while (st.hasMoreElements()) {
-			String decl = (String) st.nextElement();
-			decl = decl.trim();
-			if ( decl.length()==0 ) {
-				break; // final bit of whitespace; ignore
-			}
-			Attribute attr = new Attribute(decl);
+	public void addAttributes(String definitions, int separator) {
+		List<String> attrs = new ArrayList<String>();
+		CodeGenerator.getListOfArgumentsFromAction(definitions,0,-1,separator,attrs);
+		for (String a : attrs) {
+			Attribute attr = new Attribute(a);
 			if ( !isReturnScope && attr.initValue!=null ) {
 				ErrorManager.grammarError(ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL,
 										  grammar,
@@ -142,8 +140,8 @@ public class AttributeScope {
 	}
 
 	/** Used by templates to get all attributes */
-	public List getAttributes() {
-		List a = new ArrayList();
+	public List<Attribute> getAttributes() {
+		List<Attribute> a = new ArrayList<Attribute>();
 		a.addAll(attributes.values());
 		return a;
 	}
diff --git a/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java b/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java
new file mode 100644
index 0000000..29beaef
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java
@@ -0,0 +1,265 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.misc.Utils;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.io.*;
+
+/** Given a grammar file, show the dependencies on .tokens etc...
+ *  Using ST, emit a simple "make compatible" list of dependencies.
+ *  For example, combined grammar T.g (no token import) generates:
+ *
+ *		TParser.java : T.g
+ * 		T.tokens : T.g
+ * 		T__g : T.g
+ *
+ *  For tree grammar TP with import of T.tokens:
+ *
+ * 		TP.g : T.tokens
+ * 		TP.java : TP.g
+ *
+ *  If "-lib libdir" is used on command-line with -depend, then include the
+ *  path like
+ *
+ * 		TP.g : libdir/T.tokens
+ *
+ *  Pay attention to -o as well:
+ *
+ * 		outputdir/TParser.java : T.g
+ *
+ *  So this output shows what the grammar depends on *and* what it generates.
+ *
+ *  Operate on one grammar file at a time.  If given a list of .g on the
+ *  command-line with -depend, just emit the dependencies.  The grammars
+ *  may depend on each other, but the order doesn't matter.  Build tools,
+ *  reading in this output, will know how to organize it.
+ *
+ *  This is a wee bit slow probably because the code generator has to load
+ *  all of its template files in order to figure out the file extension
+ *  for the generated recognizer.
+ *
+ *  This code was obvious until I removed redundant "./" on front of files
+ *  and had to escape spaces in filenames :(
+ */
+public class BuildDependencyGenerator {
+    protected String grammarFileName;
+    protected String tokenVocab;
+    protected Tool tool;
+    protected Grammar grammar;
+    protected CodeGenerator generator;
+    protected StringTemplateGroup templates;
+
+    public BuildDependencyGenerator(Tool tool, String grammarFileName)
+            throws IOException, antlr.TokenStreamException, antlr.RecognitionException {
+        this.tool = tool;
+        this.grammarFileName = grammarFileName;
+        grammar = tool.getRootGrammar(grammarFileName);
+        String language = (String) grammar.getOption("language");
+        generator = new CodeGenerator(tool, grammar, language);
+        generator.loadTemplates(language);
+    }
+
+    /** From T.g return a list of File objects that
+     *  name files ANTLR will emit from T.g.
+     */
+    public List<File> getGeneratedFileList() {
+        List<File> files = new ArrayList<File>();
+        File outputDir = tool.getOutputDirectory(grammarFileName);
+        if (outputDir.getName().equals(".")) {
+            outputDir = null;
+        } else if (outputDir.getName().indexOf(' ') >= 0) { // has spaces?
+            String escSpaces = Utils.replace(outputDir.toString(),
+                    " ",
+                    "\\ ");
+            outputDir = new File(escSpaces);
+        }
+        // add generated recognizer; e.g., TParser.java
+        String recognizer =
+                generator.getRecognizerFileName(grammar.name, grammar.type);
+        files.add(new File(outputDir, recognizer));
+        // add output vocab file; e.g., T.tokens. This is always generated to
+        // the base output directory, which will be just . if there is no -o option
+        //
+        files.add(new File(tool.getOutputDirectory(), generator.getVocabFileName()));
+        // are we generating a .h file?
+        StringTemplate headerExtST = null;
+        StringTemplate extST = generator.getTemplates().getInstanceOf("codeFileExtension");
+        if (generator.getTemplates().isDefined("headerFile")) {
+            headerExtST = generator.getTemplates().getInstanceOf("headerFileExtension");
+            String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
+            String fileName = grammar.name + suffix + headerExtST.toString();
+            files.add(new File(outputDir, fileName));
+        }
+        if (grammar.type == Grammar.COMBINED) {
+            // add autogenerated lexer; e.g., TLexer.java TLexer.h TLexer.tokens
+            // don't add T__.g (just a temp file)
+            
+            String suffix = Grammar.grammarTypeToFileNameSuffix[Grammar.LEXER];
+            String lexer = grammar.name + suffix + extST.toString();
+            files.add(new File(outputDir, lexer));
+
+            // TLexer.h
+            if (headerExtST != null) {
+                String header = grammar.name + suffix + headerExtST.toString();
+                files.add(new File(outputDir, header));
+            }
+        // for combined, don't generate TLexer.tokens
+        }
+
+        // handle generated files for imported grammars
+        List<Grammar> imports =
+                grammar.composite.getDelegates(grammar.composite.getRootGrammar());
+        for (Grammar g : imports) {
+            outputDir = tool.getOutputDirectory(g.getFileName());
+            String fname = groomQualifiedFileName(outputDir.toString(), g.getRecognizerName() + extST.toString());
+            files.add(new File(fname));
+        }
+
+        if (files.size() == 0) {
+            return null;
+        }
+        return files;
+    }
+
+    /**
+     * Return a list of File objects that name files ANTLR will read
+     * to process T.g; This can be .tokens files if the grammar uses the tokenVocab option
+     * as well as any imported grammar files.
+     */
+    public List<File> getDependenciesFileList() {
+        // Find all the things other than imported grammars
+        List<File> files = getNonImportDependenciesFileList();
+
+        // Handle imported grammars
+        List<Grammar> imports =
+                grammar.composite.getDelegates(grammar.composite.getRootGrammar());
+        for (Grammar g : imports) {
+            String libdir = tool.getLibraryDirectory();
+            String fileName = groomQualifiedFileName(libdir, g.fileName);
+            files.add(new File(fileName));
+        }
+
+        if (files.size() == 0) {
+            return null;
+        }
+        return files;
+    }
+
+    /**
+     * Return a list of File objects that name files ANTLR will read
+     * to process T.g; This can only be .tokens files and only
+     * if they use the tokenVocab option.
+     *
+     * @return List of dependencies other than imported grammars
+     */
+    public List<File> getNonImportDependenciesFileList() {
+        List<File> files = new ArrayList<File>();
+
+        // handle token vocabulary loads
+        tokenVocab = (String) grammar.getOption("tokenVocab");
+        if (tokenVocab != null) {
+
+            File vocabFile = tool.getImportedVocabFile(tokenVocab);
+            files.add(vocabFile);
+        }
+
+        return files;
+    }
+
+    public StringTemplate getDependencies() {
+        loadDependencyTemplates();
+        StringTemplate dependenciesST = templates.getInstanceOf("dependencies");
+        dependenciesST.setAttribute("in", getDependenciesFileList());
+        dependenciesST.setAttribute("out", getGeneratedFileList());
+        dependenciesST.setAttribute("grammarFileName", grammar.fileName);
+        return dependenciesST;
+    }
+
+    public void loadDependencyTemplates() {
+        if (templates != null) {
+            return;
+        }
+        String fileName = "org/antlr/tool/templates/depend.stg";
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        InputStream is = cl.getResourceAsStream(fileName);
+        if (is == null) {
+            cl = ErrorManager.class.getClassLoader();
+            is = cl.getResourceAsStream(fileName);
+        }
+        if (is == null) {
+            ErrorManager.internalError("Can't load dependency templates: " + fileName);
+            return;
+        }
+        BufferedReader br = null;
+        try {
+            br = new BufferedReader(new InputStreamReader(is));
+            templates = new StringTemplateGroup(br,
+                    AngleBracketTemplateLexer.class);
+            br.close();
+        } catch (IOException ioe) {
+            ErrorManager.internalError("error reading dependency templates file " + fileName, ioe);
+        } finally {
+            if (br != null) {
+                try {
+                    br.close();
+                } catch (IOException ioe) {
+                    ErrorManager.internalError("cannot close dependency templates file " + fileName, ioe);
+                }
+            }
+        }
+    }
+
+    public String getTokenVocab() {
+        return tokenVocab;
+    }
+
+    public CodeGenerator getGenerator() {
+        return generator;
+    }    
+
+    public String groomQualifiedFileName(String outputDir, String fileName) {
+        if (outputDir.equals(".")) {
+            return fileName;
+        } else if (outputDir.indexOf(' ') >= 0) { // has spaces?
+            String escSpaces = Utils.replace(outputDir.toString(),
+                    " ",
+                    "\\ ");
+            return escSpaces + File.separator + fileName;
+        } else {
+            return outputDir + File.separator + fileName;
+        }
+    }
+}
diff --git a/tool/src/main/java/org/antlr/tool/CompositeGrammar.java b/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
new file mode 100644
index 0000000..bb15723
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
@@ -0,0 +1,519 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/package org.antlr.tool;
+
+import antlr.RecognitionException;
+import org.antlr.analysis.Label;
+import org.antlr.analysis.NFAState;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+import org.antlr.grammar.v2.AssignTokenTypesWalker;
+
+/** A tree of component (delegate) grammars.
+ *
+ *  Rules defined in delegates are "inherited" like multi-inheritance
+ *  so you can override them.  All token types must be consistent across
+ *  rules from all delegate grammars, so they must be stored here in one
+ *  central place.
+ *
+ *  We have to start out assuming a composite grammar situation as we can't
+ *  look into the grammar files a priori to see if there is a delegate
+ *  statement.  Because of this, and to avoid duplicating token type tracking
+ *  in each grammar, even single noncomposite grammars use one of these objects
+ *  to track token types.
+ */
+public class CompositeGrammar {
+	public static final int MIN_RULE_INDEX = 1;
+	
+	public CompositeGrammarTree delegateGrammarTreeRoot;
+
+	/** Used during getRuleReferenceClosure to detect computation cycles */
+	protected Set<NFAState> refClosureBusy = new HashSet<NFAState>();
+
+	/** Used to assign state numbers; all grammars in composite share common
+	 *  NFA space.  This NFA tracks state numbers number to state mapping.
+	 */
+	public int stateCounter = 0;
+
+	/** The NFA states in the NFA built from rules across grammars in composite.
+	 *  Maps state number to NFAState object.
+	 *  This is a Vector instead of a List because I need to be able to grow
+	 *  this properly.  After talking to Josh Bloch, Collections guy at Sun,
+	 *  I decided this was easiest solution.
+	 */
+	protected Vector<NFAState> numberToStateList = new Vector<NFAState>(1000);
+
+	/** Token names and literal tokens like "void" are uniquely indexed.
+	 *  with -1 implying EOF.  Characters are different; they go from
+	 *  -1 (EOF) to \uFFFE.  For example, 0 could be a binary byte you
+	 *  want to lexer.  Labels of DFA/NFA transitions can be both tokens
+	 *  and characters.  I use negative numbers for bookkeeping labels
+	 *  like EPSILON. Char/String literals and token types overlap in the same
+	 *  space, however.
+	 */
+	protected int maxTokenType = Label.MIN_TOKEN_TYPE-1;
+
+	/** Map token like ID (but not literals like "while") to its token type */
+	public Map tokenIDToTypeMap = new HashMap();
+
+	/** Map token literals like "while" to its token type.  It may be that
+	 *  WHILE="while"=35, in which case both tokenIDToTypeMap and this
+	 *  field will have entries both mapped to 35.
+	 */
+	public Map<String, Integer> stringLiteralToTypeMap = new HashMap<String, Integer>();
+	/** Reverse index for stringLiteralToTypeMap */
+	public Vector<String> typeToStringLiteralList = new Vector<String>();
+
+	/** Map a token type to its token name.
+	 *  Must subtract MIN_TOKEN_TYPE from index.
+	 */
+	public Vector<String> typeToTokenList = new Vector<String>();
+
+	/** If combined or lexer grammar, track the rules.
+	 * 	Track lexer rules so we can warn about undefined tokens.
+	 *  This is combined set of lexer rules from all lexer grammars
+	 *  seen in all imports.
+	 */
+	protected Set<String> lexerRules = new HashSet<String>();
+
+	/** Rules are uniquely labeled from 1..n among all grammars */
+	protected int ruleIndex = MIN_RULE_INDEX;
+
+	/** Map a rule index to its name; use a Vector on purpose as new
+	 *  collections stuff won't let me setSize and make it grow.  :(
+	 *  I need a specific guaranteed index, which the Collections stuff
+	 *  won't let me have.
+	 */
+	protected Vector<Rule> ruleIndexToRuleList = new Vector<Rule>();
+
+	public boolean watchNFAConversion = false;
+
+	protected void initTokenSymbolTables() {
+		// the faux token types take first NUM_FAUX_LABELS positions
+		// then we must have room for the predefined runtime token types
+		// like DOWN/UP used for tree parsing.
+		typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE-1);
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, "<INVALID>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, "<EOT>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, "<SEMPRED>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.DOWN-1, "DOWN");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.UP-1, "UP");
+		tokenIDToTypeMap.put("<INVALID>", Utils.integer(Label.INVALID));
+		tokenIDToTypeMap.put("<EOT>", Utils.integer(Label.EOT));
+		tokenIDToTypeMap.put("<SEMPRED>", Utils.integer(Label.SEMPRED));
+		tokenIDToTypeMap.put("<SET>", Utils.integer(Label.SET));
+		tokenIDToTypeMap.put("<EPSILON>", Utils.integer(Label.EPSILON));
+		tokenIDToTypeMap.put("EOF", Utils.integer(Label.EOF));
+		tokenIDToTypeMap.put("<EOR>", Utils.integer(Label.EOR_TOKEN_TYPE));
+		tokenIDToTypeMap.put("DOWN", Utils.integer(Label.DOWN));
+		tokenIDToTypeMap.put("UP", Utils.integer(Label.UP));
+	}
+
+	public CompositeGrammar() {
+		initTokenSymbolTables();
+	}
+
+	public CompositeGrammar(Grammar g) {
+		this();
+		setDelegationRoot(g);
+	}
+
+	public void setDelegationRoot(Grammar root) {
+		delegateGrammarTreeRoot = new CompositeGrammarTree(root);
+		root.compositeTreeNode = delegateGrammarTreeRoot;
+	}
+
+	public Rule getRule(String ruleName) {
+		return delegateGrammarTreeRoot.getRule(ruleName);
+	}
+
+	public Object getOption(String key) {
+		return delegateGrammarTreeRoot.getOption(key);
+	}
+
+	/** Add delegate grammar as child of delegator */
+	public void addGrammar(Grammar delegator, Grammar delegate) {
+		if ( delegator.compositeTreeNode==null ) {
+			delegator.compositeTreeNode = new CompositeGrammarTree(delegator);
+		}
+		delegator.compositeTreeNode.addChild(new CompositeGrammarTree(delegate));
+
+		/*// find delegator in tree so we can add a child to it
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(delegator);
+		t.addChild();
+		*/
+		// make sure new grammar shares this composite
+		delegate.composite = this;
+	}
+
+	/** Get parent of this grammar */
+	public Grammar getDelegator(Grammar g) {
+		CompositeGrammarTree me = delegateGrammarTreeRoot.findNode(g);
+		if ( me==null ) {
+			return null; // not found
+		}
+		if ( me.parent!=null ) {
+			return me.parent.grammar;
+		}
+		return null;
+	}
+
+	/** Get list of all delegates from all grammars in the delegate subtree of g.
+	 *  The grammars are in delegation tree preorder.  Don't include g itself
+	 *  in list as it is not a delegate of itself.
+	 */
+	public List<Grammar> getDelegates(Grammar g) {
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
+		if ( t==null ) {
+			return null; // no delegates
+		}
+		List<Grammar> grammars = t.getPostOrderedGrammarList();
+		grammars.remove(grammars.size()-1); // remove g (last one)
+		return grammars;
+	}
+
+	public List<Grammar> getDirectDelegates(Grammar g) {
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
+		List<CompositeGrammarTree> children = t.children;
+		if ( children==null ) {
+			return null;
+		}
+		List<Grammar> grammars = new ArrayList();
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = (CompositeGrammarTree) children.get(i);
+			grammars.add(child.grammar);
+		}
+		return grammars;
+	}
+
+	/** Get delegates below direct delegates of g */
+	public List<Grammar> getIndirectDelegates(Grammar g) {
+		List<Grammar> direct = getDirectDelegates(g);
+		List<Grammar> delegates = getDelegates(g);
+		delegates.removeAll(direct);
+		return delegates;
+	}
+
+	/** Return list of delegate grammars from root down to g.
+	 *  Order is root, ..., g.parent.  (g not included).
+	 */
+	public List<Grammar> getDelegators(Grammar g) {
+		if ( g==delegateGrammarTreeRoot.grammar ) {
+			return null;
+		}
+		List<Grammar> grammars = new ArrayList();
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
+		// walk backwards to root, collecting grammars
+		CompositeGrammarTree p = t.parent;
+		while ( p!=null ) {
+			grammars.add(0, p.grammar); // add to head so in order later
+			p = p.parent;
+		}
+		return grammars;
+	}
+
+	/** Get set of rules for grammar g that need to have manual delegation
+	 *  methods.  This is the list of rules collected from all direct/indirect
+	 *  delegates minus rules overridden in grammar g.
+	 *
+	 *  This returns null except for the delegate root because it is the only
+	 *  one that has to have a complete grammar rule interface.  The delegates
+	 *  should not be instantiated directly for use as parsers (you can create
+	 *  them to pass to the root parser's ctor as arguments).
+	 */
+	public Set<Rule> getDelegatedRules(Grammar g) {
+		if ( g!=delegateGrammarTreeRoot.grammar ) {
+			return null;
+		}
+		Set<Rule> rules = getAllImportedRules(g);
+		for (Iterator it = rules.iterator(); it.hasNext();) {
+			Rule r = (Rule) it.next();
+			Rule localRule = g.getLocallyDefinedRule(r.name);
+			// if locally defined or it's not local but synpred, don't make
+			// a delegation method
+			if ( localRule!=null || r.isSynPred ) {
+				it.remove(); // kill overridden rules
+			}
+		}
+		return rules;
+	}
+
+	/** Get all rule definitions from all direct/indirect delegate grammars
+	 *  of g.
+	 */
+	public Set<Rule> getAllImportedRules(Grammar g) {
+		Set<String> ruleNames = new HashSet();
+		Set<Rule> rules = new HashSet();
+		CompositeGrammarTree subtreeRoot = delegateGrammarTreeRoot.findNode(g);
+		List<Grammar> grammars = subtreeRoot.getPostOrderedGrammarList();
+		// walk all grammars
+		for (int i = 0; i < grammars.size(); i++) {
+			Grammar delegate = (org.antlr.tool.Grammar) grammars.get(i);
+			// for each rule in delegate, add to rules if no rule with that
+			// name as been seen.  (can't use removeAll; wrong hashcode/equals on Rule)
+			for (Iterator it = delegate.getRules().iterator(); it.hasNext();) {
+				Rule r = (Rule)it.next();
+				if ( !ruleNames.contains(r.name) ) {
+					ruleNames.add(r.name); // track that we've seen this
+					rules.add(r);
+				}
+			}
+		}
+		return rules;
+	}
+
+	public Grammar getRootGrammar() {
+		if ( delegateGrammarTreeRoot==null ) {
+			return null;
+		}
+		return delegateGrammarTreeRoot.grammar;
+	}
+
+	public Grammar getGrammar(String grammarName) {
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(grammarName);
+		if ( t!=null ) {
+			return t.grammar;
+		}
+		return null;
+	}
+
+	// NFA spans multiple grammars, must handle here
+
+	public int getNewNFAStateNumber() {
+		return stateCounter++;
+	}
+
+	public void addState(NFAState state) {
+		numberToStateList.setSize(state.stateNumber+1); // make sure we have room
+		numberToStateList.set(state.stateNumber, state);
+	}
+
+	public NFAState getState(int s) {
+		return (NFAState)numberToStateList.get(s);
+	}
+
+	public void assignTokenTypes() throws antlr.RecognitionException {
+		// ASSIGN TOKEN TYPES for all delegates (same walker)
+		//System.out.println("### assign types");
+		AssignTokenTypesWalker ttypesWalker = new AssignTokenTypesBehavior();
+		ttypesWalker.setASTNodeClass("org.antlr.tool.GrammarAST");
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = (Grammar)grammars.get(i);
+			try {
+				//System.out.println("    walking "+g.name);
+				ttypesWalker.grammar(g.getGrammarTree(), g);
+			}
+			catch (RecognitionException re) {
+				ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+								   re);
+			}
+		}
+		// the walker has filled literals, tokens, and alias tables.
+		// now tell it to define them in the root grammar
+		ttypesWalker.defineTokens(delegateGrammarTreeRoot.grammar);
+	}
+
+	public void defineGrammarSymbols() {
+		delegateGrammarTreeRoot.trimLexerImportsIntoCombined();
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = (Grammar)grammars.get(i);
+			g.defineGrammarSymbols();
+		}
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = (Grammar)grammars.get(i);
+			g.checkNameSpaceAndActions();
+		}
+		minimizeRuleSet();
+	}
+
+	public void createNFAs() {
+		if ( ErrorManager.doNotAttemptAnalysis() ) {
+			return;
+		}
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		List<String> names = new ArrayList<String>();
+		for (int i = 0; i < grammars.size(); i++) {
+			Grammar g = (Grammar) grammars.get(i);
+			names.add(g.name);
+		}
+		//System.out.println("### createNFAs for composite; grammars: "+names);
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = (Grammar)grammars.get(i);
+			g.createRuleStartAndStopNFAStates();
+		}
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = (Grammar)grammars.get(i);
+			g.buildNFA();
+		}
+	}
+
+	public void minimizeRuleSet() {
+		Set<String> ruleDefs = new HashSet<String>();
+		_minimizeRuleSet(ruleDefs, delegateGrammarTreeRoot);
+	}
+
+	public void _minimizeRuleSet(Set<String> ruleDefs,
+								 CompositeGrammarTree p) {
+		Set<String> localRuleDefs = new HashSet<String>();
+		Set<String> overrides = new HashSet<String>();
+		// compute set of non-overridden rules for this delegate
+		for (Rule r : p.grammar.getRules()) {
+			if ( !ruleDefs.contains(r.name) ) {
+				localRuleDefs.add(r.name);
+			}
+			else if ( !r.name.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
+				// record any overridden rule 'cept tokens rule
+				overrides.add(r.name);
+			}
+		}
+		//System.out.println("rule defs for "+p.grammar.name+": "+localRuleDefs);
+		//System.out.println("overridden rule for "+p.grammar.name+": "+overrides);
+		p.grammar.overriddenRules = overrides;
+
+		// make set of all rules defined thus far walking delegation tree.
+		// the same rule in two delegates resolves in favor of first found
+		// in tree therefore second must not be included
+		ruleDefs.addAll(localRuleDefs);
+
+		// pass larger set of defined rules to delegates
+		if ( p.children!=null ) {
+			for (CompositeGrammarTree delegate : p.children) {
+				_minimizeRuleSet(ruleDefs, delegate);
+			}
+		}
+	}
+
+	/*
+	public void minimizeRuleSet() {
+		Set<Rule> refs = _minimizeRuleSet(delegateGrammarTreeRoot);
+		System.out.println("all rule refs: "+refs);
+	}
+
+	public Set<Rule> _minimizeRuleSet(CompositeGrammarTree p) {
+		Set<Rule> refs = new HashSet<Rule>();
+		for (GrammarAST refAST : p.grammar.ruleRefs) {
+			System.out.println("ref "+refAST.getText()+": "+refAST.NFAStartState+
+							   " enclosing rule: "+refAST.NFAStartState.enclosingRule+
+							   " invoking rule: "+((NFAState)refAST.NFAStartState.transition[0].target).enclosingRule);
+			refs.add(((NFAState)refAST.NFAStartState.transition[0].target).enclosingRule);
+		}
+
+		if ( p.children!=null ) {
+			for (CompositeGrammarTree delegate : p.children) {
+				Set<Rule> delegateRuleRefs = _minimizeRuleSet(delegate);
+				refs.addAll(delegateRuleRefs);
+			}
+		}
+
+		return refs;
+	}
+	*/
+
+	/*
+	public void oldminimizeRuleSet() {
+		// first walk to remove all overridden rules
+		Set<String> ruleDefs = new HashSet<String>();
+		Set<String> ruleRefs = new HashSet<String>();
+		for (GrammarAST refAST : delegateGrammarTreeRoot.grammar.ruleRefs) {
+			String rname = refAST.getText();
+			ruleRefs.add(rname);
+		}
+		_minimizeRuleSet(ruleDefs,
+						 ruleRefs,
+						 delegateGrammarTreeRoot);
+		System.out.println("overall rule defs: "+ruleDefs);
+	}
+
+	public void _minimizeRuleSet(Set<String> ruleDefs,
+								 Set<String> ruleRefs,
+								 CompositeGrammarTree p) {
+		Set<String> localRuleDefs = new HashSet<String>();
+		for (Rule r : p.grammar.getRules()) {
+			if ( !ruleDefs.contains(r.name) ) {
+				localRuleDefs.add(r.name);
+				ruleDefs.add(r.name);
+			}
+		}
+		System.out.println("rule defs for "+p.grammar.name+": "+localRuleDefs);
+
+		// remove locally-defined rules not in ref set
+		// find intersection of local rules and references from delegator
+		// that is set of rules needed by delegator
+		Set<String> localRuleDefsSatisfyingRefsFromBelow = new HashSet<String>();
+		for (String r : ruleRefs) {
+			if ( localRuleDefs.contains(r) ) {
+				localRuleDefsSatisfyingRefsFromBelow.add(r);
+			}
+		}
+
+		// now get list of refs from localRuleDefsSatisfyingRefsFromBelow.
+		// Those rules are also allowed in this delegate
+		for (GrammarAST refAST : p.grammar.ruleRefs) {
+			if ( localRuleDefsSatisfyingRefsFromBelow.contains(refAST.enclosingRuleName) ) {
+				// found rule ref within needed rule
+			}
+		}
+
+		// remove rule refs not in the new rule def set
+
+		// walk all children, adding rules not already defined
+		if ( p.children!=null ) {
+			for (CompositeGrammarTree delegate : p.children) {
+				_minimizeRuleSet(ruleDefs, ruleRefs, delegate);
+			}
+		}
+	}
+	*/
+
+	/*
+	public void trackNFAStatesThatHaveLabeledEdge(Label label,
+												  NFAState stateWithLabeledEdge)
+	{
+		Set<NFAState> states = typeToNFAStatesWithEdgeOfTypeMap.get(label);
+		if ( states==null ) {
+			states = new HashSet<NFAState>();
+			typeToNFAStatesWithEdgeOfTypeMap.put(label, states);
+		}
+		states.add(stateWithLabeledEdge);
+	}
+
+	public Map<Label, Set<NFAState>> getTypeToNFAStatesWithEdgeOfTypeMap() {
+		return typeToNFAStatesWithEdgeOfTypeMap;
+	}
+
+	public Set<NFAState> getStatesWithEdge(Label label) {
+		return typeToNFAStatesWithEdgeOfTypeMap.get(label);
+	}
+*/
+}
diff --git a/tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java b/tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java
new file mode 100644
index 0000000..0b1144b
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java
@@ -0,0 +1,155 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A tree of grammars */
+public class CompositeGrammarTree {
+	protected List<CompositeGrammarTree> children;
+	public Grammar grammar;
+
+	/** Who is the parent node of this node; if null, implies node is root */
+	public CompositeGrammarTree parent;
+
+	public CompositeGrammarTree(Grammar g) {
+		grammar = g;
+	}
+
+	public void addChild(CompositeGrammarTree t) {
+		//System.out.println("add "+t.toStringTree()+" as child to "+this.toStringTree());
+		if ( t==null ) {
+			return; // do nothing upon addChild(null)
+		}
+		if ( children==null ) {
+			children = new ArrayList<CompositeGrammarTree>();
+		}
+		children.add(t);
+		t.parent = this;
+	}
+
+	/** Find a rule by looking in current grammar then down towards the
+	 *  delegate grammars.
+	 */
+	public Rule getRule(String ruleName) {
+		Rule r = grammar.getLocallyDefinedRule(ruleName);
+		for (int i = 0; r==null && children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			r = child.getRule(ruleName);
+		}
+		return r;
+	}
+
+	/** Find an option by looking up towards the root grammar rather than down */
+	public Object getOption(String key) {
+		Object o = grammar.getLocallyDefinedOption(key);
+		if ( o!=null ) {
+			return o;
+		}
+		if ( parent!=null ) {
+			return parent.getOption(key);
+		}
+		return null; // not found
+	}
+
+	public CompositeGrammarTree findNode(Grammar g) {
+		if ( g==null ) {
+			return null;
+		}
+		if ( this.grammar == g ) {
+			return this;
+		}
+		CompositeGrammarTree n = null;
+		for (int i = 0; n==null && children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			n = child.findNode(g);
+		}
+		return n;
+	}
+
+	public CompositeGrammarTree findNode(String grammarName) {
+		if ( grammarName==null ) {
+			return null;
+		}
+		if ( grammarName.equals(this.grammar.name) ) {
+			return this;
+		}
+		CompositeGrammarTree n = null;
+		for (int i = 0; n==null && children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			n = child.findNode(grammarName);
+		}
+		return n;
+	}
+
+	/** Return a postorder list of grammars; root is last in list */
+	public List<Grammar> getPostOrderedGrammarList() {
+		List<Grammar> grammars = new ArrayList<Grammar>();
+		_getPostOrderedGrammarList(grammars);
+		return grammars;
+	}
+
+	/** work for getPostOrderedGrammarList */
+	protected void _getPostOrderedGrammarList(List<Grammar> grammars) {
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			child._getPostOrderedGrammarList(grammars);
+		}
+		grammars.add(this.grammar);
+	}
+
+	/** Return a preorder list of grammars; root is first in list */
+	public List<Grammar> getPreOrderedGrammarList() {
+		List<Grammar> grammars = new ArrayList<Grammar>();
+		_getPreOrderedGrammarList(grammars);
+		return grammars;
+	}
+
+	/** work for getPreOrderedGrammarList; recurse preorder, not postorder */
+	protected void _getPreOrderedGrammarList(List<Grammar> grammars) {
+		grammars.add(this.grammar);
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			child._getPreOrderedGrammarList(grammars);
+		}
+	}
+
+	public void trimLexerImportsIntoCombined() {
+		CompositeGrammarTree p = this;
+		if ( p.grammar.type == Grammar.LEXER && p.parent!=null &&
+			 p.parent.grammar.type == Grammar.COMBINED )
+		{
+			//System.out.println("whacking "+p.grammar.name+" from "+p.parent.grammar.name);
+			p.parent.children.remove(this);
+		}
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			child.trimLexerImportsIntoCombined();
+		}
+	}
+}
\ No newline at end of file
diff --git a/src/org/antlr/tool/DOTGenerator.java b/tool/src/main/java/org/antlr/tool/DOTGenerator.java
similarity index 83%
rename from src/org/antlr/tool/DOTGenerator.java
rename to tool/src/main/java/org/antlr/tool/DOTGenerator.java
index 1fd4f46..05a8b7d 100644
--- a/src/org/antlr/tool/DOTGenerator.java
+++ b/tool/src/main/java/org/antlr/tool/DOTGenerator.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@ import org.antlr.stringtemplate.StringTemplateGroup;
 import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
 
 import java.util.*;
+import org.antlr.grammar.v2.ANTLRParser;
 
 /** The DOT (part of graphviz) generation aspect. */
 public class DOTGenerator {
@@ -65,7 +66,10 @@ public class DOTGenerator {
      *  from startState will be included.
      */
     public String getDOT(State startState) {
-        // The output DOT graph for visualization
+		if ( startState==null ) {
+			return null;
+		}
+		// The output DOT graph for visualization
 		StringTemplate dot = null;
 		markedStates = new HashSet();
         if ( startState instanceof DFAState ) {
@@ -73,7 +77,7 @@ public class DOTGenerator {
 			dot.setAttribute("startState",
 					Utils.integer(startState.stateNumber));
 			dot.setAttribute("useBox",
-							 Boolean.valueOf(Tool.internalOption_ShowNFConfigsInDFA));
+							 Boolean.valueOf(Tool.internalOption_ShowNFAConfigsInDFA));
 			walkCreatingDFADOT(dot, (DFAState)startState);
         }
         else {
@@ -182,14 +186,14 @@ public class DOTGenerator {
         // special case: if decision point, then line up the alt start states
         // unless it's an end of block
 		if ( ((NFAState)s).isDecisionState() ) {
-			GrammarAST n = ((NFAState)s).getAssociatedASTNode();
+			GrammarAST n = ((NFAState)s).associatedASTNode;
 			if ( n!=null && n.getType()!=ANTLRParser.EOB ) {
 				StringTemplate rankST = stlib.getInstanceOf("org/antlr/tool/templates/dot/decision-rank");
 				NFAState alt = (NFAState)s;
 				while ( alt!=null ) {
 					rankST.setAttribute("states", getStateLabel(alt));
-					if ( alt.transition(1)!=null ) {
-						alt = (NFAState)alt.transition(1).target;
+					if ( alt.transition[1] !=null ) {
+						alt = (NFAState)alt.transition[1].target;
 					}
 					else {
 						alt=null;
@@ -207,16 +211,24 @@ public class DOTGenerator {
                 RuleClosureTransition rr = ((RuleClosureTransition)edge);
                 // don't jump to other rules, but display edge to follow node
                 edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/edge");
-                edgeST.setAttribute("label", "<"+grammar.getRuleName(rr.getRuleIndex())+">");
-                edgeST.setAttribute("src", getStateLabel(s));
-                edgeST.setAttribute("target", getStateLabel(rr.getFollowState()));
+				if ( rr.rule.grammar != grammar ) {
+					edgeST.setAttribute("label", "<"+rr.rule.grammar.name+"."+rr.rule.name+">");
+				}
+				else {
+					edgeST.setAttribute("label", "<"+rr.rule.name+">");
+				}
+				edgeST.setAttribute("src", getStateLabel(s));
+				edgeST.setAttribute("target", getStateLabel(rr.followState));
 				edgeST.setAttribute("arrowhead", arrowhead);
                 dot.setAttribute("edges", edgeST);
-                walkRuleNFACreatingDOT(dot, rr.getFollowState());
+				walkRuleNFACreatingDOT(dot, rr.followState);
                 continue;
             }
-			if ( edge.isEpsilon() ) {
-				edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/epsilon-edge");				
+			if ( edge.isAction() ) {
+				edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/action-edge");
+			}
+			else if ( edge.isEpsilon() ) {
+				edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/epsilon-edge");
 			}
 			else {
 				edgeST = stlib.getInstanceOf("org/antlr/tool/templates/dot/edge");
@@ -274,7 +286,9 @@ public class DOTGenerator {
 		String label = edge.label.toString(grammar);
 		label = Utils.replace(label,"\\", "\\\\");
 		label = Utils.replace(label,"\"", "\\\"");
-        if ( label.equals(Label.EPSILON_STR) ) {
+		label = Utils.replace(label,"\n", "\\\\n");
+		label = Utils.replace(label,"\r", "");
+		if ( label.equals(Label.EPSILON_STR) ) {
             label = "e";
         }
 		State target = edge.target;
@@ -303,43 +317,51 @@ public class DOTGenerator {
             StringBuffer buf = new StringBuffer(250);
 			buf.append('s');
 			buf.append(s.stateNumber);
-			if ( Tool.internalOption_ShowNFConfigsInDFA ) {
-				buf.append("\\n");
-				// separate alts
-				Set alts = ((DFAState)s).getAltSet();
-				List altList = new ArrayList();
-				altList.addAll(alts);
-				Collections.sort(altList);
-				Set configurations = ((DFAState)s).getNFAConfigurations();
-				for (int altIndex = 0; altIndex < altList.size(); altIndex++) {
-					Integer altI = (Integer) altList.get(altIndex);
-					int alt = altI.intValue();
-					if ( altIndex>0 ) {
+			if ( Tool.internalOption_ShowNFAConfigsInDFA ) {
+				if ( s instanceof DFAState ) {
+					if ( ((DFAState)s).abortedDueToRecursionOverflow ) {
 						buf.append("\\n");
+						buf.append("abortedDueToRecursionOverflow");
 					}
-					buf.append("alt");
-					buf.append(alt);
-					buf.append(':');
-					// get a list of configs for just this alt
-					// it will help us print better later
-					List configsInAlt = new ArrayList();
-					for (Iterator it = configurations.iterator(); it.hasNext();) {
-						NFAConfiguration c = (NFAConfiguration) it.next();
-						if ( c.alt!=alt ) continue;
-						configsInAlt.add(c);
-					}
-					int n = 0;
-					for (int cIndex = 0; cIndex < configsInAlt.size(); cIndex++) {
-						NFAConfiguration c =
-							(NFAConfiguration)configsInAlt.get(cIndex);
-						n++;
-						buf.append(c.toString(false));
-						if ( (cIndex+1)<configsInAlt.size() ) {
-							buf.append(", ");
-						}
-						if ( n%5==0 && (configsInAlt.size()-cIndex)>3 ) {
+				}
+				Set alts = ((DFAState)s).getAltSet();
+				if ( alts!=null ) {
+					buf.append("\\n");
+					// separate alts
+					List altList = new ArrayList();
+					altList.addAll(alts);
+					Collections.sort(altList);
+					Set configurations = ((DFAState) s).nfaConfigurations;
+					for (int altIndex = 0; altIndex < altList.size(); altIndex++) {
+						Integer altI = (Integer) altList.get(altIndex);
+						int alt = altI.intValue();
+						if ( altIndex>0 ) {
 							buf.append("\\n");
 						}
+						buf.append("alt");
+						buf.append(alt);
+						buf.append(':');
+						// get a list of configs for just this alt
+						// it will help us print better later
+						List configsInAlt = new ArrayList();
+						for (Iterator it = configurations.iterator(); it.hasNext();) {
+							NFAConfiguration c = (NFAConfiguration) it.next();
+							if ( c.alt!=alt ) continue;
+							configsInAlt.add(c);
+						}
+						int n = 0;
+						for (int cIndex = 0; cIndex < configsInAlt.size(); cIndex++) {
+							NFAConfiguration c =
+								(NFAConfiguration)configsInAlt.get(cIndex);
+							n++;
+							buf.append(c.toString(false));
+							if ( (cIndex+1)<configsInAlt.size() ) {
+								buf.append(", ");
+							}
+							if ( n%5==0 && (configsInAlt.size()-cIndex)>3 ) {
+								buf.append("\\n");
+							}
+						}
 					}
 				}
 			}
diff --git a/src/org/antlr/tool/ErrorManager.java b/tool/src/main/java/org/antlr/tool/ErrorManager.java
similarity index 91%
rename from src/org/antlr/tool/ErrorManager.java
rename to tool/src/main/java/org/antlr/tool/ErrorManager.java
index 5941864..e8f99a1 100644
--- a/src/org/antlr/tool/ErrorManager.java
+++ b/tool/src/main/java/org/antlr/tool/ErrorManager.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -32,6 +32,7 @@ import org.antlr.Tool;
 import org.antlr.misc.BitSet;
 import org.antlr.analysis.DFAState;
 import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.stringtemplate.StringTemplateErrorListener;
 import org.antlr.stringtemplate.StringTemplateGroup;
@@ -145,7 +146,7 @@ public class ErrorManager {
 	public static final int MSG_MISSING_RULE_ARGS = 129;
 	public static final int MSG_RULE_HAS_NO_ARGS = 130;
 	public static final int MSG_ARGS_ON_TOKEN_REF = 131;
-	public static final int MSG_AMBIGUOUS_RULE_SCOPE = 132;
+	public static final int MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT = 132;
 	public static final int MSG_ILLEGAL_OPTION = 133;
 	public static final int MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT = 134;
 	public static final int MSG_UNDEFINED_TOKEN_REF_IN_REWRITE = 135;
@@ -168,12 +169,25 @@ public class ErrorManager {
 	public static final int MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR = 152;
 	public static final int MSG_REWRITE_FOR_MULTI_ELEMENT_ALT = 153;
 	public static final int MSG_RULE_INVALID_SET = 154;
+	public static final int MSG_HETERO_ILLEGAL_IN_REWRITE_ALT = 155;
+	public static final int MSG_NO_SUCH_GRAMMAR_SCOPE = 156;
+	public static final int MSG_NO_SUCH_RULE_IN_SCOPE = 157;
+	public static final int MSG_TOKEN_ALIAS_CONFLICT = 158;
+	public static final int MSG_TOKEN_ALIAS_REASSIGNMENT = 159;
+	public static final int MSG_TOKEN_VOCAB_IN_DELEGATE = 160;
+	public static final int MSG_INVALID_IMPORT = 161;
+	public static final int MSG_IMPORTED_TOKENS_RULE_EMPTY = 162;
+	public static final int MSG_IMPORT_NAME_CLASH = 163;
+	public static final int MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION = 164;
+	public static final int MSG_AST_OP_IN_ALT_WITH_REWRITE = 165;
+    public static final int MSG_WILDCARD_AS_ROOT = 166;
+    public static final int MSG_CONFLICTING_OPTION_IN_TREE_FILTER = 167;
 
 
 	// GRAMMAR WARNINGS
 	public static final int MSG_GRAMMAR_NONDETERMINISM = 200; // A predicts alts 1,2
 	public static final int MSG_UNREACHABLE_ALTS = 201;       // nothing predicts alt i
-	public static final int MSG_DANGLING_STATE = 202;        // no edges out of state
+	public static final int MSG_DANGLING_STATE = 202;         // no edges out of state
 	public static final int MSG_INSUFFICIENT_PREDICATES = 203;
 	public static final int MSG_DUPLICATE_SET_ENTRY = 204;    // (A|A)
 	public static final int MSG_ANALYSIS_ABORTED = 205;
@@ -185,9 +199,13 @@ public class ErrorManager {
 	public static final int MSG_NONREGULAR_DECISION = 211;
 
 
-	public static final int MAX_MESSAGE_NUMBER = 211;
+    // Dependency sorting errors
+    //
+    public static final int MSG_CIRCULAR_DEPENDENCY = 212; // t1.g -> t2.g -> t3.g ->t1.g
 
-	/** Do not do perform analysis and code gen if one of these happens */
+	public static final int MAX_MESSAGE_NUMBER = 212;
+
+	/** Do not do perform analysis if one of these happens */
 	public static final BitSet ERRORS_FORCING_NO_ANALYSIS = new BitSet() {
 		{
 			add(MSG_RULE_REDEFINITION);
@@ -195,15 +213,25 @@ public class ErrorManager {
 			add(MSG_LEFT_RECURSION_CYCLES);
 			add(MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION);
 			add(MSG_NO_RULES);
-			// TODO: ...
+			add(MSG_NO_SUCH_GRAMMAR_SCOPE);
+			add(MSG_NO_SUCH_RULE_IN_SCOPE);
+			add(MSG_LEXER_RULES_NOT_ALLOWED);
+            add(MSG_WILDCARD_AS_ROOT);
+            add(MSG_CIRCULAR_DEPENDENCY);
+            // TODO: ...
 		}
 	};
 
-	/** Do not do perform analysis and code gen if one of these happens */
+	/** Do not do code gen if one of these happens */
 	public static final BitSet ERRORS_FORCING_NO_CODEGEN = new BitSet() {
 		{
 			add(MSG_NONREGULAR_DECISION);
+			add(MSG_RECURSION_OVERLOW);
+			add(MSG_UNREACHABLE_ALTS);
 			add(MSG_FILE_AND_GRAMMAR_NAME_DIFFER);
+			add(MSG_INVALID_IMPORT);
+			add(MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION);
+            add(MSG_CIRCULAR_DEPENDENCY);
 			// TODO: ...
 		}
 	};
@@ -244,7 +272,7 @@ public class ErrorManager {
 	/** Track the number of errors regardless of the listener but track
 	 *  per thread.
 	 */
-	private static Map threadToErrorCountMap = new HashMap();
+	private static Map threadToErrorStateMap = new HashMap();
 
 	/** Each thread has its own ptr to a Tool object, which knows how
 	 *  to panic, for example.  In a GUI, the thread might just throw an Error
@@ -508,6 +536,10 @@ public class ErrorManager {
 		threadToListenerMap.put(Thread.currentThread(), listener);
 	}
 
+    public static void removeErrorListener() {
+        threadToListenerMap.remove(Thread.currentThread());
+    }
+
 	public static void setTool(Tool tool) {
 		threadToToolMap.put(Thread.currentThread(), tool);
 	}
@@ -558,17 +590,22 @@ public class ErrorManager {
 
 	public static ErrorState getErrorState() {
 		ErrorState ec =
-			(ErrorState)threadToErrorCountMap.get(Thread.currentThread());
+			(ErrorState)threadToErrorStateMap.get(Thread.currentThread());
 		if ( ec==null ) {
 			ec = new ErrorState();
-			threadToErrorCountMap.put(Thread.currentThread(), ec);
+			threadToErrorStateMap.put(Thread.currentThread(), ec);
 		}
 		return ec;
 	}
 
+	public static int getNumErrors() {
+		return getErrorState().errors;
+	}
+
 	public static void resetErrorState() {
-		ErrorState ec = new ErrorState();
-		threadToErrorCountMap.put(Thread.currentThread(), ec);
+        threadToListenerMap = new HashMap();        
+        ErrorState ec = new ErrorState();
+		threadToErrorStateMap.put(Thread.currentThread(), ec);
 	}
 
 	public static void info(String msg) {
@@ -624,12 +661,12 @@ public class ErrorManager {
 	public static void danglingState(DecisionProbe probe,
 									 DFAState d)
 	{
-		getErrorState().warnings++;
+		getErrorState().errors++;
 		Message msg = new GrammarDanglingStateMessage(probe,d);
-		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorState().errorMsgIDs.add(msg.msgID);
 		Set seen = (Set)emitSingleError.get("danglingState");
 		if ( !seen.contains(d.dfa.decisionNumber+"|"+d.getAltSet()) ) {
-			getErrorListener().warning(msg);
+			getErrorListener().error(msg);
 			// we've seen this decision and this alt set; never again
 			seen.add(d.dfa.decisionNumber+"|"+d.getAltSet());
 		}
@@ -646,17 +683,18 @@ public class ErrorManager {
 	public static void unreachableAlts(DecisionProbe probe,
 									   List alts)
 	{
-		getErrorState().warnings++;
+		getErrorState().errors++;
 		Message msg = new GrammarUnreachableAltsMessage(probe,alts);
-		getErrorState().warningMsgIDs.add(msg.msgID);
-		getErrorListener().warning(msg);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
 	}
 
 	public static void insufficientPredicates(DecisionProbe probe,
-											  List alts)
+											  DFAState d,
+											  Map<Integer, Set<Token>> altToUncoveredLocations)
 	{
 		getErrorState().warnings++;
-		Message msg = new GrammarInsufficientPredicatesMessage(probe,alts);
+		Message msg = new GrammarInsufficientPredicatesMessage(probe,d,altToUncoveredLocations);
 		getErrorState().warningMsgIDs.add(msg.msgID);
 		getErrorListener().warning(msg);
 	}
@@ -674,11 +712,11 @@ public class ErrorManager {
 										 Collection targetRules,
 										 Collection callSiteStates)
 	{
-		getErrorState().warnings++;
+		getErrorState().errors++;
 		Message msg = new RecursionOverflowMessage(probe,sampleBadState, alt,
 										 targetRules, callSiteStates);
-		getErrorState().warningMsgIDs.add(msg.msgID);
-		getErrorListener().warning(msg);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
 	}
 
 	/*
@@ -735,7 +773,7 @@ public class ErrorManager {
 									  Object arg,
 									  Object arg2)
 	{
-		getErrorState().errors++;
+		getErrorState().warnings++;
 		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
 		getErrorState().warningMsgIDs.add(msgID);
 		getErrorListener().warning(msg);
@@ -787,7 +825,8 @@ public class ErrorManager {
 	}
 
 	public static boolean doNotAttemptCodeGen() {
-		return !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_CODEGEN).isNil();
+		return doNotAttemptAnalysis() ||
+			   !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_CODEGEN).isNil();
 	}
 
 	/** Return first non ErrorManager code location for generating messages */
diff --git a/src/org/antlr/tool/FASerializer.java b/tool/src/main/java/org/antlr/tool/FASerializer.java
similarity index 95%
rename from src/org/antlr/tool/FASerializer.java
rename to tool/src/main/java/org/antlr/tool/FASerializer.java
index 992b31d..cdff01f 100644
--- a/src/org/antlr/tool/FASerializer.java
+++ b/tool/src/main/java/org/antlr/tool/FASerializer.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -62,6 +62,9 @@ public class FASerializer {
     }
 
 	public String serialize(State s) {
+		if ( s==null ) {
+			return "<no automaton>";
+		}
 		return serialize(s, true);
 	}
 
@@ -124,7 +127,7 @@ public class FASerializer {
             // will not be found and appear to be not in graph.  Must explicitly jump
             // to it, but don't "draw" an edge.
             if ( edge instanceof RuleClosureTransition ) {
-                walkFANormalizingStateNumbers(((RuleClosureTransition)edge).getFollowState());
+				walkFANormalizingStateNumbers(((RuleClosureTransition) edge).followState);
             }
         }
     }
@@ -149,9 +152,12 @@ public class FASerializer {
             Transition edge = (Transition) s.transition(i);
             StringBuffer buf = new StringBuffer();
             buf.append(stateStr);
-            if ( edge.isEpsilon() ) {
-                buf.append("->");
-            }
+			if ( edge.isAction() ) {
+				buf.append("-{}->");
+			}
+			else if ( edge.isEpsilon() ) {
+				buf.append("->");
+			}
 			else if ( edge.isSemanticPredicate() ) {
 				buf.append("-{"+edge.label.getSemanticContext()+"}?->");
 			}
@@ -188,7 +194,7 @@ public class FASerializer {
             // will not be found and appear to be not in graph.  Must explicitly jump
             // to it, but don't "draw" an edge.
             if ( edge instanceof RuleClosureTransition ) {
-                walkSerializingFA(lines, ((RuleClosureTransition)edge).getFollowState());
+				walkSerializingFA(lines, ((RuleClosureTransition) edge).followState);
             }
         }
 
diff --git a/src/org/antlr/tool/Grammar.java b/tool/src/main/java/org/antlr/tool/Grammar.java
similarity index 52%
rename from src/org/antlr/tool/Grammar.java
rename to tool/src/main/java/org/antlr/tool/Grammar.java
index 243bb8c..268e970 100644
--- a/src/org/antlr/tool/Grammar.java
+++ b/tool/src/main/java/org/antlr/tool/Grammar.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,16 +29,19 @@ package org.antlr.tool;
 
 import antlr.RecognitionException;
 import antlr.Token;
+import antlr.TokenStreamException;
 import antlr.TokenStreamRewriteEngine;
 import antlr.TokenWithIndex;
+import org.antlr.grammar.v2.*;
+import org.antlr.grammar.v3.*;
+
+import org.antlr.misc.*;
+import org.antlr.misc.Utils;
+
 import antlr.collections.AST;
 import org.antlr.Tool;
 import org.antlr.analysis.*;
 import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.Barrier;
-import org.antlr.misc.IntSet;
-import org.antlr.misc.IntervalSet;
-import org.antlr.misc.Utils;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
 
@@ -62,12 +65,15 @@ public class Grammar {
 	public static final int TOKEN_LABEL = 2;
 	public static final int RULE_LIST_LABEL = 3;
 	public static final int TOKEN_LIST_LABEL = 4;
-	public static final int CHAR_LABEL = 5; // used in lexer for x='a'
+    public static final int CHAR_LABEL = 5; // used in lexer for x='a'
+    public static final int WILDCARD_TREE_LABEL = 6; // Used in tree grammar x=.
+    public static final int WILDCARD_TREE_LIST_LABEL = 7; // Used in tree grammar x+=.
+
 
-	public static String[] LabelTypeToString =
-		{"<invalid>", "rule", "token", "rule-list", "token-list"};
+    public static String[] LabelTypeToString =
+		{"<invalid>", "rule", "token", "rule-list", "token-list", "wildcard-tree", "wildcard-tree-list"};
 
-    public static final String ARTIFICIAL_TOKENS_RULENAME = "Tokens";
+	public static final String ARTIFICIAL_TOKENS_RULENAME = "Tokens";
 	public static final String FRAGMENT_RULE_MODIFIER = "fragment";
 
 	public static final String SYNPREDGATE_ACTION_NAME = "synpredgate";
@@ -99,8 +105,8 @@ public class Grammar {
 		ANTLRLiteralCharValueEscape['\''] = "\\'";
 	}
 
-    public static final int LEXER = 1;
-    public static final int PARSER = 2;
+	public static final int LEXER = 1;
+	public static final int PARSER = 2;
 	public static final int TREE_PARSER = 3;
 	public static final int COMBINED = 4;
 	public static final String[] grammarTypeToString = new String[] {
@@ -119,12 +125,35 @@ public class Grammar {
 		"Parser" // if combined grammar, gen Parser and Lexer will be done later
 	};
 
+	/** Set of valid imports.  E.g., can only import a tree parser into
+	 *  another tree parser.  Maps delegate to set of delegator grammar types.
+	 *  validDelegations.get(LEXER) gives list of the kinds of delegators
+	 *  that can import lexers.
+	 */
+	public static MultiMap<Integer,Integer> validDelegations =
+		new MultiMap<Integer,Integer>() {
+			{
+				map(LEXER, LEXER);
+				map(LEXER, PARSER);
+				map(LEXER, COMBINED);
+
+				map(PARSER, PARSER);
+				map(PARSER, COMBINED);
+
+				map(TREE_PARSER, TREE_PARSER);
+
+				// TODO: allow COMBINED
+				// map(COMBINED, COMBINED);
+			}
+		};
+
 	/** This is the buffer of *all* tokens found in the grammar file
 	 *  including whitespace tokens etc...  I use this to extract
 	 *  lexer rules from combined grammars.
 	 */
 	protected TokenStreamRewriteEngine tokenBuffer;
 	public static final String IGNORE_STRING_IN_GRAMMAR_FILE_NAME = "__";
+	public static final String AUTO_GENERATED_TOKEN_NAME_PREFIX = "T__";
 
 	public static class Decision {
 		public int decision;
@@ -158,22 +187,21 @@ public class Grammar {
 	/** What name did the user provide for this grammar? */
 	public String name;
 
-    /** What type of grammar is this: lexer, parser, tree walker */
-    public int type;
+	/** What type of grammar is this: lexer, parser, tree walker */
+	public int type;
 
-    /** A list of options specified at the grammar level such as language=Java.
-     *  The value can be an AST for complicated values such as character sets.
-     *  There may be code generator specific options in here.  I do no
-     *  interpretation of the key/value pairs...they are simply available for
-     *  who wants them.
-     */
-    protected Map options;
+	/** A list of options specified at the grammar level such as language=Java.
+	 *  The value can be an AST for complicated values such as character sets.
+	 *  There may be code generator specific options in here.  I do no
+	 *  interpretation of the key/value pairs...they are simply available for
+	 *  who wants them.
+	 */
+	protected Map options;
 
-	public static final Set legalOptions =
+	public static final Set legalLexerOptions =
 			new HashSet() {
 				{
 				add("language"); add("tokenVocab");
-				add("output"); add("rewrite"); add("ASTLabelType");
 				add("TokenLabelType");
 				add("superClass");
 				add("filter");
@@ -183,11 +211,38 @@ public class Grammar {
 				}
 			};
 
+	public static final Set legalParserOptions =
+			new HashSet() {
+				{
+				add("language"); add("tokenVocab");
+				add("output"); add("rewrite"); add("ASTLabelType");
+				add("TokenLabelType");
+				add("superClass");
+				add("k");
+				add("backtrack");
+				add("memoize");
+				}
+			};
+
+    public static final Set legalTreeParserOptions =
+        new HashSet() {
+            {
+                add("language"); add("tokenVocab");
+                add("output"); add("rewrite"); add("ASTLabelType");
+                add("TokenLabelType");
+                add("superClass");
+                add("k");
+                add("backtrack");
+                add("memoize");
+                add("filter");
+            }
+        };
+
 	public static final Set doNotCopyOptionsToLexer =
 		new HashSet() {
 			{
 				add("output"); add("ASTLabelType"); add("superClass");
-			 	add("k"); add("backtrack"); add("memoize"); add("rewrite");
+				add("k"); add("backtrack"); add("memoize"); add("rewrite");
 			}
 		};
 
@@ -198,6 +253,29 @@ public class Grammar {
 				}
 			};
 
+	public static final Set legalBlockOptions =
+			new HashSet() {{add("k"); add("greedy"); add("backtrack"); add("memoize");}};
+
+	/** What are the default options for a subrule? */
+	public static final Map defaultBlockOptions =
+			new HashMap() {{put("greedy","true");}};
+
+	public static final Map defaultLexerBlockOptions =
+			new HashMap() {{put("greedy","true");}};
+
+	// Token options are here to avoid contaminating Token object in runtime
+	
+	/** Legal options for terminal refs like ID<node=MyVarNode> */
+	public static final Set legalTokenOptions =
+			new HashSet() {
+				{
+				add(defaultTokenOption);
+                add("associativity");
+				}
+			};
+	
+	public static final String defaultTokenOption = "node";
+
 	/** Is there a global fixed lookahead set for this grammar?
 	 *  If 0, nothing specified.  -1 implies we have not looked at
 	 *  the options table yet to set k.
@@ -213,42 +291,29 @@ public class Grammar {
 	protected Map actions = new HashMap();
 
 	/** The NFA that represents the grammar with edges labelled with tokens
-     *  or epsilon.  It is more suitable to analysis than an AST representation.
-     */
-    protected NFA nfa;
-
-    /** Token names and literal tokens like "void" are uniquely indexed.
-     *  with -1 implying EOF.  Characters are different; they go from
-     *  -1 (EOF) to \uFFFE.  For example, 0 could be a binary byte you
-     *  want to lexer.  Labels of DFA/NFA transitions can be both tokens
-     *  and characters.  I use negative numbers for bookkeeping labels
-     *  like EPSILON. Char/String literals and token types overlap in the same
-	 *  space, however.
-     */
-    protected int maxTokenType = Label.MIN_TOKEN_TYPE-1;
+	 *  or epsilon.  It is more suitable to analysis than an AST representation.
+	 */
+	public NFA nfa;
 
-	/** TODO: hook this to the charVocabulary option */
-	protected IntSet charVocabulary = null;
+	protected NFAFactory factory;
 
-    /** Map token like ID (but not literals like "while") to its token type */
-    protected Map tokenIDToTypeMap = new HashMap();
+	/** If this grammar is part of a larger composite grammar via delegate
+	 *  statement, then this points at the composite.  The composite holds
+	 *  a global list of rules, token types, decision numbers, etc...
+	 */
+	public CompositeGrammar composite;
 
-    /** Map token literals like "while" to its token type.  It may be that
-     *  WHILE="while"=35, in which case both tokenNameToTypeMap and this
-     *  field will have entries both mapped to 35.
-     */
-    protected Map stringLiteralToTypeMap = new HashMap();
+	/** A pointer back into grammar tree.  Needed so we can add delegates. */
+	public CompositeGrammarTree compositeTreeNode;
 
-    /** Map a token type to its token name.
-	 *  Must subtract MIN_TOKEN_TYPE from index.
+	/** If this is a delegate of another grammar, this is the label used
+	 *  as an instance var by that grammar to point at this grammar. null
+	 *  if no label was specified in the delegate statement.
 	 */
-    protected Vector typeToTokenList = new Vector();
+	public String label;
 
-	/** For interpreting and testing, you sometimes want to import token
-	 *  definitions from another grammar (instead of reading token defs from
-	 *  a file).
-	 */
-	protected Grammar importTokenVocabularyFromGrammar;
+	/** TODO: hook this to the charVocabulary option */
+	protected IntSet charVocabulary = null;
 
 	/** For ANTLRWorks, we want to be able to map a line:col to a specific
 	 *  decision DFA so it can display DFA.
@@ -257,32 +322,26 @@ public class Grammar {
 
 	public Tool tool;
 
-	/** The unique set of all rule references in any rule; set of Token
+	/** The unique set of all rule references in any rule; set of tree node
 	 *  objects so two refs to same rule can exist but at different line/position.
 	 */
-	protected Set<antlr.Token> ruleRefs = new HashSet<antlr.Token>();
+	protected Set<GrammarAST> ruleRefs = new HashSet<GrammarAST>();
+
+	protected Set<GrammarAST> scopedRuleRefs = new HashSet();
 
 	/** The unique set of all token ID references in any rule */
 	protected Set<antlr.Token> tokenIDRefs = new HashSet<antlr.Token>();
 
-	/** If combined or lexer grammar, track the rules; Set<String>.
-	 * 	Track lexer rules so we can warn about undefined tokens.
- 	 */
-	protected Set<String> lexerRules = new HashSet<String>();
-
-    /** Be able to assign a number to every decision in grammar;
-     *  decisions in 1..n
-     */
-    protected int decisionNumber = 0;
-
-    /** Rules are uniquely labeled from 1..n */
-    protected int ruleIndex = 1;
+	/** Be able to assign a number to every decision in grammar;
+	 *  decisions in 1..n
+	 */
+	protected int decisionCount = 0;
 
 	/** A list of all rules that are in any left-recursive cycle.  There
 	 *  could be multiple cycles, but this is a flat list of all problematic
 	 *  rules.
 	 */
-	protected Set leftRecursiveRules;
+	protected Set<Rule> leftRecursiveRules;
 
 	/** An external tool requests that DFA analysis abort prematurely.  Stops
 	 *  at DFA granularity, which are limited to a DFA size and time computation
@@ -297,51 +356,63 @@ public class Grammar {
 	 */
 	protected LinkedHashMap nameToSynpredASTMap;
 
-	/** Map a rule to it's Rule object
+    /** At least one rule has memoize=true */
+    public boolean atLeastOneRuleMemoizes;
+
+    /** At least one backtrack=true in rule or decision or grammar. */
+    public boolean atLeastOneBacktrackOption;
+
+	/** Was this created from a COMBINED grammar? */
+	public boolean implicitLexer;
+
+	/** Map a rule to it's Rule object */
+	protected LinkedHashMap<String,Rule> nameToRuleMap = new LinkedHashMap<String,Rule>();
+
+	/** If this rule is a delegate, some rules might be overridden; don't
+	 *  want to gen code for them.
+	 */
+	public Set<String> overriddenRules = new HashSet<String>();
+
+	/** The list of all rules referenced in this grammar, not defined here,
+	 *  and defined in a delegate grammar.  Not all of these will be generated
+	 *  in the recognizer for this file; only those that are affected by rule
+	 *  definitions in this grammar.  I am not sure the Java target will need
+	 *  this but I'm leaving in case other targets need it.
+	 *  @see NameSpaceChecker.lookForReferencesToUndefinedSymbols()
+	 */
+	protected Set<Rule> delegatedRuleReferences = new HashSet();
+
+	/** The ANTLRParser tracks lexer rules when reading combined grammars
+	 *  so we can build the Tokens rule.
 	 */
-	protected LinkedHashMap nameToRuleMap = new LinkedHashMap();
+	public List<String> lexerRuleNamesInCombined = new ArrayList<String>();
 
 	/** Track the scopes defined outside of rules and the scopes associated
 	 *  with all rules (even if empty).
 	 */
 	protected Map scopes = new HashMap();
 
-	/** Map a rule index to its name; use a Vector on purpose as new
-	 *  collections stuff won't let me setSize and make it grow.  :(
-	 *  I need a specific guaranteed index, which the Collections stuff
-	 *  won't let me have.
+	/** An AST that records entire input grammar with all rules.  A simple
+	 *  grammar with one rule, "grammar t; a : A | B ;", looks like:
+	 * ( grammar t ( rule a ( BLOCK ( ALT A ) ( ALT B ) ) <end-of-rule> ) )
 	 */
-	protected Vector ruleIndexToRuleList = new Vector();
-
-    /** An AST that records entire input grammar with all rules.  A simple
-     *  grammar with one rule, "grammar t; a : A | B ;", looks like:
-     * ( grammar t ( rule a ( BLOCK ( ALT A ) ( ALT B ) ) <end-of-rule> ) )
-     */
-    protected GrammarAST grammarTree = null;
-
-    /** Each subrule/rule is a decision point and we must track them so we
-     *  can go back later and build DFA predictors for them.  This includes
-     *  all the rules, subrules, optional blocks, ()+, ()* etc...  The
-     *  elements in this list are NFAState objects.
-     */
-	protected Vector indexToDecision = new Vector(INITIAL_DECISION_LIST_SIZE);
-
-    /** If non-null, this is the code generator we will use to generate
-     *  recognizers in the target language.
-     */
-    protected CodeGenerator generator;
-
-	NameSpaceChecker nameSpaceChecker = new NameSpaceChecker(this);
+	protected GrammarAST grammarTree = null;
 
-	/** Used during LOOK to detect computation cycles */
-	protected Set lookBusy = new HashSet();
+	/** Each subrule/rule is a decision point and we must track them so we
+	 *  can go back later and build DFA predictors for them.  This includes
+	 *  all the rules, subrules, optional blocks, ()+, ()* etc...
+	 */
+	protected Vector<Decision> indexToDecision =
+		new Vector<Decision>(INITIAL_DECISION_LIST_SIZE);
 
-	/** The checkForLeftRecursion method needs to track what rules it has
-	 *  visited to track infinite recursion.
+	/** If non-null, this is the code generator we will use to generate
+	 *  recognizers in the target language.
 	 */
-	protected Set visitedDuringRecursionCheck = null;
+	protected CodeGenerator generator;
+
+	public NameSpaceChecker nameSpaceChecker = new NameSpaceChecker(this);
 
-	protected boolean watchNFAConversion = false;
+	public LL1Analyzer ll1Analyzer = new LL1Analyzer(this);
 
 	/** For merged lexer/parsers, we must construct a separate lexer spec.
 	 *  This is the template for lexer; put the literals first then the
@@ -362,6 +433,7 @@ public class Grammar {
 			"  <options:{<it.name>=<it.value>;<\\n>}>\n" +
 			"}<\\n>\n" +
 			"<endif>\n" +
+			"<if(imports)>import <imports; separator=\", \">;<endif>\n" +
 			"<actionNames,actions:{n,a|@<n> {<a>}\n}>\n" +
 			"<literals:{<it.ruleName> : <it.literal> ;\n}>\n" +
 			"<rules>",
@@ -380,9 +452,10 @@ public class Grammar {
 
 	public int numberOfSemanticPredicates = 0;
 	public int numberOfManualLookaheadOptions = 0;
-	public Set setOfNondeterministicDecisionNumbers = new HashSet();
-	public Set setOfNondeterministicDecisionNumbersResolvedWithPredicates = new HashSet();
-	public Set setOfDFAWhoseConversionTerminatedEarly = new HashSet();
+	public Set<Integer> setOfNondeterministicDecisionNumbers = new HashSet<Integer>();
+	public Set<Integer> setOfNondeterministicDecisionNumbersResolvedWithPredicates =
+		new HashSet<Integer>();
+	public Set setOfDFAWhoseAnalysisTimedOut = new HashSet();
 
 	/** Track decisions with syn preds specified for reporting.
 	 *  This is the a set of BLOCK type AST nodes.
@@ -392,7 +465,7 @@ public class Grammar {
 	/** Track decisions that actually use the syn preds in the DFA.
 	 *  Computed during NFA to DFA conversion.
 	 */
-	public Set<DFA> decisionsWhoseDFAsUsesSynPreds = new HashSet();
+	public Set<DFA> decisionsWhoseDFAsUsesSynPreds = new HashSet<DFA>();
 
 	/** Track names of preds so we can avoid generating preds that aren't used
 	 *  Computed during NFA to DFA conversion.  Just walk accept states
@@ -407,8 +480,8 @@ public class Grammar {
 	 */
 	public Set<GrammarAST> blocksWithSemPreds = new HashSet();
 
-	/** Track decisions that actually use the syn preds in the DFA. Set<DFA> */
-	public Set decisionsWhoseDFAsUsesSemPreds = new HashSet();
+	/** Track decisions that actually use the syn preds in the DFA. */
+	public Set<DFA> decisionsWhoseDFAsUsesSemPreds = new HashSet();
 
 	protected boolean allDecisionDFACreated = false;
 
@@ -422,38 +495,46 @@ public class Grammar {
 	/** Factored out the sanity checking code; delegate to it. */
 	GrammarSanity sanity = new GrammarSanity(this);
 
+	/** Create a grammar from file name.  */
+	public Grammar(Tool tool, String fileName, CompositeGrammar composite) {
+		this.composite = composite;
+		setTool(tool);
+		setFileName(fileName);
+		// ensure we have the composite set to something
+		if ( composite.delegateGrammarTreeRoot==null ) {
+			composite.setDelegationRoot(this);
+		}		
+	}
+
+	/** Useful for when you are sure that you are not part of a composite
+	 *  already.  Used in Interp/RandomPhrase and testing.
+	 */
 	public Grammar() {
-		initTokenSymbolTables();
 		builtFromString = true;
+		composite = new CompositeGrammar(this);
 	}
 
+	/** Used for testing; only useful on noncomposite grammars.*/
 	public Grammar(String grammarString)
 			throws antlr.RecognitionException, antlr.TokenStreamException
 	{
-		builtFromString = true;
-		initTokenSymbolTables();
-		setFileName("<string>");
-		setGrammarContent(new StringReader(grammarString));
+		this(null, grammarString);
 	}
 
-	public Grammar(String fileName, String grammarString)
-			throws antlr.RecognitionException, antlr.TokenStreamException
+	/** Used for testing and Interp/RandomPhrase.  Only useful on
+	 *  noncomposite grammars.
+	 */
+	public Grammar(Tool tool, String grammarString)
+		throws antlr.RecognitionException
 	{
-		this(null, fileName, new StringReader(grammarString));
-	}
-
-    /** Create a grammar from a Reader.  Parse the grammar, building a tree
-     *  and loading a symbol table of sorts here in Grammar.  Then create
-     *  an NFA and associated factory.  Walk the AST representing the grammar,
-     *  building the state clusters of the NFA.
-     */
-    public Grammar(Tool tool, String fileName, Reader r)
-            throws antlr.RecognitionException, antlr.TokenStreamException
-    {
-		initTokenSymbolTables();
+		this();
 		setTool(tool);
-		setFileName(fileName);
-		setGrammarContent(r);
+		setFileName("<string>");
+		StringReader r = new StringReader(grammarString);
+		parseAndBuildAST(r);
+		composite.assignTokenTypes();
+		defineGrammarSymbols();
+		checkNameSpaceAndActions();
 	}
 
 	public void setFileName(String fileName) {
@@ -491,17 +572,33 @@ public class Grammar {
 		this.name = name;
 	}
 
-	public void setGrammarContent(String grammarString)
-		throws antlr.RecognitionException, antlr.TokenStreamException
-	{
-		setGrammarContent(new StringReader(grammarString));
+	public void setGrammarContent(String grammarString) throws RecognitionException {
+		StringReader r = new StringReader(grammarString);
+		parseAndBuildAST(r);
+		composite.assignTokenTypes();
+		composite.defineGrammarSymbols();
 	}
 
-	public void setGrammarContent(Reader r)
-		throws antlr.RecognitionException, antlr.TokenStreamException
+	public void parseAndBuildAST()
+		throws IOException
 	{
-		ErrorManager.resetErrorState(); // reset in case > 1 grammar in same thread
+		FileReader fr = null;
+		BufferedReader br = null;
+		try {
+			fr = new FileReader(fileName);
+			br = new BufferedReader(fr);
+			parseAndBuildAST(br);
+			br.close();
+			br = null;
+		}
+		finally {
+			if ( br!=null ) {
+				br.close();
+			}
+		}
+	}
 
+	public void parseAndBuildAST(Reader r) {
 		// BUILD AST FROM GRAMMAR
 		ANTLRLexer lexer = new ANTLRLexer(r);
 		lexer.setFilename(this.getFileName());
@@ -515,13 +612,35 @@ public class Grammar {
 		tokenBuffer.discard(ANTLRParser.COMMENT);
 		tokenBuffer.discard(ANTLRParser.SL_COMMENT);
 		ANTLRParser parser = new ANTLRParser(tokenBuffer);
-		parser.getASTFactory().setASTNodeClass(GrammarAST.class);
 		parser.setFilename(this.getFileName());
-		parser.setASTNodeClass("org.antlr.tool.GrammarAST");
-		parser.grammar(this);
+		try {
+			parser.grammar(this);
+		}
+		catch (TokenStreamException tse) {
+			ErrorManager.internalError("unexpected stream error from parsing "+fileName, tse);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.internalError("unexpected parser recognition error from "+fileName, re);
+		}
+
+        dealWithTreeFilterMode(); // tree grammar and filter=true?
+
+        if ( lexer.hasASTOperator && !buildAST() ) {
+			Object value = getOption("output");
+			if ( value == null ) {
+				ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+										    this, null);
+				setOption("output", "AST", null);
+			}
+			else {
+				ErrorManager.grammarError(ErrorManager.MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION,
+										  this, null, value);
+			}
+		}
+
 		grammarTree = (GrammarAST)parser.getAST();
 		setFileName(lexer.getFilename()); // the lexer #src might change name
-		if ( grammarTree.findFirstType(ANTLRParser.RULE)==null ) {
+		if ( grammarTree==null || grammarTree.findFirstType(ANTLRParser.RULE)==null ) {
 			ErrorManager.error(ErrorManager.MSG_NO_RULES, getFileName());
 			return;
 		}
@@ -529,30 +648,54 @@ public class Grammar {
 		// Get syn pred rules and add to existing tree
 		List synpredRules =
 			getArtificialRulesForSyntacticPredicates(parser,
-												 	 nameToSynpredASTMap);
+													 nameToSynpredASTMap);
 		for (int i = 0; i < synpredRules.size(); i++) {
 			GrammarAST rAST = (GrammarAST) synpredRules.get(i);
 			grammarTree.addChild(rAST);
 		}
+	}
+
+    protected void dealWithTreeFilterMode() {
+        Object filterMode = (String)getOption("filter");
+        if ( type==TREE_PARSER && filterMode!=null && filterMode.toString().equals("true") ) {
+            // check for conflicting options
+            // filter => backtrack=true
+            // filter&&output=AST => rewrite=true
+            // filter&&output!=AST => error
+            // any deviation from valid option set is an error
+            Object backtrack = (String)getOption("backtrack");
+            Object output = getOption("output");
+            Object rewrite = getOption("rewrite");
+            if ( backtrack!=null && !backtrack.toString().equals("true") ) {
+                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
+                                   "backtrack", backtrack);
+            }
+            if ( output!=null && !output.toString().equals("AST") ) {
+                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
+                                   "output", output);
+                setOption("output", "", null);
+            }
+            if ( rewrite!=null && !rewrite.toString().equals("true") ) {
+                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
+                                   "rewrite", rewrite);
+            }
+            // set options properly
+            setOption("backtrack", "true", null);
+            if ( output!=null && output.toString().equals("AST") ) {
+                setOption("rewrite", "true", null);
+            }
+            // @synpredgate set to state.backtracking==1 by code gen when filter=true
+            // superClass set in template target::treeParser
+        }
+    }
 
+	public void defineGrammarSymbols() {
 		if ( Tool.internalOption_PrintGrammarTree ) {
 			System.out.println(grammarTree.toStringList());
 		}
 
-		// ASSIGN TOKEN TYPES
-		//System.out.println("### assign types");
-		AssignTokenTypesWalker ttypesWalker = new AssignTokenTypesWalker();
-		ttypesWalker.setASTNodeClass("org.antlr.tool.GrammarAST");
-		try {
-			ttypesWalker.grammar(grammarTree, this);
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-							   re);
-		}
-
 		// DEFINE RULES
-		//System.out.println("### define rules");
+		//System.out.println("### define "+name+" rules");
 		DefineGrammarItemsWalker defineItemsWalker = new DefineGrammarItemsWalker();
 		defineItemsWalker.setASTNodeClass("org.antlr.tool.GrammarAST");
 		try {
@@ -562,15 +705,23 @@ public class Grammar {
 			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
 							   re);
 		}
+	}
 
-		// ANALYZE ACTIONS, LOOKING FOR LABEL AND ATTR REFS
+	/** ANALYZE ACTIONS, LOOKING FOR LABEL AND ATTR REFS, sanity check */
+	public void checkNameSpaceAndActions() {
 		examineAllExecutableActions();
 		checkAllRulesForUselessLabels();
 
 		nameSpaceChecker.checkConflicts();
 	}
 
-	/** If the grammar is a merged grammar, return the text of the implicit
+	/** Many imports are illegal such as lexer into a tree grammar */
+	public boolean validImport(Grammar delegate) {
+		List<Integer> validDelegators = validDelegations.get(delegate.type);
+		return validDelegators!=null && validDelegators.contains(this.type);
+	}
+
+	/** If the grammar is a combined grammar, return the text of the implicit
 	 *  lexer grammar.
 	 */
 	public String getLexerGrammar() {
@@ -604,21 +755,41 @@ public class Grammar {
 
 	public String getImplicitlyGeneratedLexerFileName() {
 		return name+
-			IGNORE_STRING_IN_GRAMMAR_FILE_NAME +
-			LEXER_GRAMMAR_FILE_EXTENSION;
+			   IGNORE_STRING_IN_GRAMMAR_FILE_NAME +
+			   LEXER_GRAMMAR_FILE_EXTENSION;
 	}
 
-	public File getImportedVocabFileName(String vocabName) {
-		return new File(tool.getLibraryDirectory(),
-						File.separator+
-							vocabName+
-							CodeGenerator.VOCAB_FILE_EXTENSION);
+	/** Get the name of the generated recognizer; may or may not be same
+	 *  as grammar name.
+	 *  Recognizer is TParser and TLexer from T if combined, else
+	 *  just use T regardless of grammar type.
+	 */
+	public String getRecognizerName() {
+		String suffix = "";
+		List<Grammar> grammarsFromRootToMe = composite.getDelegators(this);
+		//System.out.println("grammarsFromRootToMe="+grammarsFromRootToMe);
+		String qualifiedName = name;
+		if ( grammarsFromRootToMe!=null ) {
+			StringBuffer buf = new StringBuffer();
+			for (Grammar g : grammarsFromRootToMe) {
+				buf.append(g.name);
+				buf.append('_');
+			}
+			buf.append(name);
+			qualifiedName = buf.toString();
+		}
+		if ( type==Grammar.COMBINED ||
+			 (type==Grammar.LEXER && implicitLexer) )
+		{
+			suffix = Grammar.grammarTypeToFileNameSuffix[type];
+		}
+		return qualifiedName+suffix;
 	}
 
 	/** Parse a rule we add artificially that is a list of the other lexer
-     *  rules like this: "Tokens : ID | INT | SEMI ;"  nextToken() will invoke
-     *  this to set the current token.  Add char literals before
-     *  the rule references.
+	 *  rules like this: "Tokens : ID | INT | SEMI ;"  nextToken() will invoke
+	 *  this to set the current token.  Add char literals before
+	 *  the rule references.
 	 *
 	 *  If in filter mode, we want every alt to backtrack and we need to
 	 *  do k=1 to force the "first token def wins" rule.  Otherwise, the
@@ -626,18 +797,19 @@ public class Grammar {
 	 *
 	 *  The ANTLRParser antlr.g file now invokes this when parsing a lexer
 	 *  grammar, which I think is proper even though it peeks at the info
-	 *  that later phases will compute.  It gets a list of lexer rules
+	 *  that later phases will (re)compute.  It gets a list of lexer rules
 	 *  and builds a string representing the rule; then it creates a parser
 	 *  and adds the resulting tree to the grammar's tree.
-     */
-    public GrammarAST addArtificialMatchTokensRule(GrammarAST grammarAST,
-												   List ruleNames,
+	 */
+	public GrammarAST addArtificialMatchTokensRule(GrammarAST grammarAST,
+												   List<String> ruleNames,
+												   List<String> delegateNames,
 												   boolean filterMode) {
 		StringTemplate matchTokenRuleST = null;
 		if ( filterMode ) {
 			matchTokenRuleST = new StringTemplate(
 					ARTIFICIAL_TOKENS_RULENAME+
-						" options {k=1; backtrack=true;} : <rules; separator=\"|\">;",
+					" options {k=1; backtrack=true;} : <rules; separator=\"|\">;",
 					AngleBracketTemplateLexer.class);
 		}
 		else {
@@ -651,9 +823,13 @@ public class Grammar {
 			String rname = (String) ruleNames.get(i);
 			matchTokenRuleST.setAttribute("rules", rname);
 		}
+		for (int i = 0; i < delegateNames.size(); i++) {
+			String dname = (String) delegateNames.get(i);
+			matchTokenRuleST.setAttribute("rules", dname+".Tokens");
+		}
 		//System.out.println("tokens rule: "+matchTokenRuleST.toString());
 
-        ANTLRLexer lexer = new ANTLRLexer(new StringReader(matchTokenRuleST.toString()));
+		ANTLRLexer lexer = new ANTLRLexer(new StringReader(matchTokenRuleST.toString()));
 		lexer.setTokenObjectClass("antlr.TokenWithIndex");
 		TokenStreamRewriteEngine tokbuf =
 			new TokenStreamRewriteEngine(lexer);
@@ -661,12 +837,12 @@ public class Grammar {
 		tokbuf.discard(ANTLRParser.ML_COMMENT);
 		tokbuf.discard(ANTLRParser.COMMENT);
 		tokbuf.discard(ANTLRParser.SL_COMMENT);
-        ANTLRParser parser = new ANTLRParser(tokbuf);
-		parser.grammar = this;
-		parser.gtype = ANTLRParser.LEXER_GRAMMAR;
-        parser.setASTNodeClass("org.antlr.tool.GrammarAST");
-        try {
-            parser.rule();
+		ANTLRParser parser = new ANTLRParser(tokbuf);
+		parser.setGrammar(this);
+		parser.setGtype(ANTLRParser.LEXER_GRAMMAR);
+		parser.setASTNodeClass("org.antlr.tool.GrammarAST");
+		try {
+			parser.rule();
 			if ( Tool.internalOption_PrintGrammarTree ) {
 				System.out.println("Tokens rule: "+parser.getAST().toStringTree());
 			}
@@ -675,11 +851,11 @@ public class Grammar {
 				p = (GrammarAST)p.getNextSibling();
 			}
 			p.addChild(parser.getAST());
-        }
-        catch (Exception e) {
+		}
+		catch (Exception e) {
 			ErrorManager.error(ErrorManager.MSG_ERROR_CREATING_ARTIFICIAL_RULE,
 							   e);
-        }
+		}
 		return (GrammarAST)parser.getAST();
 	}
 
@@ -708,56 +884,66 @@ public class Grammar {
 		return rules;
 	}
 
-	protected void initTokenSymbolTables() {
-        // the faux token types take first NUM_FAUX_LABELS positions
-		// then we must have room for the predefined runtime token types
-		// like DOWN/UP used for tree parsing.
-        typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE-1);
-        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, "<INVALID>");
-        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, "<EOT>");
-        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, "<SEMPRED>");
-        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
-        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.DOWN-1, "DOWN");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.UP-1, "UP");
-        tokenIDToTypeMap.put("<INVALID>", Utils.integer(Label.INVALID));
-        tokenIDToTypeMap.put("<EOT>", Utils.integer(Label.EOT));
-        tokenIDToTypeMap.put("<SEMPRED>", Utils.integer(Label.SEMPRED));
-        tokenIDToTypeMap.put("<SET>", Utils.integer(Label.SET));
-        tokenIDToTypeMap.put("<EPSILON>", Utils.integer(Label.EPSILON));
-		tokenIDToTypeMap.put("EOF", Utils.integer(Label.EOF));
-		tokenIDToTypeMap.put("<EOR>", Utils.integer(Label.EOR_TOKEN_TYPE));
-		tokenIDToTypeMap.put("DOWN", Utils.integer(Label.DOWN));
-		tokenIDToTypeMap.put("UP", Utils.integer(Label.UP));
-    }
-
-    /** Walk the list of options, altering this Grammar object according
-     *  to any I recognize.
-    protected void processOptions() {
-        Iterator optionNames = options.keySet().iterator();
-        while (optionNames.hasNext()) {
-            String optionName = (String) optionNames.next();
-            Object value = options.get(optionName);
-            if ( optionName.equals("tokenVocab") ) {
+	/** Walk the list of options, altering this Grammar object according
+	 *  to any I recognize.
+	protected void processOptions() {
+		Iterator optionNames = options.keySet().iterator();
+		while (optionNames.hasNext()) {
+			String optionName = (String) optionNames.next();
+			Object value = options.get(optionName);
+			if ( optionName.equals("tokenVocab") ) {
 
-            }
-        }
-    }
-     */
+			}
+		}
+	}
+	 */
 
-    public void createNFAs() {
-		//System.out.println("### create NFAs");
+	/** Define all the rule begin/end NFAStates to solve forward reference
+	 *  issues.  Critical for composite grammars too.
+	 *  This is normally called on all root/delegates manually and then
+	 *  buildNFA() is called afterwards because the NFA construction needs
+	 *  to see rule start/stop states from potentially every grammar. Has
+	 *  to be have these created a priori.  Testing routines will often
+	 *  just call buildNFA(), which forces a call to this method if not
+	 *  done already. Works ONLY for single noncomposite grammars.
+	 */
+	public void createRuleStartAndStopNFAStates() {
+		//System.out.println("### createRuleStartAndStopNFAStates "+getGrammarTypeString()+" grammar "+name+" NFAs");
 		if ( nfa!=null ) {
+			return;
+		}
+		nfa = new NFA(this);
+		factory = new NFAFactory(nfa);
+
+		Collection rules = getRules();
+		for (Iterator itr = rules.iterator(); itr.hasNext();) {
+			Rule r = (Rule) itr.next();
+			String ruleName = r.name;
+			NFAState ruleBeginState = factory.newState();
+			ruleBeginState.setDescription("rule "+ruleName+" start");
+			ruleBeginState.enclosingRule = r;
+			r.startState = ruleBeginState;
+			NFAState ruleEndState = factory.newState();
+			ruleEndState.setDescription("rule "+ruleName+" end");
+			ruleEndState.setAcceptState(true);
+			ruleEndState.enclosingRule = r;
+			r.stopState = ruleEndState;
+		}
+	}
+
+	public void buildNFA() {
+		if ( nfa==null ) {
+			createRuleStartAndStopNFAStates();
+		}
+		if ( nfa.complete ) {
 			// don't let it create more than once; has side-effects
 			return;
 		}
+		//System.out.println("### build "+getGrammarTypeString()+" grammar "+name+" NFAs");
 		if ( getRules().size()==0 ) {
 			return;
 		}
-		nfa = new NFA(this); // create NFA that TreeToNFAConverter'll fill in
-		NFAFactory factory = new NFAFactory(nfa);
+
 		TreeToNFAConverter nfaBuilder = new TreeToNFAConverter(this, nfa, factory);
 		try {
 			nfaBuilder.grammar(grammarTree);
@@ -767,25 +953,39 @@ public class Grammar {
 							   name,
 							   re);
 		}
-		//System.out.println("NFA has "+factory.getNumberOfStates()+" states");
+		nfa.complete = true;
 	}
 
 	/** For each decision in this grammar, compute a single DFA using the
-     *  NFA states associated with the decision.  The DFA construction
-     *  determines whether or not the alternatives in the decision are
-     *  separable using a regular lookahead language.
-     *
-     *  Store the lookahead DFAs in the AST created from the user's grammar
-     *  so the code generator or whoever can easily access it.
-     *
-     *  This is a separate method because you might want to create a
-     *  Grammar without doing the expensive analysis.
-     */
-    public void createLookaheadDFAs() {
+	 *  NFA states associated with the decision.  The DFA construction
+	 *  determines whether or not the alternatives in the decision are
+	 *  separable using a regular lookahead language.
+	 *
+	 *  Store the lookahead DFAs in the AST created from the user's grammar
+	 *  so the code generator or whoever can easily access it.
+	 *
+	 *  This is a separate method because you might want to create a
+	 *  Grammar without doing the expensive analysis.
+	 */
+	public void createLookaheadDFAs() {
+		createLookaheadDFAs(true);
+	}
+
+	public void createLookaheadDFAs(boolean wackTempStructures) {
 		if ( nfa==null ) {
-			createNFAs();
+			buildNFA();
 		}
 
+		// CHECK FOR LEFT RECURSION; Make sure we can actually do analysis
+		checkAllRulesForLeftRecursion();
+
+		/*
+		// was there a severe problem while sniffing the grammar?
+		if ( ErrorManager.doNotAttemptAnalysis() ) {
+			return;
+		}
+		*/
+
 		long start = System.currentTimeMillis();
 
 		//System.out.println("### create DFAs");
@@ -793,8 +993,43 @@ public class Grammar {
 		if ( NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION ) {
 			for (int decision=1; decision<=numDecisions; decision++) {
 				NFAState decisionStartState = getDecisionNFAStartState(decision);
+				if ( leftRecursiveRules.contains(decisionStartState.enclosingRule) ) {
+					// don't bother to process decisions within left recursive rules.
+					if ( composite.watchNFAConversion ) {
+						System.out.println("ignoring decision "+decision+
+										   " within left-recursive rule "+decisionStartState.enclosingRule.name);
+					}
+					continue;
+				}
 				if ( !externalAnalysisAbort && decisionStartState.getNumberOfTransitions()>1 ) {
-					createLookaheadDFA(decision);
+					Rule r = decisionStartState.enclosingRule;
+					if ( r.isSynPred && !synPredNamesUsedInDFA.contains(r.name) ) {
+						continue;
+					}
+					DFA dfa = null;
+					// if k=* or k=1, try LL(1)
+					if ( getUserMaxLookahead(decision)==0 ||
+						 getUserMaxLookahead(decision)==1 )
+					{
+						dfa = createLL_1_LookaheadDFA(decision);
+					}
+					if ( dfa==null ) {
+						if ( composite.watchNFAConversion ) {
+							System.out.println("decision "+decision+
+											   " not suitable for LL(1)-optimized DFA analysis");
+						}
+						dfa = createLookaheadDFA(decision, wackTempStructures);
+					}
+					if ( dfa.startState==null ) {
+						// something went wrong; wipe out DFA
+						setLookaheadDFA(decision, null);
+					}
+					if ( Tool.internalOption_PrintDFA ) {
+						System.out.println("DFA d="+decision);
+						FASerializer serializer = new FASerializer(nfa.grammar);
+						String result = serializer.serialize(dfa.startState);
+						System.out.println(result);
+					}
 				}
 			}
 		}
@@ -829,57 +1064,242 @@ public class Grammar {
 		allDecisionDFACreated = true;
 	}
 
-	public void createLookaheadDFA(int decision) {
+	public DFA createLL_1_LookaheadDFA(int decision) {
 		Decision d = getDecision(decision);
-		String enclosingRule = d.startState.getEnclosingRule();
-		Rule r = getRule(enclosingRule);
+		String enclosingRule = d.startState.enclosingRule.name;
+		Rule r = d.startState.enclosingRule;
+		NFAState decisionStartState = getDecisionNFAStartState(decision);
+
+		if ( composite.watchNFAConversion ) {
+			System.out.println("--------------------\nattempting LL(1) DFA (d="
+							   +decisionStartState.getDecisionNumber()+") for "+
+							   decisionStartState.getDescription());
+		}
 
-		//System.out.println("createLookaheadDFA(): "+enclosingRule+" dec "+decision+"; synprednames prev used "+synPredNamesUsedInDFA);
 		if ( r.isSynPred && !synPredNamesUsedInDFA.contains(enclosingRule) ) {
-			return;
+			return null;
 		}
+
+		// compute lookahead for each alt
+		int numAlts = getNumberOfAltsForDecisionNFA(decisionStartState);
+		LookaheadSet[] altLook = new LookaheadSet[numAlts+1];
+		for (int alt = 1; alt <= numAlts; alt++) {
+			int walkAlt =
+				decisionStartState.translateDisplayAltToWalkAlt(alt);
+			NFAState altLeftEdge = getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+			NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
+			//System.out.println("alt "+alt+" start state = "+altStartState.stateNumber);
+			altLook[alt] = ll1Analyzer.LOOK(altStartState);
+			//System.out.println("alt "+alt+": "+altLook[alt].toString(this));
+		}
+
+		// compare alt i with alt j for disjointness
+		boolean decisionIsLL_1 = true;
+outer:
+		for (int i = 1; i <= numAlts; i++) {
+			for (int j = i+1; j <= numAlts; j++) {
+				/*
+				System.out.println("compare "+i+", "+j+": "+
+								   altLook[i].toString(this)+" with "+
+								   altLook[j].toString(this));
+				*/
+				LookaheadSet collision = altLook[i].intersection(altLook[j]);
+				if ( !collision.isNil() ) {
+					//System.out.println("collision (non-LL(1)): "+collision.toString(this));
+					decisionIsLL_1 = false;
+					break outer;
+				}
+			}
+		}
+
+		boolean foundConfoundingPredicate =
+			ll1Analyzer.detectConfoundingPredicates(decisionStartState);
+		if ( decisionIsLL_1 && !foundConfoundingPredicate ) {
+			// build an LL(1) optimized DFA with edge for each altLook[i]
+			if ( NFAToDFAConverter.debug ) {
+				System.out.println("decision "+decision+" is simple LL(1)");
+			}
+			DFA lookaheadDFA = new LL1DFA(decision, decisionStartState, altLook);
+			setLookaheadDFA(decision, lookaheadDFA);
+			updateLineColumnToLookaheadDFAMap(lookaheadDFA);
+			return lookaheadDFA;
+		}
+
+		// not LL(1) but perhaps we can solve with simplified predicate search
+		// even if k=1 set manually, only resolve here if we have preds; i.e.,
+		// don't resolve etc...
+
+		/*
+		SemanticContext visiblePredicates =
+			ll1Analyzer.getPredicates(decisionStartState);
+		boolean foundConfoundingPredicate =
+			ll1Analyzer.detectConfoundingPredicates(decisionStartState);
+			*/
+
+		// exit if not forced k=1 or we found a predicate situation we
+		// can't handle: predicates in rules invoked from this decision.
+		if ( getUserMaxLookahead(decision)!=1 || // not manually set to k=1
+			 !getAutoBacktrackMode(decision) ||
+			 foundConfoundingPredicate )
+		{
+			//System.out.println("trying LL(*)");
+			return null;
+		}
+
+		List<IntervalSet> edges = new ArrayList<IntervalSet>();
+		for (int i = 1; i < altLook.length; i++) {
+			LookaheadSet s = altLook[i];
+			edges.add((IntervalSet)s.tokenTypeSet);
+		}
+		List<IntervalSet> disjoint = makeEdgeSetsDisjoint(edges);
+		//System.out.println("disjoint="+disjoint);
+
+		MultiMap<IntervalSet, Integer> edgeMap = new MultiMap<IntervalSet, Integer>();
+		for (int i = 0; i < disjoint.size(); i++) {
+			IntervalSet ds = (IntervalSet) disjoint.get(i);
+			for (int alt = 1; alt < altLook.length; alt++) {
+				LookaheadSet look = altLook[alt];
+				if ( !ds.and(look.tokenTypeSet).isNil() ) {
+					edgeMap.map(ds, alt);
+				}
+			}
+		}
+		//System.out.println("edge map: "+edgeMap);
+
+		// TODO: how do we know we covered stuff?
+
+		// build an LL(1) optimized DFA with edge for each altLook[i]
+		DFA lookaheadDFA = new LL1DFA(decision, decisionStartState, edgeMap);
+		setLookaheadDFA(decision, lookaheadDFA);
+
+		// create map from line:col to decision DFA (for ANTLRWorks)
+		updateLineColumnToLookaheadDFAMap(lookaheadDFA);
+
+		return lookaheadDFA;
+	}
+
+	private void updateLineColumnToLookaheadDFAMap(DFA lookaheadDFA) {
+		GrammarAST decisionAST = nfa.grammar.getDecisionBlockAST(lookaheadDFA.decisionNumber);
+		int line = decisionAST.getLine();
+		int col = decisionAST.getColumn();
+		lineColumnToLookaheadDFAMap.put(new StringBuffer().append(line + ":")
+										.append(col).toString(), lookaheadDFA);
+	}
+
+	protected List<IntervalSet> makeEdgeSetsDisjoint(List<IntervalSet> edges) {
+		OrderedHashSet<IntervalSet> disjointSets = new OrderedHashSet<IntervalSet>();
+		// walk each incoming edge label/set and add to disjoint set
+		int numEdges = edges.size();
+		for (int e = 0; e < numEdges; e++) {
+			IntervalSet t = (IntervalSet) edges.get(e);
+			if ( disjointSets.contains(t) ) { // exact set present
+				continue;
+			}
+
+			// compare t with set i for disjointness
+			IntervalSet remainder = t; // remainder starts out as whole set to add
+			int numDisjointElements = disjointSets.size();
+			for (int i = 0; i < numDisjointElements; i++) {
+				IntervalSet s_i = (IntervalSet)disjointSets.get(i);
+
+				if ( t.and(s_i).isNil() ) { // nothing in common
+					continue;
+				}
+				//System.out.println(label+" collides with "+rl);
+
+				// For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
+				// (ignoring s_i-t if nil; don't put in list)
+
+				// Replace existing s_i with intersection since we
+				// know that will always be a non nil character class
+				IntervalSet intersection = (IntervalSet)s_i.and(t);
+				disjointSets.set(i, intersection);
+
+				// Compute s_i-t to see what is in current set and not in incoming
+				IntSet existingMinusNewElements = s_i.subtract(t);
+				//System.out.println(s_i+"-"+t+"="+existingMinusNewElements);
+				if ( !existingMinusNewElements.isNil() ) {
+					// found a new character class, add to the end (doesn't affect
+					// outer loop duration due to n computation a priori.
+					disjointSets.add(existingMinusNewElements);
+				}
+
+				// anything left to add to the reachableLabels?
+				remainder = (IntervalSet)t.subtract(s_i);
+				if ( remainder.isNil() ) {
+					break; // nothing left to add to set.  done!
+				}
+
+				t = remainder;
+			}
+			if ( !remainder.isNil() ) {
+				disjointSets.add(remainder);
+			}
+		}
+		return disjointSets.elements();
+	}
+
+	public DFA createLookaheadDFA(int decision, boolean wackTempStructures) {
+		Decision d = getDecision(decision);
+		String enclosingRule = d.startState.enclosingRule.name;
+		Rule r = d.startState.enclosingRule;
+
+		//System.out.println("createLookaheadDFA(): "+enclosingRule+" dec "+decision+"; synprednames prev used "+synPredNamesUsedInDFA);
 		NFAState decisionStartState = getDecisionNFAStartState(decision);
 		long startDFA=0,stopDFA=0;
-		if ( watchNFAConversion ) {
+		if ( composite.watchNFAConversion ) {
 			System.out.println("--------------------\nbuilding lookahead DFA (d="
 							   +decisionStartState.getDecisionNumber()+") for "+
 							   decisionStartState.getDescription());
 			startDFA = System.currentTimeMillis();
 		}
+
 		DFA lookaheadDFA = new DFA(decision, decisionStartState);
-		if ( (lookaheadDFA.analysisAborted() && // did analysis bug out?
-			 lookaheadDFA.getUserMaxLookahead()!=1) || // either k=* or k>1
-			 (lookaheadDFA.probe.isNonLLStarDecision() && // >1 alt recurses, k=*
-		      lookaheadDFA.getAutoBacktrackMode()) )
-		{
-			// set k=1 option if not already k=1 and try again
-			// clean up tracking stuff
+		// Retry to create a simpler DFA if analysis failed (non-LL(*),
+		// recursion overflow, or time out).
+		boolean failed =
+			lookaheadDFA.analysisTimedOut() ||
+			lookaheadDFA.probe.isNonLLStarDecision() ||
+			lookaheadDFA.probe.analysisOverflowed();
+		if ( failed && lookaheadDFA.okToRetryDFAWithK1() ) {
+			// set k=1 option and try again.
+			// First, clean up tracking stuff
 			decisionsWhoseDFAsUsesSynPreds.remove(lookaheadDFA);
 			// TODO: clean up synPredNamesUsedInDFA also (harder)
+			d.blockAST.setBlockOption(this, "k", Utils.integer(1));
+			if ( composite.watchNFAConversion ) {
+				System.out.print("trying decision "+decision+
+								 " again with k=1; reason: "+
+								 lookaheadDFA.getReasonForFailure());
+			}
 			lookaheadDFA = null; // make sure other memory is "free" before redoing
-			d.blockAST.setOption(this, "k", Utils.integer(1));
-			//System.out.println("trying decision "+decision+" again with k=1");
 			lookaheadDFA = new DFA(decision, decisionStartState);
-			if ( lookaheadDFA.analysisAborted() ) { // did analysis bug out?
-				ErrorManager.internalError("could not even do k=1 for decision "+decision);
-			}
 		}
+		if ( lookaheadDFA.analysisTimedOut() ) { // did analysis bug out?
+			ErrorManager.internalError("could not even do k=1 for decision "+
+									   decision+"; reason: "+
+									   lookaheadDFA.getReasonForFailure());
+		}
+
 
 		setLookaheadDFA(decision, lookaheadDFA);
 
+		if ( wackTempStructures ) {
+			for (DFAState s : lookaheadDFA.getUniqueStates().values()) {
+				s.reset();
+			}
+		}
+
 		// create map from line:col to decision DFA (for ANTLRWorks)
-		GrammarAST decisionAST = nfa.grammar.getDecisionBlockAST(lookaheadDFA.decisionNumber);
-		int line = decisionAST.getLine();
-		int col = decisionAST.getColumn();
-		lineColumnToLookaheadDFAMap.put(new StringBuffer().append(line + ":")
-										.append(col).toString(), lookaheadDFA);
+		updateLineColumnToLookaheadDFAMap(lookaheadDFA);
 
-		if ( watchNFAConversion ) {
+		if ( composite.watchNFAConversion ) {
 			stopDFA = System.currentTimeMillis();
 			System.out.println("cost: "+lookaheadDFA.getNumberOfStates()+
 							   " states, "+(int)(stopDFA-startDFA)+" ms");
 		}
 		//System.out.println("after create DFA; synPredNamesUsedInDFA="+synPredNamesUsedInDFA);
+		return lookaheadDFA;
 	}
 
 	/** Terminate DFA creation (grammar analysis).
@@ -894,44 +1314,50 @@ public class Grammar {
 
 	/** Return a new unique integer in the token type space */
 	public int getNewTokenType() {
-		maxTokenType++;
-		return maxTokenType;
+		composite.maxTokenType++;
+		return composite.maxTokenType;
 	}
 
 	/** Define a token at a particular token type value.  Blast an
-	 *  old value with a new one.  This is called directly during import vocab
-     *  operation to set up tokens with specific values.
-     */
-    public void defineToken(String text, int tokenType) {
-		if ( tokenIDToTypeMap.get(text)!=null ) {
+	 *  old value with a new one.  This is called normal grammar processsing
+	 *  and during import vocab operations to set tokens with specific values.
+	 */
+	public void defineToken(String text, int tokenType) {
+		//System.out.println("defineToken("+text+", "+tokenType+")");
+		if ( composite.tokenIDToTypeMap.get(text)!=null ) {
 			// already defined?  Must be predefined one like EOF;
 			// do nothing
 			return;
 		}
 		// the index in the typeToTokenList table is actually shifted to
 		// hold faux labels as you cannot have negative indices.
-        if ( text.charAt(0)=='\'' ) {
-            stringLiteralToTypeMap.put(text, Utils.integer(tokenType));
-        }
-        else { // must be a label like ID
-            tokenIDToTypeMap.put(text, Utils.integer(tokenType));
-        }
+		if ( text.charAt(0)=='\'' ) {
+			composite.stringLiteralToTypeMap.put(text, Utils.integer(tokenType));
+			// track in reverse index too
+			if ( tokenType>=composite.typeToStringLiteralList.size() ) {
+				composite.typeToStringLiteralList.setSize(tokenType+1);
+			}
+			composite.typeToStringLiteralList.set(tokenType, text);
+		}
+		else { // must be a label like ID
+			composite.tokenIDToTypeMap.put(text, Utils.integer(tokenType));
+		}
 		int index = Label.NUM_FAUX_LABELS+tokenType-1;
 		//System.out.println("defining "+name+" token "+text+" at type="+tokenType+", index="+index);
-		this.maxTokenType = Math.max(this.maxTokenType, tokenType);
-        if ( index>=typeToTokenList.size() ) {
-			typeToTokenList.setSize(index+1);
+		composite.maxTokenType = Math.max(composite.maxTokenType, tokenType);
+		if ( index>=composite.typeToTokenList.size() ) {
+			composite.typeToTokenList.setSize(index+1);
 		}
-		String prevToken = (String)typeToTokenList.get(index);
+		String prevToken = (String)composite.typeToTokenList.get(index);
 		if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
 			// only record if nothing there before or if thing before was a literal
-			typeToTokenList.set(index, text);
+			composite.typeToTokenList.set(index, text);
 		}
-    }
+	}
 
 	/** Define a new rule.  A new rule index is created by incrementing
-     *  ruleIndex.
-     */
+	 *  ruleIndex.
+	 */
 	public void defineRule(antlr.Token ruleToken,
 						   String modifier,
 						   Map options,
@@ -940,24 +1366,33 @@ public class Grammar {
 						   int numAlts)
 	{
 		String ruleName = ruleToken.getText();
-		/*
-		System.out.println("defineRule("+ruleName+",modifier="+modifier+
-						   "): index="+ruleIndex);
-		*/
-		if ( getRule(ruleName)!=null ) {
+		if ( getLocallyDefinedRule(ruleName)!=null ) {
 			ErrorManager.grammarError(ErrorManager.MSG_RULE_REDEFINITION,
 									  this, ruleToken, ruleName);
-        }
+			return;
+		}
 
-		Rule r = new Rule(this, ruleName, ruleIndex, numAlts);
+		if ( (type==Grammar.PARSER||type==Grammar.TREE_PARSER) &&
+			 Character.isUpperCase(ruleName.charAt(0)) )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED,
+									  this, ruleToken, ruleName);
+			return;
+		}
+
+		Rule r = new Rule(this, ruleName, composite.ruleIndex, numAlts);
+		/*
+		System.out.println("defineRule("+ruleName+",modifier="+modifier+
+						   "): index="+r.index+", nalts="+numAlts);
+		*/
 		r.modifier = modifier;
-        nameToRuleMap.put(ruleName, r);
+		nameToRuleMap.put(ruleName, r);
 		setRuleAST(ruleName, tree);
 		r.setOptions(options, ruleToken);
 		r.argActionAST = argActionAST;
-        ruleIndexToRuleList.setSize(ruleIndex+1);
-        ruleIndexToRuleList.set(ruleIndex, ruleName);
-        ruleIndex++;
+		composite.ruleIndexToRuleList.setSize(composite.ruleIndex+1);
+		composite.ruleIndexToRuleList.set(composite.ruleIndex, r);
+		composite.ruleIndex++;
 		if ( ruleName.startsWith(SYNPRED_RULE_PREFIX) ) {
 			r.isSynPred = true;
 		}
@@ -972,8 +1407,9 @@ public class Grammar {
 		if ( nameToSynpredASTMap==null ) {
 			nameToSynpredASTMap = new LinkedHashMap();
 		}
-		String predName = null;
-		predName = SYNPRED_RULE_PREFIX+(nameToSynpredASTMap.size() + 1);
+		String predName =
+			SYNPRED_RULE_PREFIX+(nameToSynpredASTMap.size() + 1)+"_"+name;
+		blockAST.setTreeEnclosingRuleNameDeeply(predName);
 		nameToSynpredASTMap.put(predName, blockAST);
 		return predName;
 	}
@@ -992,11 +1428,17 @@ public class Grammar {
 	public void synPredUsedInDFA(DFA dfa, SemanticContext semCtx) {
 		decisionsWhoseDFAsUsesSynPreds.add(dfa);
 		semCtx.trackUseOfSyntacticPredicates(this); // walk ctx looking for preds
-		//System.out.println("after tracking use for dec "+dfa.decisionNumber+": "+synPredNamesUsedInDFA);
 	}
 
+	/*
+	public Set<Rule> getRuleNamesVisitedDuringLOOK() {
+		return rulesSensitiveToOtherRules;
+	}
+	*/
+
 	/** Given @scope::name {action} define it for this grammar.  Later,
-	 *  the code generator will ask for the actions table.
+	 *  the code generator will ask for the actions table.  For composite
+     *  grammars, make sure header action propogates down to all delegates.
 	 */
 	public void defineNamedAction(GrammarAST ampersandAST,
 								  String scope,
@@ -1022,7 +1464,30 @@ public class Grammar {
 		else {
 			scopeActions.put(actionName,actionAST);
 		}
-	}
+        // propogate header (regardless of scope (lexer, parser, ...) ?
+        if ( this==composite.getRootGrammar() && actionName.equals("header") ) {
+            List<Grammar> allgrammars = composite.getRootGrammar().getDelegates();
+            for (Grammar g : allgrammars) {
+                g.defineNamedAction(ampersandAST, scope, nameAST, actionAST);
+            }
+        }
+    }
+
+    public void setSynPredGateIfNotAlready(StringTemplate gateST) {
+        String scope = getDefaultActionScope(type);
+        Map actionsForGrammarScope = (Map)actions.get(scope);
+        // if no synpredgate action set by user then set
+        if ( (actionsForGrammarScope==null ||
+             !actionsForGrammarScope.containsKey(Grammar.SYNPREDGATE_ACTION_NAME)) )
+        {
+            if ( actionsForGrammarScope==null ) {
+                actionsForGrammarScope=new HashMap();
+                actions.put(scope, actionsForGrammarScope);
+            }
+            actionsForGrammarScope.put(Grammar.SYNPREDGATE_ACTION_NAME,
+                                       gateST);
+        }
+    }
 
 	public Map getActions() {
 		return actions;
@@ -1075,9 +1540,9 @@ public class Grammar {
 				buf.append("}");
 			}
 			else if ( t.getType()==ANTLRParser.SEMPRED ||
-				t.getType()==ANTLRParser.SYN_SEMPRED ||
-				t.getType()==ANTLRParser.GATED_SEMPRED ||
-				t.getType()==ANTLRParser.BACKTRACK_SEMPRED )
+					  t.getType()==ANTLRParser.SYN_SEMPRED ||
+					  t.getType()==ANTLRParser.GATED_SEMPRED ||
+					  t.getType()==ANTLRParser.BACKTRACK_SEMPRED )
 			{
 				buf.append("{");
 				buf.append(t.getText());
@@ -1095,9 +1560,11 @@ public class Grammar {
 		String ruleText = buf.toString();
 		//System.out.println("[["+ruleText+"]]");
 		// now put the rule into the lexer grammar template
-		lexerGrammarST.setAttribute("rules", ruleText);
+		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
+			lexerGrammarST.setAttribute("rules", ruleText);
+		}
 		// track this lexer rule's name
-		lexerRules.add(ruleToken.getText());
+		composite.lexerRules.add(ruleToken.getText());
 	}
 
 	/** If someone does PLUS='+' in the parser, must make sure we get
@@ -1107,39 +1574,97 @@ public class Grammar {
 													   String literal,
 													   int tokenType)
 	{
-		//System.out.println("defineLexerRuleForAliasedStringLiteral: "+literal+" "+tokenType);
-		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
-									tokenID,
-									Utils.integer(tokenType),
-									literal);
+		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
+			//System.out.println("defineLexerRuleForAliasedStringLiteral: "+literal+" "+tokenType);
+			lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
+										tokenID,
+										Utils.integer(tokenType),
+										literal);
+		}
 		// track this lexer rule's name
-		lexerRules.add(tokenID);
+		composite.lexerRules.add(tokenID);
 	}
 
 	public void defineLexerRuleForStringLiteral(String literal, int tokenType) {
 		//System.out.println("defineLexerRuleForStringLiteral: "+literal+" "+tokenType);
-		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
-									computeTokenNameFromLiteral(tokenType,literal),
-									Utils.integer(tokenType),
-									literal);
+		// compute new token name like T237 and define it as having tokenType
+		String tokenID = computeTokenNameFromLiteral(tokenType,literal);
+		defineToken(tokenID, tokenType);
+		// tell implicit lexer to define a rule to match the literal
+		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
+			lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
+										tokenID,
+										Utils.integer(tokenType),
+										literal);
+		}
+	}
+
+	public Rule getLocallyDefinedRule(String ruleName) {
+		Rule r = nameToRuleMap.get(ruleName);
+		return r;
 	}
 
 	public Rule getRule(String ruleName) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		Rule r = composite.getRule(ruleName);
+		/*
+		if ( r!=null && r.grammar != this ) {
+			System.out.println(name+".getRule("+ruleName+")="+r);
+		}
+		*/
 		return r;
 	}
 
-    public int getRuleIndex(String ruleName) {
-		Rule r = getRule(ruleName);
+	public Rule getRule(String scopeName, String ruleName) {
+		if ( scopeName!=null ) { // scope override
+			Grammar scope = composite.getGrammar(scopeName);
+			if ( scope==null ) {
+				return null;
+			}
+			return scope.getLocallyDefinedRule(ruleName);
+		}
+		return getRule(ruleName);
+	}
+
+	public int getRuleIndex(String scopeName, String ruleName) {
+		Rule r = getRule(scopeName, ruleName);
 		if ( r!=null ) {
 			return r.index;
 		}
-        return INVALID_RULE_INDEX;
-    }
+		return INVALID_RULE_INDEX;
+	}
 
-    public String getRuleName(int ruleIndex) {
-        return (String)ruleIndexToRuleList.get(ruleIndex);
-    }
+	public int getRuleIndex(String ruleName) {
+		return getRuleIndex(null, ruleName);
+	}
+
+	public String getRuleName(int ruleIndex) {
+		Rule r = composite.ruleIndexToRuleList.get(ruleIndex);
+		if ( r!=null ) {
+			return r.name;
+		}
+		return null;
+	}
+
+	/** Should codegen.g gen rule for ruleName?
+	 * 	If synpred, only gen if used in a DFA.
+	 *  If regular rule, only gen if not overridden in delegator
+	 *  Always gen Tokens rule though.
+	 */
+	public boolean generateMethodForRule(String ruleName) {
+		if ( ruleName.equals(ARTIFICIAL_TOKENS_RULENAME) ) {
+			// always generate Tokens rule to satisfy lexer interface
+			// but it may have no alternatives.
+			return true;
+		}
+		if ( overriddenRules.contains(ruleName) ) {
+			// don't generate any overridden rules
+			return false;
+		}
+		// generate if non-synpred or synpred used in a DFA
+		Rule r = getLocallyDefinedRule(ruleName);
+		return !r.isSynPred ||
+			   (r.isSynPred&&synPredNamesUsedInDFA.contains(ruleName));
+	}
 
 	public AttributeScope defineGlobalScope(String name, Token scopeAction) {
 		AttributeScope scope = new AttributeScope(this, name, scopeAction);
@@ -1178,7 +1703,7 @@ public class Grammar {
 	 *  Rule object to actually define it.
 	 */
 	protected void defineLabel(Rule r, antlr.Token label, GrammarAST element, int type) {
-        boolean err = nameSpaceChecker.checkForLabelTypeMismatch(r, label, type);
+		boolean err = nameSpaceChecker.checkForLabelTypeMismatch(r, label, type);
 		if ( err ) {
 			return;
 		}
@@ -1189,28 +1714,48 @@ public class Grammar {
 									antlr.Token label,
 									GrammarAST tokenRef)
 	{
-        Rule r = getRule(ruleName);
+		Rule r = getLocallyDefinedRule(ruleName);
 		if ( r!=null ) {
 			if ( type==LEXER &&
 				 (tokenRef.getType()==ANTLRParser.CHAR_LITERAL||
-				 tokenRef.getType()==ANTLRParser.BLOCK||
-				 tokenRef.getType()==ANTLRParser.NOT||
-				 tokenRef.getType()==ANTLRParser.CHAR_RANGE||
-				 tokenRef.getType()==ANTLRParser.WILDCARD))
+				  tokenRef.getType()==ANTLRParser.BLOCK||
+				  tokenRef.getType()==ANTLRParser.NOT||
+				  tokenRef.getType()==ANTLRParser.CHAR_RANGE||
+				  tokenRef.getType()==ANTLRParser.WILDCARD))
 			{
-				defineLabel(r, label, tokenRef, CHAR_LABEL);				
+				defineLabel(r, label, tokenRef, CHAR_LABEL);
 			}
-			else {
+            else {
 				defineLabel(r, label, tokenRef, TOKEN_LABEL);
 			}
 		}
 	}
 
-	public void defineRuleRefLabel(String ruleName,
+    public void defineWildcardTreeLabel(String ruleName,
+                                           antlr.Token label,
+                                           GrammarAST tokenRef)
+    {
+        Rule r = getLocallyDefinedRule(ruleName);
+        if ( r!=null ) {
+            defineLabel(r, label, tokenRef, WILDCARD_TREE_LABEL);
+        }
+    }
+
+    public void defineWildcardTreeListLabel(String ruleName,
+                                           antlr.Token label,
+                                           GrammarAST tokenRef)
+    {
+        Rule r = getLocallyDefinedRule(ruleName);
+        if ( r!=null ) {
+            defineLabel(r, label, tokenRef, WILDCARD_TREE_LIST_LABEL);
+        }
+    }
+
+    public void defineRuleRefLabel(String ruleName,
 								   antlr.Token label,
 								   GrammarAST ruleRef)
 	{
-		Rule r = getRule(ruleName);
+		Rule r = getLocallyDefinedRule(ruleName);
 		if ( r!=null ) {
 			defineLabel(r, label, ruleRef, RULE_LABEL);
 		}
@@ -1220,7 +1765,7 @@ public class Grammar {
 									 antlr.Token label,
 									 GrammarAST element)
 	{
-		Rule r = getRule(ruleName);
+		Rule r = getLocallyDefinedRule(ruleName);
 		if ( r!=null ) {
 			defineLabel(r, label, element, TOKEN_LIST_LABEL);
 		}
@@ -1230,7 +1775,7 @@ public class Grammar {
 									antlr.Token label,
 									GrammarAST element)
 	{
-		Rule r = getRule(ruleName);
+		Rule r = getLocallyDefinedRule(ruleName);
 		if ( r!=null ) {
 			if ( !r.getHasMultipleReturnValues() ) {
 				ErrorManager.grammarError(
@@ -1250,13 +1795,27 @@ public class Grammar {
 		for (Iterator it = rewriteElements.iterator(); it.hasNext();) {
 			GrammarAST el = (GrammarAST) it.next();
 			if ( el.getType()==ANTLRParser.LABEL ) {
-				Rule r = getRule(el.enclosingRule);
 				String labelName = el.getText();
-				LabelElementPair pair = r.getLabel(labelName);
-				// if valid label and type is what we're looking for
+				Rule enclosingRule = getLocallyDefinedRule(el.enclosingRuleName);
+				LabelElementPair pair = enclosingRule.getLabel(labelName);
+                /*
+                // if tree grammar and we have a wildcard, only notice it
+                // when looking for rule labels not token label. x=. should
+                // look like a rule ref since could be subtree.
+                if ( type==TREE_PARSER && pair!=null &&
+                     pair.elementRef.getType()==ANTLRParser.WILDCARD )
+                {
+                    if ( labelType==WILDCARD_TREE_LABEL ) {
+                        labels.add(labelName);
+                        continue;
+                    }
+                    else continue;
+                }
+                 */
+                // if valid label and type is what we're looking for
 				// and not ref to old value val $rule, add to list
 				if ( pair!=null && pair.type==labelType &&
-					 !labelName.equals(el.enclosingRule) )
+					 !labelName.equals(el.enclosingRuleName) )
 				{
 					labels.add(labelName);
 				}
@@ -1278,16 +1837,16 @@ public class Grammar {
 			List<GrammarAST> actions = r.getInlineActions();
 			for (int i = 0; i < actions.size(); i++) {
 				GrammarAST actionAST = (GrammarAST) actions.get(i);
-				ActionAnalysisLexer sniffer =
-					new ActionAnalysisLexer(this, r.name, actionAST);
+				ActionAnalysis sniffer =
+					new ActionAnalysis(this, r.name, actionAST);
 				sniffer.analyze();
 			}
 			// walk any named actions like @init, @after
 			Collection<GrammarAST> namedActions = r.getActions().values();
 			for (Iterator it2 = namedActions.iterator(); it2.hasNext();) {
 				GrammarAST actionAST = (GrammarAST) it2.next();
-				ActionAnalysisLexer sniffer =
-					new ActionAnalysisLexer(this, r.name, actionAST);
+				ActionAnalysis sniffer =
+					new ActionAnalysis(this, r.name, actionAST);
 				sniffer.analyze();
 			}
 		}
@@ -1309,10 +1868,10 @@ public class Grammar {
 		}
 	}
 
-    /** A label on a rule is useless if the rule has no return value, no
-     *  tree or template output, and it is not referenced in an action.
-     */
-    protected void removeUselessLabels(Map ruleToElementLabelPairMap) {
+	/** A label on a rule is useless if the rule has no return value, no
+	 *  tree or template output, and it is not referenced in an action.
+	 */
+	protected void removeUselessLabels(Map ruleToElementLabelPairMap) {
 		if ( ruleToElementLabelPairMap==null ) {
 			return;
 		}
@@ -1339,15 +1898,29 @@ public class Grammar {
 	 *
 	 *  This data is also used to verify that all rules have been defined.
 	 */
-	public void altReferencesRule(String ruleName, GrammarAST refAST, int outerAltNum) {
-		Rule r = getRule(ruleName);
+	public void altReferencesRule(String enclosingRuleName,
+								  GrammarAST refScopeAST,
+								  GrammarAST refAST,
+								  int outerAltNum)
+	{
+		/* Do nothing for now; not sure need; track S.x as x
+		String scope = null;
+		Grammar scopeG = null;
+		if ( refScopeAST!=null ) {
+			if ( !scopedRuleRefs.contains(refScopeAST) ) {
+				scopedRuleRefs.add(refScopeAST);
+			}
+			scope = refScopeAST.getText();
+		}
+		*/
+		Rule r = getRule(enclosingRuleName);
 		if ( r==null ) {
-			return;
+			return; // no error here; see NameSpaceChecker
 		}
 		r.trackRuleReferenceInAlt(refAST, outerAltNum);
 		antlr.Token refToken = refAST.getToken();
-		if ( !ruleRefs.contains(refToken) ) {
-			ruleRefs.add(refToken);
+		if ( !ruleRefs.contains(refAST) ) {
+			ruleRefs.add(refAST);
 		}
 	}
 
@@ -1358,7 +1931,7 @@ public class Grammar {
 	 *  Rewrite rules force tracking of all tokens.
 	 */
 	public void altReferencesTokenID(String ruleName, GrammarAST refAST, int outerAltNum) {
-		Rule r = getRule(ruleName);
+		Rule r = getLocallyDefinedRule(ruleName);
 		if ( r==null ) {
 			return;
 		}
@@ -1392,9 +1965,9 @@ public class Grammar {
 	 *  successfully on these.  Useful to skip these rules then and also
 	 *  for ANTLRWorks to highlight them.
 	 */
-	public Set getLeftRecursiveRules() {
+	public Set<Rule> getLeftRecursiveRules() {
 		if ( nfa==null ) {
-			createNFAs();
+			buildNFA();
 		}
 		if ( leftRecursiveRules!=null ) {
 			return leftRecursiveRules;
@@ -1403,11 +1976,12 @@ public class Grammar {
 		return leftRecursiveRules;
 	}
 
-	public void checkRuleReference(GrammarAST refAST,
+	public void checkRuleReference(GrammarAST scopeAST,
+								   GrammarAST refAST,
 								   GrammarAST argsAST,
 								   String currentRuleName)
 	{
-		sanity.checkRuleReference(refAST, argsAST, currentRuleName);
+		sanity.checkRuleReference(scopeAST, refAST, argsAST, currentRuleName);
 	}
 
 	/** Rules like "a : ;" and "a : {...} ;" should not generate
@@ -1428,32 +2002,41 @@ public class Grammar {
 		GrammarAST aRuleRefNode =
 			block.findFirstType(ANTLRParser.RULE_REF);
 		if ( aTokenRefNode==null&&
-			aStringLiteralRefNode==null&&
-			aCharLiteralRefNode==null&&
-			aWildcardRefNode==null&&
-			aRuleRefNode==null )
+			 aStringLiteralRefNode==null&&
+			 aCharLiteralRefNode==null&&
+			 aWildcardRefNode==null&&
+			 aRuleRefNode==null )
 		{
 			return true;
 		}
 		return false;
 	}
 
-    public int getTokenType(String tokenName) {
-        Integer I = null;
-        if ( tokenName.charAt(0)=='\'') {
-            I = (Integer)stringLiteralToTypeMap.get(tokenName);
-        }
-        else { // must be a label like ID
-            I = (Integer)tokenIDToTypeMap.get(tokenName);
-        }
-        int i = (I!=null)?I.intValue():Label.INVALID;
+	public boolean isAtomTokenType(int ttype) {
+		return ttype == ANTLRParser.WILDCARD||
+			   ttype == ANTLRParser.CHAR_LITERAL||
+			   ttype == ANTLRParser.CHAR_RANGE||
+			   ttype == ANTLRParser.STRING_LITERAL||
+			   ttype == ANTLRParser.NOT||
+			   (type != LEXER && ttype == ANTLRParser.TOKEN_REF);
+	}
+
+	public int getTokenType(String tokenName) {
+		Integer I = null;
+		if ( tokenName.charAt(0)=='\'') {
+			I = (Integer)composite.stringLiteralToTypeMap.get(tokenName);
+		}
+		else { // must be a label like ID
+			I = (Integer)composite.tokenIDToTypeMap.get(tokenName);
+		}
+		int i = (I!=null)?I.intValue():Label.INVALID;
 		//System.out.println("grammar type "+type+" "+tokenName+"->"+i);
-        return i;
-    }
+		return i;
+	}
 
 	/** Get the list of tokens that are IDs like BLOCK and LPAREN */
 	public Set getTokenIDs() {
-		return tokenIDToTypeMap.keySet();
+		return composite.tokenIDToTypeMap.keySet();
 	}
 
 	/** Return an ordered integer list of token types that have no
@@ -1474,8 +2057,8 @@ public class Grammar {
 	/** Get a list of all token IDs and literals that have an associated
 	 *  token type.
 	 */
-	public Set getTokenDisplayNames() {
-		Set names = new HashSet();
+	public Set<String> getTokenDisplayNames() {
+		Set<String> names = new HashSet<String>();
 		for (int t =Label.MIN_TOKEN_TYPE; t <=getMaxTokenType(); t++) {
 			names.add(getTokenDisplayName(t));
 		}
@@ -1488,32 +2071,36 @@ public class Grammar {
 	 *
 	 *  11/26/2005: I changed literals to always be '...' even for strings.
 	 *  This routine still works though.
-     */
-    public static int getCharValueFromGrammarCharLiteral(String literal) {
-        if ( literal.length()==3 ) {
-			// 'x'
-            return literal.charAt(1); // no escape char
-        }
-        else if ( literal.length() == 4 )
-        {
-			// '\x'  (antlr lexer will catch invalid char)
-			int escChar = literal.charAt(2);
-			int charVal = ANTLRLiteralEscapedCharValue[escChar];
-			if ( charVal==0 ) {
-				// Unnecessary escapes like '\{' should just yield {
-				return escChar;
-			}
-			return charVal;
-        }
-        else if( literal.length() == 8 )
-        {
-        	// '\u1234'
-        	String unicodeChars = literal.substring(3,literal.length()-1);
-    		return Integer.parseInt(unicodeChars, 16);
-         }
-		ErrorManager.assertTrue(false, "invalid char literal: "+literal);
-		return -1;
-    }
+	 */
+	public static int getCharValueFromGrammarCharLiteral(String literal) {
+		switch ( literal.length() ) {
+			case 3 :
+				// 'x'
+				return literal.charAt(1); // no escape char
+			case 4 :
+				// '\x'  (antlr lexer will catch invalid char)
+				if ( Character.isDigit(literal.charAt(2)) ) {
+					ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
+									   "invalid char literal: "+literal);
+					return -1;
+				}
+				int escChar = literal.charAt(2);
+				int charVal = ANTLRLiteralEscapedCharValue[escChar];
+				if ( charVal==0 ) {
+					// Unnecessary escapes like '\{' should just yield {
+					return escChar;
+				}
+				return charVal;
+			case 8 :
+				// '\u1234'
+				String unicodeChars = literal.substring(3,literal.length()-1);
+				return Integer.parseInt(unicodeChars, 16);
+			default :
+				ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
+								   "invalid char literal: "+literal);
+				return -1;
+		}
+	}
 
 	/** ANTLR does not convert escape sequences during the parse phase because
 	 *  it could not know how to print String/char literals back out when
@@ -1546,6 +2133,11 @@ public class Grammar {
 					i+=4-1; // loop will inc by 1; only jump 3 then
 					buf.append((char)val);
 				}
+				else if ( Character.isDigit(c) ) {
+					ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
+									   "invalid char literal: "+literal);
+					buf.append("\\"+(char)c);
+				}
 				else {
 					buf.append((char)ANTLRLiteralEscapedCharValue[c]); // normal \x escape
 				}
@@ -1560,26 +2152,121 @@ public class Grammar {
 
 	/** Pull your token definitions from an existing grammar in memory.
 	 *  You must use Grammar() ctor then this method then setGrammarContent()
-	 *  to make this work.  This is useful primarily for testing and
-	 *  interpreting grammars.  Return the max token type found.
+	 *  to make this work.  This was useful primarily for testing and
+	 *  interpreting grammars until I added import grammar functionality.
+	 *  When you import a grammar you implicitly import its vocabulary as well
+	 *  and keep the same token type values.
+	 *
+	 *  Returns the max token type found.
 	 */
 	public int importTokenVocabulary(Grammar importFromGr) {
 		Set importedTokenIDs = importFromGr.getTokenIDs();
 		for (Iterator it = importedTokenIDs.iterator(); it.hasNext();) {
 			String tokenID = (String) it.next();
 			int tokenType = importFromGr.getTokenType(tokenID);
-			maxTokenType = Math.max(maxTokenType,tokenType);
+			composite.maxTokenType = Math.max(composite.maxTokenType,tokenType);
 			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
 				//System.out.println("import token from grammar "+tokenID+"="+tokenType);
 				defineToken(tokenID, tokenType);
 			}
 		}
-		return maxTokenType; // return max found
+		return composite.maxTokenType; // return max found
+	}
+
+	/** Import the rules/tokens of a delegate grammar. All delegate grammars are
+	 *  read during the ctor of first Grammar created.
+	 *
+	 *  Do not create NFA here because NFA construction needs to hook up with
+	 *  overridden rules in delegation root grammar.
+	 */
+	public void importGrammar(GrammarAST grammarNameAST, String label) {
+		String grammarName = grammarNameAST.getText();
+		//System.out.println("import "+gfile.getName());
+		String gname = grammarName + GRAMMAR_FILE_EXTENSION;
+		BufferedReader br = null;
+		try {
+			String fullName = tool.getLibraryFile(gname);
+			FileReader fr = new FileReader(fullName);
+			br = new BufferedReader(fr);
+			Grammar delegateGrammar = null;
+			delegateGrammar = new Grammar(tool, gname, composite);
+			delegateGrammar.label = label;
+
+			addDelegateGrammar(delegateGrammar);
+
+			delegateGrammar.parseAndBuildAST(br);
+			if ( !validImport(delegateGrammar) ) {
+				ErrorManager.grammarError(ErrorManager.MSG_INVALID_IMPORT,
+										  this,
+										  grammarNameAST.token,
+										  this,
+										  delegateGrammar);
+				return;
+			}
+			if ( this.type==COMBINED &&
+				 (delegateGrammar.name.equals(this.name+grammarTypeToFileNameSuffix[LEXER])||
+				  delegateGrammar.name.equals(this.name+grammarTypeToFileNameSuffix[PARSER])) )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_IMPORT_NAME_CLASH,
+										  this,
+										  grammarNameAST.token,
+										  this,
+										  delegateGrammar);
+				return;
+			}
+			if ( delegateGrammar.grammarTree!=null ) {
+				// we have a valid grammar
+				// deal with combined grammars
+				if ( delegateGrammar.type == LEXER && this.type == COMBINED ) {
+					// ooops, we wasted some effort; tell lexer to read it in
+					// later
+					lexerGrammarST.setAttribute("imports", grammarName);
+					// but, this parser grammar will need the vocab
+					// so add to composite anyway so we suck in the tokens later
+				}
+			}
+			//System.out.println("Got grammar:\n"+delegateGrammar);
+		}
+		catch (IOException ioe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
+							   gname,
+							   ioe);
+		}
+		finally {
+			if ( br!=null ) {
+				try {
+					br.close();
+				}
+				catch (IOException ioe) {
+					ErrorManager.error(ErrorManager.MSG_CANNOT_CLOSE_FILE,
+									   gname,
+									   ioe);
+				}
+			}
+		}
+	}
+
+	/** add new delegate to composite tree */
+	protected void addDelegateGrammar(Grammar delegateGrammar) {
+		CompositeGrammarTree t = composite.delegateGrammarTreeRoot.findNode(this);
+		t.addChild(new CompositeGrammarTree(delegateGrammar));
+		// make sure new grammar shares this composite
+		delegateGrammar.composite = this.composite;
 	}
 
 	/** Load a vocab file <vocabName>.tokens and return max token type found. */
-	public int importTokenVocabulary(String vocabName) {
-		File fullFile = getImportedVocabFileName(vocabName);
+	public int importTokenVocabulary(GrammarAST tokenVocabOptionAST,
+									 String vocabName)
+	{
+		if ( !getGrammarIsRoot() ) {
+			ErrorManager.grammarWarning(ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE,
+										this,
+										tokenVocabOptionAST.token,
+										name);
+			return composite.maxTokenType;
+		}
+
+		File fullFile = tool.getImportedVocabFile(vocabName);
 		try {
 			FileReader fr = new FileReader(fullFile);
 			BufferedReader br = new BufferedReader(fr);
@@ -1632,7 +2319,7 @@ public class Grammar {
 				int tokenType = (int)tokenizer.nval;
 				token = tokenizer.nextToken();
 				//System.out.println("import "+tokenID+"="+tokenType);
-				maxTokenType = Math.max(maxTokenType,tokenType);
+				composite.maxTokenType = Math.max(composite.maxTokenType,tokenType);
 				defineToken(tokenID, tokenType);
 				lineNum++;
 				if ( token != StreamTokenizer.TT_EOL ) {
@@ -1661,7 +2348,7 @@ public class Grammar {
 							   fullFile,
 							   e);
 		}
-		return maxTokenType;
+		return composite.maxTokenType;
 	}
 
 	/** Given a token type, get a meaningful name for it such as the ID
@@ -1679,42 +2366,42 @@ public class Grammar {
 		}
 		// faux label?
 		else if ( ttype<0 ) {
-			tokenName = (String)typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype);
+			tokenName = (String)composite.typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype);
 		}
 		else {
 			// compute index in typeToTokenList for ttype
 			index = ttype-1; // normalize to 0..n-1
 			index += Label.NUM_FAUX_LABELS;     // jump over faux tokens
 
-			if ( index<typeToTokenList.size() ) {
-				tokenName = (String)typeToTokenList.get(index);
+			if ( index<composite.typeToTokenList.size() ) {
+				tokenName = (String)composite.typeToTokenList.get(index);
+				if ( tokenName!=null &&
+					 tokenName.startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX) )
+				{
+					tokenName = composite.typeToStringLiteralList.get(ttype);
+				}
 			}
 			else {
 				tokenName = String.valueOf(ttype);
 			}
 		}
-		//System.out.println("getTokenDisplaYanme ttype="+ttype+", index="+index+", name="+tokenName);
+		//System.out.println("getTokenDisplayName ttype="+ttype+", index="+index+", name="+tokenName);
 		return tokenName;
 	}
 
 	/** Get the list of ANTLR String literals */
-	public Set getStringLiterals() {
-		return stringLiteralToTypeMap.keySet();
+	public Set<String> getStringLiterals() {
+		return composite.stringLiteralToTypeMap.keySet();
+	}
+
+	public String getGrammarTypeString() {
+		return grammarTypeToString[type];
 	}
 
 	public int getGrammarMaxLookahead() {
 		if ( global_k>=0 ) {
 			return global_k;
 		}
-		/*
-		Integer kI = (Integer)getOption("k");
-		if ( kI!=null ) {
-			global_k = kI.intValue();
-		}
-		else {
-			global_k = 0;
-		}
-		*/
 		Object k = getOption("k");
 		if ( k==null ) {
 			global_k = 0;
@@ -1735,8 +2422,8 @@ public class Grammar {
 	/** Save the option key/value pair and process it; return the key
 	 *  or null if invalid option.
 	 */
-    public String setOption(String key, Object value, antlr.Token optionsStartToken) {
-		if ( !legalOptions.contains(key) ) {
+	public String setOption(String key, Object value, antlr.Token optionsStartToken) {
+		if ( legalOption(key) ) {
 			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
 									  this,
 									  optionsStartToken,
@@ -1746,30 +2433,50 @@ public class Grammar {
 		if ( !optionIsValid(key, value) ) {
 			return null;
 		}
-		if ( options==null ) {
+        if ( key.equals("backtrack") && value.toString().equals("true") ) {
+            composite.getRootGrammar().atLeastOneBacktrackOption = true;
+        }
+        if ( options==null ) {
 			options = new HashMap();
 		}
 		options.put(key, value);
 		return key;
-    }
+	}
+
+	public boolean legalOption(String key) {
+		switch ( type ) {
+			case LEXER :
+				return !legalLexerOptions.contains(key);
+			case PARSER :
+				return !legalParserOptions.contains(key);
+			case TREE_PARSER :
+				return !legalTreeParserOptions.contains(key);
+			default :
+				return !legalParserOptions.contains(key);
+		}
+	}
 
-    public void setOptions(Map options, antlr.Token optionsStartToken) {
+	public void setOptions(Map options, antlr.Token optionsStartToken) {
 		if ( options==null ) {
 			this.options = null;
 			return;
 		}
-        Set keys = options.keySet();
-        for (Iterator it = keys.iterator(); it.hasNext();) {
-            String optionName = (String) it.next();
-            Object optionValue = options.get(optionName);
-            String stored=setOption(optionName, optionValue, optionsStartToken);
+		Set keys = options.keySet();
+		for (Iterator it = keys.iterator(); it.hasNext();) {
+			String optionName = (String) it.next();
+			Object optionValue = options.get(optionName);
+			String stored=setOption(optionName, optionValue, optionsStartToken);
 			if ( stored==null ) {
 				it.remove();
 			}
-        }
-    }
+		}
+	}
 
-    public Object getOption(String key) {
+	public Object getOption(String key) {
+		return composite.getOption(key);
+	}
+
+	public Object getLocallyDefinedOption(String key) {
 		Object value = null;
 		if ( options!=null ) {
 			value = options.get(key);
@@ -1778,7 +2485,50 @@ public class Grammar {
 			value = defaultOptions.get(key);
 		}
 		return value;
-    }
+	}
+
+	public Object getBlockOption(GrammarAST blockAST, String key) {
+		String v = (String)blockAST.getBlockOption(key);
+		if ( v!=null ) {
+			return v;
+		}
+		if ( type==Grammar.LEXER ) {
+			return defaultLexerBlockOptions.get(key);
+		}
+		return defaultBlockOptions.get(key);
+	}
+
+	public int getUserMaxLookahead(int decision) {
+		int user_k = 0;
+		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decision);
+		Object k = blockAST.getBlockOption("k");
+		if ( k==null ) {
+			user_k = nfa.grammar.getGrammarMaxLookahead();
+			return user_k;
+		}
+		if (k instanceof Integer) {
+			Integer kI = (Integer)k;
+			user_k = kI.intValue();
+		}
+		else {
+			// must be String "*"
+			if ( k.equals("*") ) {
+				user_k = 0;
+			}
+		}
+		return user_k;
+	}
+
+	public boolean getAutoBacktrackMode(int decision) {
+		NFAState decisionNFAStartState = getDecisionNFAStartState(decision);
+		String autoBacktrack =
+			(String)getBlockOption(decisionNFAStartState.associatedASTNode, "backtrack");
+		
+		if ( autoBacktrack==null ) {
+			autoBacktrack = (String)nfa.grammar.getOption("backtrack");
+		}
+		return autoBacktrack!=null&&autoBacktrack.equals("true");
+	}
 
 	public boolean optionIsValid(String key, Object value) {
 		return true;
@@ -1787,7 +2537,15 @@ public class Grammar {
 	public boolean buildAST() {
 		String outputType = (String)getOption("output");
 		if ( outputType!=null ) {
-			return outputType.equals("AST");
+			return outputType.toString().equals("AST");
+		}
+		return false;
+	}
+
+	public boolean rewriteMode() {
+		Object outputType = getOption("rewrite");
+		if ( outputType!=null ) {
+			return outputType.toString().equals("true");
 		}
 		return false;
 	}
@@ -1799,66 +2557,132 @@ public class Grammar {
 	public boolean buildTemplate() {
 		String outputType = (String)getOption("output");
 		if ( outputType!=null ) {
-			return outputType.equals("template");
+			return outputType.toString().equals("template");
 		}
 		return false;
 	}
 
-    public Collection getRules() {
-        return nameToRuleMap.values();
-    }
+	public Collection<Rule> getRules() {
+		return nameToRuleMap.values();
+	}
 
-	public void setRuleAST(String ruleName, GrammarAST t) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
-		if ( r!=null ) {
-			r.tree = t;
-			r.EORNode = t.getLastChild();
-		}
+	/** Get the set of Rules that need to have manual delegations
+	 *  like "void rule() { importedGrammar.rule(); }"
+	 *
+	 *  If this grammar is master, get list of all rule definitions from all
+	 *  delegate grammars.  Only master has complete interface from combined
+	 *  grammars...we will generated delegates as helper objects.
+	 *
+	 *  Composite grammars that are not the root/master do not have complete
+	 *  interfaces.  It is not my intention that people use subcomposites.
+	 *  Only the outermost grammar should be used from outside code.  The
+	 *  other grammar components are specifically generated to work only
+	 *  with the master/root. 
+	 *
+	 *  delegatedRules = imported - overridden
+	 */
+	public Set<Rule> getDelegatedRules() {
+		return composite.getDelegatedRules(this);
 	}
 
-    public void setRuleStartState(String ruleName, NFAState startState) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
-		if ( r!=null ) {
-	        r.startState = startState;
+	/** Get set of all rules imported from all delegate grammars even if
+	 *  indirectly delegated.
+	 */
+	public Set<Rule> getAllImportedRules() {
+		return composite.getAllImportedRules(this);
+	}
+
+	/** Get list of all delegates from all grammars directly or indirectly
+	 *  imported into this grammar.
+	 */
+	public List<Grammar> getDelegates() {
+		return composite.getDelegates(this);
+	}
+
+	public List<String> getDelegateNames() {
+		// compute delegates:{Grammar g | return g.name;}
+		List<String> names = new ArrayList<String>();
+		List<Grammar> delegates = composite.getDelegates(this);
+		if ( delegates!=null ) {
+			for (Grammar g : delegates) {
+				names.add(g.name);
+			}
 		}
-    }
+		return names;
+	}
+
+	public List<Grammar> getDirectDelegates() {
+		return composite.getDirectDelegates(this);
+	}
+	
+	/** Get delegates below direct delegates */
+	public List<Grammar> getIndirectDelegates() {
+		return composite.getIndirectDelegates(this);
+	}
+
+	/** Get list of all delegators.  This amounts to the grammars on the path
+	 *  to the root of the delegation tree.
+	 */
+	public List<Grammar> getDelegators() {
+		return composite.getDelegators(this);
+	}
+
+	/** Who's my direct parent grammar? */
+	public Grammar getDelegator() {
+		return composite.getDelegator(this);
+	}
+
+	public Set<Rule> getDelegatedRuleReferences() {
+		return delegatedRuleReferences;
+	}
+
+	public boolean getGrammarIsRoot() {
+		return composite.delegateGrammarTreeRoot.grammar == this;
+	}
 
-    public void setRuleStopState(String ruleName, NFAState stopState) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
+	public void setRuleAST(String ruleName, GrammarAST t) {
+		Rule r = getLocallyDefinedRule(ruleName);
 		if ( r!=null ) {
-	        r.stopState = stopState;
+			r.tree = t;
+			r.EORNode = t.getLastChild();
 		}
-    }
+	}
 
 	public NFAState getRuleStartState(String ruleName) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		return getRuleStartState(null, ruleName);
+	}
+
+	public NFAState getRuleStartState(String scopeName, String ruleName) {
+		Rule r = getRule(scopeName, ruleName);
 		if ( r!=null ) {
+			//System.out.println("getRuleStartState("+scopeName+", "+ruleName+")="+r.startState);
 			return r.startState;
 		}
+		//System.out.println("getRuleStartState("+scopeName+", "+ruleName+")=null");
 		return null;
 	}
 
 	public String getRuleModifier(String ruleName) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
+		Rule r = getRule(ruleName);
 		if ( r!=null ) {
 			return r.modifier;
 		}
 		return null;
 	}
 
-    public NFAState getRuleStopState(String ruleName) {
-		Rule r = (Rule)nameToRuleMap.get(ruleName);
+	public NFAState getRuleStopState(String ruleName) {
+		Rule r = getRule(ruleName);
 		if ( r!=null ) {
 			return r.stopState;
 		}
 		return null;
-    }
+	}
 
-    public int assignDecisionNumber(NFAState state) {
-        decisionNumber++;
-        state.setDecisionNumber(decisionNumber);
-        return decisionNumber;
-    }
+	public int assignDecisionNumber(NFAState state) {
+		decisionCount++;
+		state.setDecisionNumber(decisionCount);
+		return decisionCount;
+	}
 
 	protected Decision getDecision(int decision) {
 		int index = decision-1;
@@ -1876,27 +2700,27 @@ public class Grammar {
 		}
 		Decision d = new Decision();
 		d.decision = decision;
-        indexToDecision.setSize(getNumberOfDecisions());
-        indexToDecision.set(index, d);
+		indexToDecision.setSize(getNumberOfDecisions());
+		indexToDecision.set(index, d);
 		return d;
 	}
 
-    public List getDecisionNFAStartStateList() {
+	public List getDecisionNFAStartStateList() {
 		List states = new ArrayList(100);
 		for (int d = 0; d < indexToDecision.size(); d++) {
-			Decision dec = (Decision) indexToDecision.elementAt(d);
+			Decision dec = (Decision) indexToDecision.get(d);
 			states.add(dec.startState);
 		}
-        return states;
-    }
+		return states;
+	}
 
-    public NFAState getDecisionNFAStartState(int decision) {
-        Decision d = getDecision(decision);
+	public NFAState getDecisionNFAStartState(int decision) {
+		Decision d = getDecision(decision);
 		if ( d==null ) {
 			return null;
 		}
 		return d.startState;
-    }
+	}
 
 	public DFA getLookaheadDFA(int decision) {
 		Decision d = getDecision(decision);
@@ -1972,7 +2796,7 @@ public class Grammar {
     */
 
 	public int getNumberOfDecisions() {
-		return decisionNumber;
+		return decisionCount;
 	}
 
 	public int getNumberOfCyclicDecisions() {
@@ -1999,7 +2823,7 @@ public class Grammar {
 	public void setLookaheadDFA(int decision, DFA lookaheadDFA) {
 		Decision d = createDecision(decision);
 		d.dfa = lookaheadDFA;
-		GrammarAST ast = d.startState.getAssociatedASTNode();
+		GrammarAST ast = d.startState.associatedASTNode;
 		ast.setLookaheadDFA(lookaheadDFA);
 	}
 
@@ -2019,9 +2843,9 @@ public class Grammar {
 	}
 
 	/** How many token types have been allocated so far? */
-    public int getMaxTokenType() {
-        return maxTokenType;
-    }
+	public int getMaxTokenType() {
+		return composite.maxTokenType;
+	}
 
 	/** What is the max char value possible for this grammar's target?  Use
 	 *  unicode max if no target defined.
@@ -2072,7 +2896,7 @@ public class Grammar {
 			return '\''+ANTLRLiteralCharValueEscape[c]+'\'';
 		}
 		if ( Character.UnicodeBlock.of((char)c)==Character.UnicodeBlock.BASIC_LATIN &&
-			!Character.isISOControl((char)c) ) {
+			 !Character.isISOControl((char)c) ) {
 			if ( c=='\\' ) {
 				return "'\\\\'";
 			}
@@ -2088,23 +2912,23 @@ public class Grammar {
 		return unicodeStr;
 	}
 
-    /** For lexer grammars, return everything in unicode not in set.
-     *  For parser and tree grammars, return everything in token space
-     *  from MIN_TOKEN_TYPE to last valid token type or char value.
-     */
-    public IntSet complement(IntSet set) {
-        //System.out.println("complement "+set.toString(this));
-        //System.out.println("vocabulary "+getTokenTypes().toString(this));
-        IntSet c = set.complement(getTokenTypes());
-        //System.out.println("result="+c.toString(this));
-        return c;
-    }
+	/** For lexer grammars, return everything in unicode not in set.
+	 *  For parser and tree grammars, return everything in token space
+	 *  from MIN_TOKEN_TYPE to last valid token type or char value.
+	 */
+	public IntSet complement(IntSet set) {
+		//System.out.println("complement "+set.toString(this));
+		//System.out.println("vocabulary "+getTokenTypes().toString(this));
+		IntSet c = set.complement(getTokenTypes());
+		//System.out.println("result="+c.toString(this));
+		return c;
+	}
 
-    public IntSet complement(int atom) {
-        return complement(IntervalSet.of(atom));
-    }
+	public IntSet complement(int atom) {
+		return complement(IntervalSet.of(atom));
+	}
 
-	/** Given set tree like ( SET A B ) in lexer, check that A and B
+	/** Given set tree like ( SET A B ), check that A and B
 	 *  are both valid sets themselves, else we must tree like a BLOCK
 	 */
 	public boolean isValidSet(TreeToNFAConverter nfabuilder, GrammarAST t) {
@@ -2139,31 +2963,31 @@ public class Grammar {
 		}
 		IntSet elements = null;
 		//System.out.println("parsed tree: "+r.tree.toStringTree());
-	    elements = nfabuilder.setRule(r.tree);
+		elements = nfabuilder.setRule(r.tree);
 		//System.out.println("elements="+elements);
 		return elements;
 	}
 
 	/** Decisions are linked together with transition(1).  Count how
-     *  many there are.  This is here rather than in NFAState because
-     *  a grammar decides how NFAs are put together to form a decision.
-     */
-    public int getNumberOfAltsForDecisionNFA(NFAState decisionState) {
-        if ( decisionState==null ) {
-            return 0;
-        }
-        int n = 1;
-        NFAState p = decisionState;
-        while ( p.transition(1)!=null ) {
-            n++;
-            p = (NFAState)p.transition(1).target;
-        }
-        return n;
-    }
+	 *  many there are.  This is here rather than in NFAState because
+	 *  a grammar decides how NFAs are put together to form a decision.
+	 */
+	public int getNumberOfAltsForDecisionNFA(NFAState decisionState) {
+		if ( decisionState==null ) {
+			return 0;
+		}
+		int n = 1;
+		NFAState p = decisionState;
+		while ( p.transition[1] !=null ) {
+			n++;
+			p = (NFAState)p.transition[1].target;
+		}
+		return n;
+	}
 
-    /** Get the ith alternative (1..n) from a decision; return null when
-     *  an invalid alt is requested.  I must count in to find the right
-     *  alternative number.  For (A|B), you get NFA structure (roughly):
+	/** Get the ith alternative (1..n) from a decision; return null when
+	 *  an invalid alt is requested.  I must count in to find the right
+	 *  alternative number.  For (A|B), you get NFA structure (roughly):
 	 *
 	 *  o->o-A->o
 	 *  |
@@ -2171,114 +2995,62 @@ public class Grammar {
 	 *
 	 *  This routine returns the leftmost state for each alt.  So alt=1, returns
 	 *  the upperleft most state in this structure.
-     */
-    public NFAState getNFAStateForAltOfDecision(NFAState decisionState, int alt) {
-        if ( decisionState==null || alt<=0 ) {
-            return null;
-        }
-        int n = 1;
-        NFAState p = decisionState;
-        while ( p!=null ) {
-            if ( n==alt ) {
-                return p;
-            }
-            n++;
-            Transition next = p.transition(1);
-            p = null;
-            if ( next!=null ) {
-                p = (NFAState)next.target;
-            }
-        }
-        return null;
-    }
-
-	/** From an NFA state, s, find the set of all labels reachable from s.
-	 *  This computes FIRST, FOLLOW and any other lookahead computation
-	 *  depending on where s is.
-	 *
-	 *  Record, with EOR_TOKEN_TYPE, if you hit the end of a rule so we can
-	 *  know at runtime (when these sets are used) to start walking up the
-	 *  follow chain to compute the real, correct follow set.
-	 *
-	 *  This routine will only be used on parser and tree parser grammars.
-	 *
-	 *  TODO: it does properly handle a : b A ; where b is nullable
-	 *  Actually it stops at end of rules, returning EOR.  Hmm...
-	 *  should check for that and keep going.
 	 */
-	public LookaheadSet LOOK(NFAState s) {
-		lookBusy.clear();
-		return _LOOK(s);
-	}
-
-	protected LookaheadSet _LOOK(NFAState s) {
-		if ( s.isAcceptState() ) {
-			return new LookaheadSet(Label.EOR_TOKEN_TYPE);
-		}
-
-		if ( lookBusy.contains(s) ) {
-			// return a copy of an empty set; we may modify set inline
-			return new LookaheadSet();
-		}
-		lookBusy.add(s);
-		Transition transition0 = s.transition(0);
-		if ( transition0==null ) {
+	public NFAState getNFAStateForAltOfDecision(NFAState decisionState, int alt) {
+		if ( decisionState==null || alt<=0 ) {
 			return null;
 		}
-
-		if ( transition0.label.isAtom() ) {
-			int atom = transition0.label.getAtom();
-			if ( atom==Label.EOF ) {
-				return LookaheadSet.EOF();
-			}
-			return new LookaheadSet(atom);
-		}
-		if ( transition0.label.isSet() ) {
-			IntSet sl = transition0.label.getSet();
-			LookaheadSet laSet = new LookaheadSet(sl);
-			if ( laSet.member(Label.EOF) ) {
-				laSet.remove(Label.EOF);
-				laSet.hasEOF = true;
+		int n = 1;
+		NFAState p = decisionState;
+		while ( p!=null ) {
+			if ( n==alt ) {
+				return p;
 			}
-			return laSet;
-		}
-        LookaheadSet tset = _LOOK((NFAState)transition0.target);
-		if ( tset.member(Label.EOR_TOKEN_TYPE) ) {
-			if ( transition0 instanceof RuleClosureTransition ) {
-				// we called a rule that found the end of the rule.
-				// That means the rule is nullable and we need to
-				// keep looking at what follows the rule ref.  E.g.,
-				// a : b A ; where b is nullable means that LOOK(a)
-				// should include A.
-				RuleClosureTransition ruleInvocationTrans =
-					(RuleClosureTransition)transition0;
-				// remove the EOR and get what follows
-				tset.remove(Label.EOR_TOKEN_TYPE);
-				LookaheadSet fset =
-					_LOOK((NFAState)ruleInvocationTrans.getFollowState());
-				tset.orInPlace(fset);
+			n++;
+			Transition next = p.transition[1];
+			p = null;
+			if ( next!=null ) {
+				p = (NFAState)next.target;
 			}
 		}
+		return null;
+	}
 
-		Transition transition1 = s.transition(1);
-		if ( transition1!=null ) {
-			LookaheadSet tset1 = _LOOK((NFAState)transition1.target);
-			tset.orInPlace(tset1);
+	/*
+	public void computeRuleFOLLOWSets() {
+		if ( getNumberOfDecisions()==0 ) {
+			createNFAs();
+		}
+		for (Iterator it = getRules().iterator(); it.hasNext();) {
+			Rule r = (Rule)it.next();
+			if ( r.isSynPred ) {
+				continue;
+			}
+			LookaheadSet s = ll1Analyzer.FOLLOW(r);
+			System.out.println("FOLLOW("+r.name+")="+s);
 		}
-		return tset;
 	}
+	*/
 
-    public void setCodeGenerator(CodeGenerator generator) {
-        this.generator = generator;
-    }
+	public LookaheadSet FIRST(NFAState s) {
+		return ll1Analyzer.FIRST(s);
+	}
 
-    public CodeGenerator getCodeGenerator() {
-        return generator;
-    }
+	public LookaheadSet LOOK(NFAState s) {
+		return ll1Analyzer.LOOK(s);
+	}
 
-    public GrammarAST getGrammarTree() {
-        return grammarTree;
-    }
+	public void setCodeGenerator(CodeGenerator generator) {
+		this.generator = generator;
+	}
+
+	public CodeGenerator getCodeGenerator() {
+		return generator;
+	}
+
+	public GrammarAST getGrammarTree() {
+		return grammarTree;
+	}
 
 	public Tool getTool() {
 		return tool;
@@ -2293,37 +3065,27 @@ public class Grammar {
 	 *  if there is an aliased name from tokens like PLUS='+', use it.
 	 */
 	public String computeTokenNameFromLiteral(int tokenType, String literal) {
-		return "T"+tokenType;
+		return AUTO_GENERATED_TOKEN_NAME_PREFIX +tokenType;
 	}
 
-    public String toString() {
-        return grammarTreeToString(grammarTree);
-    }
+	public String toString() {
+		return grammarTreeToString(grammarTree);
+	}
 
 	public String grammarTreeToString(GrammarAST t) {
 		return grammarTreeToString(t, true);
 	}
 
 	public String grammarTreeToString(GrammarAST t, boolean showActions) {
-        String s = null;
-        try {
-            s = t.getLine()+":"+t.getColumn()+": ";
-            s += new ANTLRTreePrinter().toString((AST)t, this, showActions);
-        }
-        catch (Exception e) {
-            ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-							   t,
-							   e);
-        }
-        return s;
-    }
-
-	public void setWatchNFAConversion(boolean watchNFAConversion) {
-		this.watchNFAConversion = watchNFAConversion;
-	}
-
-	public boolean getWatchNFAConversion() {
-		return watchNFAConversion;
+		String s = null;
+		try {
+			s = t.getLine()+":"+t.getColumn()+": ";
+			s += new ANTLRTreePrinter().toString((AST)t, this, showActions);
+		}
+		catch (Exception e) {
+			s = "<invalid or missing tree structure>";
+		}
+		return s;
 	}
 
 	public void printGrammar(PrintStream output) {
diff --git a/src/org/antlr/tool/GrammarAST.java b/tool/src/main/java/org/antlr/tool/GrammarAST.java
similarity index 81%
rename from src/org/antlr/tool/GrammarAST.java
rename to tool/src/main/java/org/antlr/tool/GrammarAST.java
index 688908e..b9515d5 100644
--- a/src/org/antlr/tool/GrammarAST.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarAST.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@ import org.antlr.analysis.DFA;
 import org.antlr.analysis.NFAState;
 import org.antlr.misc.IntSet;
 import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.grammar.v2.ANTLRParser;
 
 import java.util.*;
 
@@ -60,7 +61,7 @@ public class GrammarAST extends BaseAST {
 	/** This AST node was created from what token? */
     public Token token = null;
 
-    protected String enclosingRule = null;
+    public String enclosingRuleName;
 
 	/** If this is a RULE node then track rule's start, stop tokens' index. */
 	public int ruleStartTokenIndex;
@@ -88,7 +89,7 @@ public class GrammarAST extends BaseAST {
     protected IntSet setValue = null;
 
     /** If this is a BLOCK node, track options here */
-    protected Map options;
+    protected Map<String,Object> blockOptions;
 
 	/** If this is a BLOCK node for a rewrite rule, track referenced
 	 *  elements here.  Don't track elements in nested subrules.
@@ -106,14 +107,9 @@ public class GrammarAST extends BaseAST {
 	 *
 	 *  If BLOCK then tracks every element at that level and below.
 	 */
-	public Set<GrammarAST> rewriteRefsDeep;	
+	public Set<GrammarAST> rewriteRefsDeep;
 
-	public static final Set legalBlockOptions =
-			new HashSet() {{add("k"); add("greedy"); add("backtrack"); add("memoize");}};
-
-	/** What are the default options for a subrule? */
-    public static final Map defaultBlockOptions =
-            new HashMap() {{put("greedy","true");}};
+	public Map<String,Object> terminalOptions;
 
 	/** if this is an ACTION node, this is the outermost enclosing
 	 *  alt num in rule.  For actions, define.g sets these (used to
@@ -130,7 +126,23 @@ public class GrammarAST extends BaseAST {
 	 *  a label if someone does $tokenref or $ruleref in an action.
 	 */
 	public StringTemplate code;
+    
+    /**
+     * 
+     * @return
+     */
+    public Map<String, Object> getBlockOptions() {
+        return blockOptions;
+    }
 
+    /**
+     * 
+     * @param blockOptions
+     */
+    public void setBlockOptions(Map<String, Object> blockOptions) {
+        this.blockOptions = blockOptions;
+    }
+        
 	public GrammarAST() {;}
 
 	public GrammarAST(int t, String txt) {
@@ -142,8 +154,15 @@ public class GrammarAST extends BaseAST {
     }
 
     public void initialize(AST ast) {
-		this.token = ((GrammarAST)ast).token;
-    }
+		GrammarAST t = ((GrammarAST)ast);
+		this.token = t.token;
+		this.enclosingRuleName = t.enclosingRuleName;
+		this.ruleStartTokenIndex = t.ruleStartTokenIndex;
+		this.ruleStopTokenIndex = t.ruleStopTokenIndex;
+		this.setValue = t.setValue;
+		this.blockOptions = t.blockOptions;
+		this.outerAltNum = t.outerAltNum;
+	}
 
     public void initialize(Token token) {
         this.token = token;
@@ -172,8 +191,22 @@ public class GrammarAST extends BaseAST {
 	/** Save the option key/value pair and process it; return the key
 	 *  or null if invalid option.
 	 */
-	public String setOption(Grammar grammar, String key, Object value) {
-		if ( !legalBlockOptions.contains(key) ) {
+	public String setBlockOption(Grammar grammar, String key, Object value) {
+		if ( blockOptions == null ) {
+			blockOptions = new HashMap();
+		}
+		return setOption(blockOptions, Grammar.legalBlockOptions, grammar, key, value);
+	}
+
+	public String setTerminalOption(Grammar grammar, String key, Object value) {
+		if ( terminalOptions == null ) {
+			terminalOptions = new HashMap<String,Object>();
+		}
+		return setOption(terminalOptions, Grammar.legalTokenOptions, grammar, key, value);
+	}
+
+	public String setOption(Map options, Set legalOptions, Grammar grammar, String key, Object value) {
+		if ( !legalOptions.contains(key) ) {
 			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
 									  grammar,
 									  token,
@@ -186,46 +219,39 @@ public class GrammarAST extends BaseAST {
 				value = vs.substring(1,vs.length()-1); // strip quotes
             }
         }
-		if ( options==null ) {
-			options = new HashMap();
-		}
 		if ( key.equals("k") ) {
 			grammar.numberOfManualLookaheadOptions++;
 		}
+        if ( key.equals("backtrack") && value.toString().equals("true") ) {
+            grammar.composite.getRootGrammar().atLeastOneBacktrackOption = true;
+        }        
         options.put(key, value);
 		return key;
     }
 
-    public Object getOption(String key) {
+    public Object getBlockOption(String key) {
 		Object value = null;
-		if ( options!=null ) {
-			value = options.get(key);
-		}
-		if ( value==null ) {
-			value = defaultBlockOptions.get(key);
+		if ( blockOptions != null ) {
+			value = blockOptions.get(key);
 		}
 		return value;
 	}
 
     public void setOptions(Grammar grammar, Map options) {
 		if ( options==null ) {
-			this.options = null;
+			this.blockOptions = null;
 			return;
 		}
 		Set keys = options.keySet();
 		for (Iterator it = keys.iterator(); it.hasNext();) {
 			String optionName = (String) it.next();
-			String stored=setOption(grammar, optionName, options.get(optionName));
+			String stored= setBlockOption(grammar, optionName, options.get(optionName));
 			if ( stored==null ) {
 				it.remove();
 			}
 		}
     }
 
-    public Map getOptions() {
-        return options;
-    }
-
     public String getText() {
         if ( token!=null ) {
             return token.getText();
@@ -284,15 +310,7 @@ public class GrammarAST extends BaseAST {
         token.setColumn(col);
     }
 
-    public void setEnclosingRule(String rule) {
-        this.enclosingRule = rule;
-    }
-
-    public String getEnclosingRule() {
-        return enclosingRule;
-    }
-
-    public IntSet getSetValue() {
+ 	public IntSet getSetValue() {
         return setValue;
     }
 
@@ -372,12 +390,25 @@ public class GrammarAST extends BaseAST {
 		return null;
 	}
 
-	/** Make nodes unique based upon Token so we can add them to a Set; if
+    public int getNumberOfChildrenWithType(int ttype) {
+        AST p = this.getFirstChild();
+        int n = 0;
+        while ( p!=null ) {
+            if ( p.getType()==ttype ) n++;
+            p = p.getNextSibling();
+        }
+        return n;
+    }
+
+    /** Make nodes unique based upon Token so we can add them to a Set; if
 	 *  not a GrammarAST, check type.
 	 */
-	public boolean equals(AST ast) {
+	public boolean equals(Object ast) {
+		if ( this == ast ) {
+			return true;
+		}
 		if ( !(ast instanceof GrammarAST) ) {
-			return this.getType() == ast.getType();
+			return this.getType() == ((AST)ast).getType();
 		}
 		GrammarAST t = (GrammarAST)ast;
 		return token.getLine() == t.getLine() &&
@@ -443,10 +474,6 @@ public class GrammarAST extends BaseAST {
 		return dup_t;
 	}
 
-	public static void main(String[] args) {
-		GrammarAST t = new GrammarAST();
-	}
-
 	/** Duplicate tree including siblings of root. */
 	public static GrammarAST dupListNoActions(GrammarAST t, GrammarAST parent) {
 		GrammarAST result = dupTreeNoActions(t, parent);            // if t == null, then result==null
@@ -479,13 +506,16 @@ public class GrammarAST extends BaseAST {
 			return null;
 		}
 		if ( ttype==ANTLRParser.BANG || ttype==ANTLRParser.ROOT ) {
-			return (GrammarAST)t.getFirstChild(); // return x from ^(ROOT x)
+			// return x from ^(ROOT x)
+			return (GrammarAST)dupListNoActions((GrammarAST)t.getFirstChild(), t);
 		}
-		if ( (ttype==ANTLRParser.ASSIGN||ttype==ANTLRParser.PLUS_ASSIGN) &&
+        /* DOH!  Must allow labels for sem preds
+        if ( (ttype==ANTLRParser.ASSIGN||ttype==ANTLRParser.PLUS_ASSIGN) &&
 			 (parent==null||parent.getType()!=ANTLRParser.OPTIONS) )
 		{
 			return dupTreeNoActions(t.getChild(1), t); // return x from ^(ASSIGN label x)
 		}
+		*/
 		GrammarAST result = dup(t);		// make copy of root
 		// copy all children of root.
 		GrammarAST kids = dupListNoActions((GrammarAST)t.getFirstChild(), t);
@@ -493,4 +523,14 @@ public class GrammarAST extends BaseAST {
 		return result;
 	}
 
+	public void setTreeEnclosingRuleNameDeeply(String rname) {
+		GrammarAST t = this;
+		t.enclosingRuleName = rname;
+		t = t.getChild(0);
+		while (t != null) {						// for each sibling of the root
+			t.setTreeEnclosingRuleNameDeeply(rname);
+			t = (GrammarAST)t.getNextSibling();
+		}
+	}
+
 }
diff --git a/src/org/antlr/tool/GrammarAnalysisAbortedMessage.java b/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
similarity index 87%
rename from src/org/antlr/tool/GrammarAnalysisAbortedMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
index d4e07bb..c64eb6e 100644
--- a/src/org/antlr/tool/GrammarAnalysisAbortedMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,14 +29,6 @@ package org.antlr.tool;
 
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.analysis.DecisionProbe;
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.NFAState;
-import org.antlr.analysis.SemanticContext;
-import antlr.Token;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
 
 /** Reports the condition that ANTLR's LL(*) analysis engine terminated
  *  early.
@@ -59,7 +51,7 @@ public class GrammarAnalysisAbortedMessage extends Message {
 		}
 		StringTemplate st = getMessageTemplate();
 		st.setAttribute("enclosingRule",
-						probe.dfa.getNFADecisionStartState().getEnclosingRule());
+						probe.dfa.getNFADecisionStartState().enclosingRule.name);
 
 		return super.toString(st);
 	}
diff --git a/src/org/antlr/tool/GrammarDanglingStateMessage.java b/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
similarity index 84%
rename from src/org/antlr/tool/GrammarDanglingStateMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
index d829dc5..0d0b245 100644
--- a/src/org/antlr/tool/GrammarDanglingStateMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,23 +27,20 @@
 */
 package org.antlr.tool;
 
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.analysis.DecisionProbe;
 import org.antlr.analysis.DFAState;
-import org.antlr.analysis.NFAState;
-import org.antlr.analysis.SemanticContext;
-import antlr.Token;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.stringtemplate.StringTemplate;
 
-import java.util.Iterator;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
-import java.util.Set;
 
 /** Reports a potential parsing issue with a decision; the decision is
  *  nondeterministic in some way.
  */
 public class GrammarDanglingStateMessage extends Message {
 	public DecisionProbe probe;
-    public DFAState problemState;
+	public DFAState problemState;
 
 	public GrammarDanglingStateMessage(DecisionProbe probe,
 									   DFAState problemState)
@@ -61,8 +58,14 @@ public class GrammarDanglingStateMessage extends Message {
 		if ( fileName!=null ) {
 			file = fileName;
 		}
+		List labels = probe.getSampleNonDeterministicInputSequence(problemState);
+		String input = probe.getInputSequenceDisplay(labels);
 		StringTemplate st = getMessageTemplate();
-		st.setAttribute("danglingAlts", problemState.getAltSet());
+		List alts = new ArrayList();
+		alts.addAll(problemState.getAltSet());
+		Collections.sort(alts);
+		st.setAttribute("danglingAlts", alts);
+		st.setAttribute("input", input);
 
 		return super.toString(st);
 	}
diff --git a/src/org/antlr/tool/GrammarInsufficientPredicatesMessage.java b/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
similarity index 61%
rename from src/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
index a918714..f07c965 100644
--- a/src/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -28,26 +28,24 @@
 package org.antlr.tool;
 
 import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.NFAState;
-import org.antlr.analysis.SemanticContext;
+import org.antlr.analysis.*;
 import antlr.Token;
 
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
 
 public class GrammarInsufficientPredicatesMessage extends Message {
 	public DecisionProbe probe;
-    public List alts;
+    public Map<Integer, Set<Token>> altToLocations;
+	public DFAState problemState;
 
 	public GrammarInsufficientPredicatesMessage(DecisionProbe probe,
-												List alts)
+												DFAState problemState,
+												Map<Integer, Set<Token>> altToLocations)
 	{
 		super(ErrorManager.MSG_INSUFFICIENT_PREDICATES);
 		this.probe = probe;
-		this.alts = alts;
+		this.problemState = problemState;
+		this.altToLocations = altToLocations;
 	}
 
 	public String toString() {
@@ -59,7 +57,29 @@ public class GrammarInsufficientPredicatesMessage extends Message {
 			file = fileName;
 		}
 		StringTemplate st = getMessageTemplate();
-		st.setAttribute("alts", alts);
+		// convert to string key to avoid 3.1 ST bug
+		Map<String, Set<Token>> altToLocationsWithStringKey = new LinkedHashMap<String, Set<Token>>();
+		List<Integer> alts = new ArrayList<Integer>();
+		alts.addAll(altToLocations.keySet());
+		Collections.sort(alts);
+		for (Integer altI : alts) {
+			altToLocationsWithStringKey.put(altI.toString(), altToLocations.get(altI));
+			/*
+			List<String> tokens = new ArrayList<String>();
+			for (Token t : altToLocations.get(altI)) {
+				tokens.add(t.toString());
+			}
+			Collections.sort(tokens);
+			System.out.println("tokens=\n"+tokens);
+			*/
+		}
+		st.setAttribute("altToLocations", altToLocationsWithStringKey);
+
+		List<Label> sampleInputLabels = problemState.dfa.probe.getSampleNonDeterministicInputSequence(problemState);
+		String input = problemState.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
+		st.setAttribute("upon", input);
+
+		st.setAttribute("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
 
 		return super.toString(st);
 	}
diff --git a/src/org/antlr/tool/GrammarNonDeterminismMessage.java b/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
similarity index 93%
rename from src/org/antlr/tool/GrammarNonDeterminismMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
index bad9525..9c70b5a 100644
--- a/src/org/antlr/tool/GrammarNonDeterminismMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -79,8 +79,8 @@ public class GrammarNonDeterminismMessage extends Message {
 				// reset the line/col to the token definition (pick last one)
 				NFAState ruleStart =
 					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
-				line = ruleStart.getAssociatedASTNode().getLine();
-				column = ruleStart.getAssociatedASTNode().getColumn();
+				line = ruleStart.associatedASTNode.getLine();
+				column = ruleStart.associatedASTNode.getColumn();
 				st.setAttribute("disabled", tokenName);
 			}
 		}
@@ -97,8 +97,7 @@ public class GrammarNonDeterminismMessage extends Message {
 				Integer displayAltI = (Integer) iter.next();
 				if ( DecisionProbe.verbose ) {
 					int tracePathAlt =
-						nfaStart.translateDisplayAltToWalkAlt(probe.dfa,
-															  displayAltI.intValue());
+						nfaStart.translateDisplayAltToWalkAlt(displayAltI.intValue());
 					if ( firstAlt == 0 ) {
 						firstAlt = tracePathAlt;
 					}
@@ -122,6 +121,7 @@ public class GrammarNonDeterminismMessage extends Message {
 				}
 			}
 		}
+		st.setAttribute("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
 		return super.toString(st);
 	}
 
diff --git a/src/org/antlr/tool/GrammarReport.java b/tool/src/main/java/org/antlr/tool/GrammarReport.java
similarity index 96%
rename from src/org/antlr/tool/GrammarReport.java
rename to tool/src/main/java/org/antlr/tool/GrammarReport.java
index 41002db..94cae73 100644
--- a/src/org/antlr/tool/GrammarReport.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarReport.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -58,7 +58,7 @@ public class GrammarReport {
 		buf.append('\t');
 		buf.append(grammar.name);
 		buf.append('\t');
-		buf.append(Grammar.grammarTypeToString[grammar.type]);
+		buf.append(grammar.getGrammarTypeString());
 		buf.append('\t');
 		buf.append(grammar.getOption("language"));
 		int totalNonSynPredProductions = 0;
@@ -153,7 +153,7 @@ public class GrammarReport {
 		buf.append('\t');
 		buf.append(grammar.setOfNondeterministicDecisionNumbersResolvedWithPredicates.size());
 		buf.append('\t');
-		buf.append(grammar.setOfDFAWhoseConversionTerminatedEarly.size());
+		buf.append(grammar.setOfDFAWhoseAnalysisTimedOut.size());
 		buf.append('\t');
 		buf.append(ErrorManager.getErrorState().errors);
 		buf.append('\t');
@@ -204,14 +204,14 @@ public class GrammarReport {
 		return buf.toString();
 	}
 
-	public String getEarlyTerminationReport() {
+	public String getAnalysisTimeoutReport() {
 		StringBuffer buf = new StringBuffer();
 		buf.append("NFA conversion early termination report:");
 		buf.append(newline);
 		buf.append("Number of NFA conversions that terminated early: ");
-		buf.append(grammar.setOfDFAWhoseConversionTerminatedEarly.size());
+		buf.append(grammar.setOfDFAWhoseAnalysisTimedOut.size());
 		buf.append(newline);
-		buf.append(getDFALocations(grammar.setOfDFAWhoseConversionTerminatedEarly));
+		buf.append(getDFALocations(grammar.setOfDFAWhoseAnalysisTimedOut));
 		return buf.toString();
 	}
 
@@ -227,12 +227,12 @@ public class GrammarReport {
 			}
 			decisions.add(Utils.integer(dfa.decisionNumber));
 			buf.append("Rule ");
-			buf.append(dfa.decisionNFAStartState.getEnclosingRule());
+			buf.append(dfa.decisionNFAStartState.enclosingRule.name);
 			buf.append(" decision ");
 			buf.append(dfa.decisionNumber);
 			buf.append(" location ");
 			GrammarAST decisionAST =
-				dfa.decisionNFAStartState.getAssociatedASTNode();
+				dfa.decisionNFAStartState.associatedASTNode;
 			buf.append(decisionAST.getLine());
 			buf.append(":");
 			buf.append(decisionAST.getColumn());
diff --git a/src/org/antlr/tool/GrammarSanity.java b/tool/src/main/java/org/antlr/tool/GrammarSanity.java
similarity index 68%
rename from src/org/antlr/tool/GrammarSanity.java
rename to tool/src/main/java/org/antlr/tool/GrammarSanity.java
index 743c3b3..62b62c0 100644
--- a/src/org/antlr/tool/GrammarSanity.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarSanity.java
@@ -1,3 +1,30 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.tool;
 
 import org.antlr.analysis.NFAState;
@@ -8,9 +35,15 @@ import java.util.List;
 import java.util.HashSet;
 import java.util.ArrayList;
 import java.util.Set;
+import org.antlr.grammar.v2.ANTLRParser;
 
 /** Factor out routines that check sanity of rules, alts, grammars, etc.. */
 public class GrammarSanity {
+	/** The checkForLeftRecursion method needs to track what rules it has
+	 *  visited to track infinite recursion.
+	 */
+	protected Set<Rule> visitedDuringRecursionCheck = null;
+
 	protected Grammar grammar;
 	public GrammarSanity(Grammar grammar) {
 		this.grammar = grammar;
@@ -20,21 +53,20 @@ public class GrammarSanity {
 	 *  of troublesome rule cycles.  This method has two side-effects: it notifies
 	 *  the error manager that we have problems and it sets the list of
 	 *  recursive rules that we should ignore during analysis.
-	 *
-	 *  Return type: List<Set<String(rule-name)>>.
 	 */
-	public List checkAllRulesForLeftRecursion() {
-		grammar.createNFAs(); // make sure we have NFAs
+	public List<Set<Rule>> checkAllRulesForLeftRecursion() {
+		grammar.buildNFA(); // make sure we have NFAs
 		grammar.leftRecursiveRules = new HashSet();
-		List listOfRecursiveCycles = new ArrayList(); // List<Set<String(rule-name)>>
-		for (int i = 0; i < grammar.ruleIndexToRuleList.size(); i++) {
-			String ruleName = (String)grammar.ruleIndexToRuleList.elementAt(i);
-			if ( ruleName!=null ) {
-				NFAState s = grammar.getRuleStartState(ruleName);
-				grammar.visitedDuringRecursionCheck = new HashSet();
-				grammar.visitedDuringRecursionCheck.add(ruleName);
+		List<Set<Rule>> listOfRecursiveCycles = new ArrayList();
+		for (int i = 0; i < grammar.composite.ruleIndexToRuleList.size(); i++) {
+			Rule r = grammar.composite.ruleIndexToRuleList.elementAt(i);
+			if ( r!=null ) {
+				visitedDuringRecursionCheck = new HashSet();
+				visitedDuringRecursionCheck.add(r);
 				Set visitedStates = new HashSet();
-				traceStatesLookingForLeftRecursion(s, visitedStates, listOfRecursiveCycles);
+				traceStatesLookingForLeftRecursion(r.startState,
+												   visitedStates,
+												   listOfRecursiveCycles);
 			}
 		}
 		if ( listOfRecursiveCycles.size()>0 ) {
@@ -55,7 +87,7 @@ public class GrammarSanity {
 	 */
 	protected boolean traceStatesLookingForLeftRecursion(NFAState s,
 														 Set visitedStates,
-														 List listOfRecursiveCycles)
+														 List<Set<Rule>> listOfRecursiveCycles)
 	{
 		if ( s.isAcceptState() ) {
 			// this rule must be nullable!
@@ -68,33 +100,35 @@ public class GrammarSanity {
 		}
 		visitedStates.add(s);
 		boolean stateReachesAcceptState = false;
-		Transition t0 = s.transition(0);
+		Transition t0 = s.transition[0];
 		if ( t0 instanceof RuleClosureTransition ) {
-			String targetRuleName = ((NFAState)t0.target).getEnclosingRule();
-			if ( grammar.visitedDuringRecursionCheck.contains(targetRuleName) ) {
+			RuleClosureTransition refTrans = (RuleClosureTransition)t0;
+			Rule refRuleDef = refTrans.rule;
+			//String targetRuleName = ((NFAState)t0.target).getEnclosingRule();
+			if ( visitedDuringRecursionCheck.contains(refRuleDef) ) {
 				// record left-recursive rule, but don't go back in
-				grammar.leftRecursiveRules.add(targetRuleName);
+				grammar.leftRecursiveRules.add(refRuleDef);
 				/*
-				System.out.println("already visited "+targetRuleName+", calling from "+
-								   s.getEnclosingRule());
-				*/
-				addRulesToCycle(targetRuleName,
-								s.getEnclosingRule(),
+				System.out.println("already visited "+refRuleDef+", calling from "+
+								   s.enclosingRule);
+								   */
+				addRulesToCycle(refRuleDef,
+								s.enclosingRule,
 								listOfRecursiveCycles);
 			}
 			else {
 				// must visit if not already visited; send new visitedStates set
-				grammar.visitedDuringRecursionCheck.add(targetRuleName);
+				visitedDuringRecursionCheck.add(refRuleDef);
 				boolean callReachedAcceptState =
 					traceStatesLookingForLeftRecursion((NFAState)t0.target,
 													   new HashSet(),
 													   listOfRecursiveCycles);
 				// we're back from visiting that rule
-				grammar.visitedDuringRecursionCheck.remove(targetRuleName);
+				visitedDuringRecursionCheck.remove(refRuleDef);
 				// must keep going in this rule then
 				if ( callReachedAcceptState ) {
 					NFAState followingState =
-						((RuleClosureTransition)t0).getFollowState();
+						((RuleClosureTransition) t0).followState;
 					stateReachesAcceptState |=
 						traceStatesLookingForLeftRecursion(followingState,
 														   visitedStates,
@@ -102,14 +136,14 @@ public class GrammarSanity {
 				}
 			}
 		}
-		else if ( t0.label.isEpsilon() ) {
+		else if ( t0.label.isEpsilon() || t0.label.isSemanticPredicate() ) {
 			stateReachesAcceptState |=
 				traceStatesLookingForLeftRecursion((NFAState)t0.target, visitedStates, listOfRecursiveCycles);
 		}
 		// else it has a labeled edge
 
 		// now do the other transition if it exists
-		Transition t1 = s.transition(1);
+		Transition t1 = s.transition[1];
 		if ( t1!=null ) {
 			stateReachesAcceptState |=
 				traceStatesLookingForLeftRecursion((NFAState)t1.target,
@@ -125,32 +159,33 @@ public class GrammarSanity {
 	 *  cycle.  listOfRecursiveCycles is List<Set<String>> that holds a list
 	 *  of cycles (sets of rule names).
 	 */
-	protected void addRulesToCycle(String targetRuleName,
-								   String enclosingRuleName,
-								   List listOfRecursiveCycles)
+	protected void addRulesToCycle(Rule targetRule,
+								   Rule enclosingRule,
+								   List<Set<Rule>> listOfRecursiveCycles)
 	{
 		boolean foundCycle = false;
 		for (int i = 0; i < listOfRecursiveCycles.size(); i++) {
-			Set rulesInCycle = (Set)listOfRecursiveCycles.get(i);
+			Set<Rule> rulesInCycle = listOfRecursiveCycles.get(i);
 			// ensure both rules are in same cycle
-			if ( rulesInCycle.contains(targetRuleName) ) {
-				rulesInCycle.add(enclosingRuleName);
+			if ( rulesInCycle.contains(targetRule) ) {
+				rulesInCycle.add(enclosingRule);
 				foundCycle = true;
 			}
-			if ( rulesInCycle.contains(enclosingRuleName) ) {
-				rulesInCycle.add(targetRuleName);
+			if ( rulesInCycle.contains(enclosingRule) ) {
+				rulesInCycle.add(targetRule);
 				foundCycle = true;
 			}
 		}
 		if ( !foundCycle ) {
 			Set cycle = new HashSet();
-			cycle.add(targetRuleName);
-			cycle.add(enclosingRuleName);
+			cycle.add(targetRule);
+			cycle.add(enclosingRule);
 			listOfRecursiveCycles.add(cycle);
 		}
 	}
 
-	public void checkRuleReference(GrammarAST refAST,
+	public void checkRuleReference(GrammarAST scopeAST,
+								   GrammarAST refAST,
 								   GrammarAST argsAST,
 								   String currentRuleName)
 	{
diff --git a/src/org/antlr/tool/GrammarSemanticsMessage.java b/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
similarity index 98%
rename from src/org/antlr/tool/GrammarSemanticsMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
index 09afcea..b441f4c 100644
--- a/src/org/antlr/tool/GrammarSemanticsMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java b/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java
new file mode 100644
index 0000000..38f251f
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java
@@ -0,0 +1,191 @@
+package org.antlr.tool;
+
+import org.antlr.runtime.SerializedGrammar;
+
+import java.io.*;
+import java.util.Stack;
+
+/** Serialize a grammar into a highly compressed form with
+ *  only the info needed to recognize sentences.
+ *  FORMAT:
+ *
+ *  file ::= $ANTLR<version:byte><grammartype:byte><name:string>;<numRules:short><rules>
+ *  rule ::= R<rulename:string>;B<nalts:short><alts>.
+ *  alt  ::= A<elems>;
+ *  elem ::= t<tokentype:short> | r<ruleIndex:short> | -<char:uchar><char:uchar> | ~<tokentype> | w
+ */
+public class GrammarSerializerFoo {
+    protected DataOutputStream out;
+    protected String filename;
+    protected Grammar g;
+
+    protected Stack streams = new Stack();
+    protected ByteArrayOutputStream altBuf;
+    protected int numElementsInAlt = 0;
+
+    public GrammarSerializerFoo(Grammar g) {
+        this.g = g;
+    }
+
+    public void open(String filename) throws IOException {
+        this.filename = filename;
+        FileOutputStream fos = new FileOutputStream(filename);
+        BufferedOutputStream bos = new BufferedOutputStream(fos);
+        out = new DataOutputStream(bos);
+        writeString(out, SerializedGrammar.COOKIE);
+        out.writeByte(SerializedGrammar.FORMAT_VERSION);
+    }
+
+    public void close() throws IOException {
+        if ( out!=null ) out.close();
+        out = null;
+    }
+
+
+    // WRITE
+
+    public void grammar(int grammarTokenType, String name) {
+        try {
+            /*
+            switch ( grammarTokenType ) {
+                case ANTLRParser.LEXER_GRAMMAR : out.writeByte('l'); break;
+                case ANTLRParser.PARSER_GRAMMAR : out.writeByte('p'); break;
+                case ANTLRParser.TREE_GRAMMAR: out.writeByte('t'); break;
+                case ANTLRParser.COMBINED_GRAMMAR : out.writeByte('c'); break;
+            }
+            writeString(out, name);
+            */
+            out.writeShort(g.getRules().size());
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void rule(String name) {
+        try {
+            out.writeByte('R');
+            writeString(out, name);
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void endRule() {
+        try {
+            out.writeByte('.');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void block(int nalts) {
+        try {
+            out.writeByte('B');
+            out.writeShort(nalts);
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void alt(GrammarAST alt) {
+        numElementsInAlt = 0;
+        try {
+            out.writeByte('A');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+        //streams.push(out);
+        //altBuf = new ByteArrayOutputStream();
+        //out = new DataOutputStream(altBuf);
+    }
+
+    public void endAlt() {
+        try {
+            //out.flush();
+            //out = (DataOutputStream)streams.pop(); // restore previous stream
+            out.writeByte(';');
+            //out.writeShort(numElementsInAlt);
+            //out.write(altBuf.toByteArray());
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void ruleRef(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            out.writeByte('r');
+            out.writeShort(g.getRuleIndex(t.getText()));
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void token(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            out.writeByte('t');
+            int ttype = g.getTokenType(t.getText());
+            out.writeShort(ttype);
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void charLiteral(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            if ( g.type!=Grammar.LEXER ) {
+                out.writeByte('t');
+                int ttype = g.getTokenType(t.getText());
+                out.writeShort(ttype);
+            }
+            // else lexer???
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void wildcard(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            out.writeByte('w');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void range() { // must be char range
+        numElementsInAlt++;
+        try {
+            out.writeByte('-');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void not() {
+        try {
+            out.writeByte('~');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void writeString(DataOutputStream out, String s) throws IOException {
+        out.writeBytes(s);
+        out.writeByte(';');
+    }
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java b/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java
new file mode 100644
index 0000000..79baed1
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java
@@ -0,0 +1,254 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import java.io.Reader;
+import java.io.IOException;
+import java.io.FileReader;
+import java.io.BufferedReader;
+import java.io.File;
+import java.util.List;
+import java.util.ArrayList;
+
+/** Load a grammar file and scan it just until we learn a few items
+ *  of interest.  Currently: name, type, imports, tokenVocab, language option.
+ *
+ *  GrammarScanner (at bottom of this class) converts grammar to stuff like:
+ *
+ *   grammar Java ; options { backtrack true memoize true }
+ *   import JavaDecl JavaAnnotations JavaExpr ;
+ *   ... : ...
+ *
+ *  First ':' or '@' indicates we can stop looking for imports/options.
+ *
+ *  Then we just grab interesting grammar properties.
+ */
public class GrammarSpelunker {
    protected String grammarFileName;
    protected String token;             // single-token lookahead; null at EOF
    protected Scanner scanner;

    // grammar info / properties
    protected String grammarModifier;   // "tree", "parser", "lexer", or null (combined)
    protected String grammarName;
    protected String tokenVocab;        // value of tokenVocab option, if present
    protected String language = "Java"; // default target language
    protected String inputDirectory;
    protected List<String> importedGrammars; // null when no import statement found

    public GrammarSpelunker(String inputDirectory, String grammarFileName) {
        this.inputDirectory = inputDirectory;
        this.grammarFileName = grammarFileName;
    }

    /** Advance the lookahead; token becomes null at EOF. */
    void consume() throws IOException { token = scanner.nextToken(); }

    /** Consume the current token if it matches expecting; otherwise abort.
     *  Null-safe: at EOF (token==null) this reports a parse error instead
     *  of throwing a NullPointerException.
     */
    protected void match(String expecting) throws IOException {
        //System.out.println("match "+expecting+"; is "+token);
        if ( expecting.equals(token) ) consume();
        else throw new Error("Error parsing "+grammarFileName+": '"+token+
                             "' not expected '"+expecting+"'");
    }

    /** Scan just far enough into the grammar file to collect the header,
     *  the options of interest, and any import statement.  The reader is
     *  always closed, even when scanning fails.
     */
    public void parse() throws IOException {
        Reader r = new FileReader((inputDirectory != null ? inputDirectory + File.separator : "") + grammarFileName);
        BufferedReader br = new BufferedReader(r);
        try {
            scanner = new Scanner(br);
            consume();
            grammarHeader();
            // scan until options section, imports, or first rule
            while ( token!=null && !token.equals("@") && !token.equals(":") &&
                    !token.equals("import") && !token.equals("options") )
            {
                consume();
            }
            // token may be null here (EOF) for a header-only grammar;
            // constant-first equals avoids the NPE the original code had
            if ( "options".equals(token) ) options();
            // scan until imports or first rule
            while ( token!=null && !token.equals("@") && !token.equals(":") &&
                    !token.equals("import") )
            {
                consume();
            }
            if ( "import".equals(token) ) imports();
            // ignore rest of input; close up shop
        }
        finally {
            br.close();
        }
    }

    /** header : ("tree"|"parser"|"lexer")? "grammar" ID ; */
    protected void grammarHeader() throws IOException {
        if ( token==null ) return;
        if ( token.equals("tree") || token.equals("parser") || token.equals("lexer") ) {
            grammarModifier=token;
            consume();
        }
        match("grammar");
        grammarName = token;
        consume(); // move beyond name
    }

    // looks like "options { backtrack true ; tokenVocab MyTokens ; }"
    protected void options() throws IOException {
        match("options");
        match("{");
        while ( token!=null && !token.equals("}") ) {
            String name = token;
            consume();
            String value = token;
            consume();
            match(";");
            if ( name.equals("tokenVocab") ) tokenVocab = value;
            if ( name.equals("language") ) language = value;
        }
        match("}");
    }

    // looks like "import JavaDecl JavaAnnotations JavaExpr ;"
    protected void imports() throws IOException {
        match("import");
        importedGrammars = new ArrayList<String>();
        while ( token!=null && !token.equals(";") ) {
            importedGrammars.add(token);
            consume();
        }
        match(";");
        // an empty import clause is reported the same as no clause at all
        if ( importedGrammars.isEmpty() ) importedGrammars = null;
    }

    public String getGrammarModifier() { return grammarModifier; }

    public String getGrammarName() { return grammarName; }

    public String getTokenVocab() { return tokenVocab; }

    public String getLanguage() { return language; }

    public List<String> getImportedGrammars() { return importedGrammars; }

    /** Strip comments and then return stream of words and
     *  tokens {';', ':', '{', '}', '@'} plus string and int literals.
     */
    public static class Scanner {
        public static final int EOF = -1;
        Reader input;
        int c; // single-char lookahead; EOF (-1) at end of input

        public Scanner(Reader input) throws IOException {
            this.input = input;
            consume();
        }

        boolean isDIGIT() { return c>='0'&&c<='9'; }
        boolean isID_START() { return c>='a'&&c<='z' || c>='A'&&c<='Z'; }
        boolean isID_LETTER() { return isID_START() || c>='0'&&c<='9' || c=='_'; }

        void consume() throws IOException { c = input.read(); }

        /** Return the next token, or null at EOF.  Characters that start
         *  no known token are silently skipped.
         */
        public String nextToken() throws IOException {
            while ( c!=EOF ) {
                //System.out.println("check "+(char)c);
                switch ( c ) {
                    case ';' : consume(); return ";";
                    case '{' : consume(); return "{";
                    case '}' : consume(); return "}";
                    case ':' : consume(); return ":";
                    case '@' : consume(); return "@";
                    case '/' : COMMENT(); break;
                    case '\'': return STRING();
                    default:
                        if ( isID_START() ) return ID();
                        else if ( isDIGIT() ) return INT();
                        consume(); // ignore anything else
                }
            }
            return null;
        }

        /** ID : ID_START ID_LETTER* ; first char already checked by caller */
        String ID() throws IOException {
            StringBuilder buf = new StringBuilder();
            while ( c!=EOF && isID_LETTER() ) { buf.append((char)c); consume(); }
            return buf.toString();
        }

        String INT() throws IOException {
            StringBuilder buf = new StringBuilder();
            while ( c!=EOF && isDIGIT() ) { buf.append((char)c); consume(); }
            return buf.toString();
        }

        /** Match a single-quoted literal.  Backslash escapes are kept
         *  verbatim: both the backslash and the escaped char are appended.
         */
        String STRING() throws IOException {
            StringBuilder buf = new StringBuilder();
            consume();
            while ( c!=EOF && c!='\'' ) {
                if ( c=='\\' ) {
                    buf.append((char)c);
                    consume();
                }
                buf.append((char)c);
                consume();
            }
            consume(); // scan past closing quote
            return buf.toString();
        }

        /** Skip a line comment or a block comment; a lone '/' is simply
         *  discarded (this mini-language has no division operator).
         */
        void COMMENT() throws IOException {
            if ( c=='/' ) {
                consume();
                if ( c=='*' ) { // block comment: scan for terminating star-slash
                    consume();
        scarf:
                    while ( true ) {
                        if ( c==EOF ) break scarf; // unterminated comment: don't spin forever
                        if ( c=='*' ) {
                            consume();
                            if ( c=='/' ) { consume(); break scarf; }
                        }
                        else {
                            while ( c!=EOF && c!='*' ) consume();
                        }
                    }
                }
                else if ( c=='/' ) { // line comment: skip to end of line
                    while ( c!=EOF && c!='\n' ) consume();
                }
            }
        }
    }

    /** Tester; Give grammar filename as arg */
    public static void main(String[] args) throws IOException {
        GrammarSpelunker g = new GrammarSpelunker(".", args[0]);
        g.parse();
        System.out.println(g.grammarModifier+" grammar "+g.grammarName);
        System.out.println("language="+g.language);
        System.out.println("tokenVocab="+g.tokenVocab);
        System.out.println("imports="+g.importedGrammars);
    }
}
diff --git a/src/org/antlr/tool/GrammarSyntaxMessage.java b/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
similarity index 98%
rename from src/org/antlr/tool/GrammarSyntaxMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
index 0f94fb6..036a8ff 100644
--- a/src/org/antlr/tool/GrammarSyntaxMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/src/org/antlr/tool/GrammarUnreachableAltsMessage.java b/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
similarity index 95%
rename from src/org/antlr/tool/GrammarUnreachableAltsMessage.java
rename to tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
index 57196d0..2203be0 100644
--- a/src/org/antlr/tool/GrammarUnreachableAltsMessage.java
+++ b/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -77,8 +77,8 @@ public class GrammarUnreachableAltsMessage extends Message {
 				// reset the line/col to the token definition
 				NFAState ruleStart =
 					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
-				line = ruleStart.getAssociatedASTNode().getLine();
-				column = ruleStart.getAssociatedASTNode().getColumn();
+				line = ruleStart.associatedASTNode.getLine();
+				column = ruleStart.associatedASTNode.getColumn();
 				st.setAttribute("tokens", tokenName);
 			}
 		}
diff --git a/src/org/antlr/tool/Interp.java b/tool/src/main/java/org/antlr/tool/Interp.java
similarity index 74%
rename from src/org/antlr/tool/Interp.java
rename to tool/src/main/java/org/antlr/tool/Interp.java
index fc87aa5..430a02c 100644
--- a/src/org/antlr/tool/Interp.java
+++ b/tool/src/main/java/org/antlr/tool/Interp.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,12 @@ import org.antlr.runtime.ANTLRFileStream;
 import org.antlr.runtime.CharStream;
 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.tree.ParseTree;
+import org.antlr.Tool;
 
-import java.io.BufferedReader;
-import java.io.FileReader;
 import java.util.StringTokenizer;
+import java.util.List;
+import java.io.FileReader;
+import java.io.BufferedReader;
 
 /** Interpret any ANTLR grammar:
  *
@@ -57,15 +59,43 @@ public class Interp {
 		String startRule = args[2];
 		String inputFileName = args[3];
 
-		Grammar parser =
-			new Grammar(null,
-						grammarFileName,
-						new BufferedReader(new FileReader(grammarFileName)));
+		// TODO: using wrong constructor now
+		Tool tool = new Tool();
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar parser = new Grammar(tool, grammarFileName, composite);
+		composite.setDelegationRoot(parser);
+		FileReader fr = new FileReader(grammarFileName);
+		BufferedReader br = new BufferedReader(fr);
+		parser.parseAndBuildAST(br);
+		br.close();
+
+		parser.composite.assignTokenTypes();
+		parser.composite.defineGrammarSymbols();
+		parser.composite.createNFAs();
+
+		List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
+		if ( leftRecursiveRules.size()>0 ) {
+			return;
+		}
+
+		if ( parser.getRule(startRule)==null ) {
+			System.out.println("undefined start rule "+startRule);
+			return;
+		}
 
 		String lexerGrammarText = parser.getLexerGrammar();
 		Grammar lexer = new Grammar();
 		lexer.importTokenVocabulary(parser);
-		lexer.setGrammarContent(lexerGrammarText);
+		lexer.fileName = grammarFileName;
+		lexer.setTool(tool);
+		if ( lexerGrammarText!=null ) {
+			lexer.setGrammarContent(lexerGrammarText);
+		}
+		else {
+			System.err.println("no lexer grammar found in "+grammarFileName);
+		}
+		lexer.composite.createNFAs();
+		
 		CharStream input =
 			new ANTLRFileStream(inputFileName);
 		Interpreter lexEngine = new Interpreter(lexer, input);
diff --git a/src/org/antlr/tool/Interpreter.java b/tool/src/main/java/org/antlr/tool/Interpreter.java
similarity index 84%
rename from src/org/antlr/tool/Interpreter.java
rename to tool/src/main/java/org/antlr/tool/Interpreter.java
index 0c748b1..dbc9719 100644
--- a/src/org/antlr/tool/Interpreter.java
+++ b/tool/src/main/java/org/antlr/tool/Interpreter.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,8 @@ public class Interpreter implements TokenSource {
 		public LexerActionGetTokenType(Grammar g) {
 			this.g = g;
 		}
-		public void exitRule(String ruleName) {
+
+		public void exitRule(String grammarFileName, String ruleName) {
 			if ( !ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ){
 				int type = g.getTokenType(ruleName);
 				int channel = Token.DEFAULT_CHANNEL;
@@ -132,7 +133,7 @@ public class Interpreter implements TokenSource {
 		//System.out.println("scan("+startRule+",'"+in.substring(in.index(),in.size()-1)+"')");
 		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
 		if ( grammar.getRuleStartState(startRule)==null ) {
-			grammar.createNFAs();
+			grammar.buildNFA();
 		}
 
 		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
@@ -171,7 +172,7 @@ public class Interpreter implements TokenSource {
 		//System.out.println("parse("+startRule+")");
 		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
 		if ( grammar.getRuleStartState(startRule)==null ) {
-			grammar.createNFAs();
+			grammar.buildNFA();
 		}
 		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
 			// Create the DFA predictors for each decision
@@ -216,10 +217,10 @@ public class Interpreter implements TokenSource {
 							   List visitedStates)
 		throws RecognitionException
 	{
+		NFAState s = start;
 		if ( actions!=null ) {
-			actions.enterRule(start.getEnclosingRule());
+			actions.enterRule(s.nfa.grammar.getFileName(), start.enclosingRule.name);
 		}
-		NFAState s = start;
 		int t = input.LA(1);
 		while ( s!=stop ) {
 			if ( visitedStates!=null ) {
@@ -227,17 +228,17 @@ public class Interpreter implements TokenSource {
 			}
 			/*
 			System.out.println("parse state "+s.stateNumber+" input="+
-				grammar.getTokenDisplayName(t));
+				s.nfa.grammar.getTokenDisplayName(t));
 				*/
 			// CASE 1: decision state
-			if ( s.getDecisionNumber()>0 && grammar.getNumberOfAltsForDecisionNFA(s)>1 ) {
+			if ( s.getDecisionNumber()>0 && s.nfa.grammar.getNumberOfAltsForDecisionNFA(s)>1 ) {
 				// decision point, must predict and jump to alt
-				DFA dfa = grammar.getLookaheadDFA(s.getDecisionNumber());
+				DFA dfa = s.nfa.grammar.getLookaheadDFA(s.getDecisionNumber());
 				/*
-				if ( grammar.type!=Grammar.LEXER ) {
+				if ( s.nfa.grammar.type!=Grammar.LEXER ) {
 					System.out.println("decision: "+
 								   dfa.getNFADecisionStartState().getDescription()+
-								   " input="+grammar.getTokenDisplayName(t));
+								   " input="+s.nfa.grammar.getTokenDisplayName(t));
 				}
 				*/
 				int m = input.mark();
@@ -246,9 +247,9 @@ public class Interpreter implements TokenSource {
 					String description = dfa.getNFADecisionStartState().getDescription();
 					NoViableAltException nvae =
 						new NoViableAltException(description,
-												 dfa.getDecisionNumber(),
-												 s.stateNumber,
-												 input);
+													  dfa.getDecisionNumber(),
+													  s.stateNumber,
+													  input);
 					if ( actions!=null ) {
 						actions.recognitionException(nvae);
 					}
@@ -257,22 +258,29 @@ public class Interpreter implements TokenSource {
 				}
 				input.rewind(m);
 				int parseAlt =
-					s.translateDisplayAltToWalkAlt(dfa,predictedAlt);
+					s.translateDisplayAltToWalkAlt(predictedAlt);
 				/*
-				if ( grammar.type!=Grammar.LEXER ) {
+				if ( s.nfa.grammar.type!=Grammar.LEXER ) {
 					System.out.println("predicted alt "+predictedAlt+", parseAlt "+
 									   parseAlt);
 				}
 				*/
-				NFAState alt = grammar.getNFAStateForAltOfDecision(s, parseAlt);
-				s = (NFAState)alt.transition(0).target;
+				NFAState alt;
+				if ( parseAlt > s.nfa.grammar.getNumberOfAltsForDecisionNFA(s) ) {
+					// implied branch of loop etc...
+					alt = s.nfa.grammar.nfa.getState( s.endOfBlockStateNumber );
+				}
+				else {
+					alt = s.nfa.grammar.getNFAStateForAltOfDecision(s, parseAlt);
+				}
+				s = (NFAState)alt.transition[0].target;
 				continue;
 			}
 
 			// CASE 2: finished matching a rule
 			if ( s.isAcceptState() ) { // end of rule node
 				if ( actions!=null ) {
-					actions.exitRule(s.getEnclosingRule());
+					actions.exitRule(s.nfa.grammar.getFileName(), s.enclosingRule.name);
 				}
 				if ( ruleInvocationStack.empty() ) {
 					// done parsing.  Hit the start state.
@@ -282,22 +290,37 @@ public class Interpreter implements TokenSource {
 				// pop invoking state off the stack to know where to return to
 				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
 				RuleClosureTransition invokingTransition =
-						(RuleClosureTransition)invokingState.transition(0);
+						(RuleClosureTransition)invokingState.transition[0];
 				// move to node after state that invoked this rule
-				s = invokingTransition.getFollowState();
+				s = invokingTransition.followState;
 				continue;
 			}
 
-			Transition trans = s.transition(0);
+			Transition trans = s.transition[0];
 			Label label = trans.label;
+			if ( label.isSemanticPredicate() ) {
+				FailedPredicateException fpe =
+					new FailedPredicateException(input,
+												 s.enclosingRule.name,
+												 "can't deal with predicates yet");
+				if ( actions!=null ) {
+					actions.recognitionException(fpe);
+				}
+			}
+
 			// CASE 3: epsilon transition
 			if ( label.isEpsilon() ) {
 				// CASE 3a: rule invocation state
 				if ( trans instanceof RuleClosureTransition ) {
 					ruleInvocationStack.push(s);
 					s = (NFAState)trans.target;
+					//System.out.println("call "+s.enclosingRule.name+" from "+s.nfa.grammar.getFileName());
 					if ( actions!=null ) {
-						actions.enterRule(s.getEnclosingRule());
+						actions.enterRule(s.nfa.grammar.getFileName(), s.enclosingRule.name);
+					}
+					// could be jumping to new grammar, make sure DFA created
+					if ( !s.nfa.grammar.allDecisionDFAHaveBeenCreated() ) {
+						s.nfa.grammar.createLookaheadDFAs();
 					}
 				}
 				// CASE 3b: plain old epsilon transition, just move
@@ -309,13 +332,13 @@ public class Interpreter implements TokenSource {
 			// CASE 4: match label on transition
 			else if ( label.matches(t) ) {
 				if ( actions!=null ) {
-					if ( grammar.type == Grammar.PARSER ||
-						 grammar.type == Grammar.COMBINED )
+					if ( s.nfa.grammar.type == Grammar.PARSER ||
+						 s.nfa.grammar.type == Grammar.COMBINED )
 					{
 						actions.consumeToken(((TokenStream)input).LT(1));
 					}
 				}
-				s = (NFAState)s.transition(0).target;
+				s = (NFAState)s.transition[0].target;
 				input.consume();
 				t = input.LA(1);
 			}
@@ -344,7 +367,7 @@ public class Interpreter implements TokenSource {
 				else if ( label.isSemanticPredicate() ) {
 					FailedPredicateException fpe =
 						new FailedPredicateException(input,
-													 s.getEnclosingRule(),
+													 s.enclosingRule.name,
 													 label.getSemanticContext().toString());
 					if ( actions!=null ) {
 						actions.recognitionException(fpe);
@@ -359,7 +382,7 @@ public class Interpreter implements TokenSource {
 		}
 		//System.out.println("hit stop state for "+stop.getEnclosingRule());
 		if ( actions!=null ) {
-			actions.exitRule(stop.getEnclosingRule());
+			actions.exitRule(s.nfa.grammar.getFileName(), stop.enclosingRule.name);
 		}
 	}
 
@@ -416,10 +439,15 @@ public class Interpreter implements TokenSource {
 
 	public void reportScanError(RecognitionException re) {
 		CharStream cs = (CharStream)input;
-		// print as good of a message is we can't, given that we do not have
+		// print as good of a message as we can, given that we do not have
 		// a Lexer object and, hence, cannot call the routine to get a
 		// decent error message.
 		System.err.println("problem matching token at "+
-			cs.getLine()+":"+cs.getCharPositionInLine()+" "+re.getClass().getName());
+			cs.getLine()+":"+cs.getCharPositionInLine()+" "+re);
 	}
+
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
 }
diff --git a/src/org/antlr/tool/LeftRecursionCyclesMessage.java b/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
similarity index 98%
rename from src/org/antlr/tool/LeftRecursionCyclesMessage.java
rename to tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
index 5b7d768..5dfa962 100644
--- a/src/org/antlr/tool/LeftRecursionCyclesMessage.java
+++ b/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/src/org/antlr/tool/Message.java b/tool/src/main/java/org/antlr/tool/Message.java
similarity index 99%
rename from src/org/antlr/tool/Message.java
rename to tool/src/main/java/org/antlr/tool/Message.java
index e9c3499..860bd1f 100644
--- a/src/org/antlr/tool/Message.java
+++ b/tool/src/main/java/org/antlr/tool/Message.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/src/org/antlr/tool/NFAFactory.java b/tool/src/main/java/org/antlr/tool/NFAFactory.java
similarity index 84%
rename from src/org/antlr/tool/NFAFactory.java
rename to tool/src/main/java/org/antlr/tool/NFAFactory.java
index 219e612..23e6fa5 100644
--- a/src/org/antlr/tool/NFAFactory.java
+++ b/tool/src/main/java/org/antlr/tool/NFAFactory.java
@@ -33,6 +33,9 @@ import org.antlr.misc.IntervalSet;
 
 import java.util.Iterator;
 import java.util.List;
+import java.util.ArrayList;
+
+import antlr.Token;
 
 /** Routines to construct StateClusters from EBNF grammar constructs.
  *  No optimization is done to remove unnecessary epsilon edges.
@@ -47,10 +50,15 @@ public class NFAFactory {
      */
 	NFA nfa = null;
 
-	String currentRuleName = null;
+    public Rule getCurrentRule() {
+        return currentRule;
+    }
+
+    public void setCurrentRule(Rule currentRule) {
+        this.currentRule = currentRule;
+    }
 
-    /** Used to assign state numbers */
-    protected int stateCounter = 0;
+	Rule currentRule = null;
 
 	public NFAFactory(NFA nfa) {
         nfa.setFactory(this);
@@ -59,16 +67,11 @@ public class NFAFactory {
 
     public NFAState newState() {
         NFAState n = new NFAState(nfa);
-        int state = stateCounter;
+        int state = nfa.getNewNFAStateNumber();
         n.stateNumber = state;
-        stateCounter++;
         nfa.addState(n);
-		n.setEnclosingRuleName(currentRuleName);
-        return n;
-    }
-
-    public int getNumberOfStates() {
-        return stateCounter;
+		n.enclosingRule = currentRule;
+		return n;
     }
 
 	/** Optimize an alternative (list of grammar elements).
@@ -87,21 +90,21 @@ public class NFAFactory {
 				s = nfa.getState(s.endOfBlockStateNumber);
 				continue;
 			}
-			Transition t = s.transition(0);
+			Transition t = s.transition[0];
 			if ( t instanceof RuleClosureTransition ) {
-				s = ((RuleClosureTransition)t).getFollowState();
+				s = ((RuleClosureTransition) t).followState;
 				continue;
 			}
-			if ( t.label.isEpsilon() && s.getNumberOfTransitions()==1 ) {
+			if ( t.label.isEpsilon() && !t.label.isAction() && s.getNumberOfTransitions()==1 ) {
 				// bypass epsilon transition and point to what the epsilon's
 				// target points to unless that epsilon transition points to
 				// a block or loop etc..  Also don't collapse epsilons that
-				// point at the last node of the alt
+				// point at the last node of the alt. Don't collapse action edges
 				NFAState epsilonTarget = (NFAState)t.target;
 				if ( epsilonTarget.endOfBlockStateNumber==State.INVALID_STATE_NUMBER &&
-					 epsilonTarget.transition(0)!=null )
+					 epsilonTarget.transition[0] !=null )
 				{
-					s.setTransition0(epsilonTarget.transition(0));
+					s.setTransition0(epsilonTarget.transition[0]);
 					/*
 					System.out.println("### opt "+s.stateNumber+"->"+
 									   epsilonTarget.transition(0).target.stateNumber);
@@ -113,23 +116,31 @@ public class NFAFactory {
 	}
 
 	/** From label A build Graph o-A->o */
-	public StateCluster build_Atom(int label) {
+	public StateCluster build_Atom(int label, GrammarAST associatedAST) {
 		NFAState left = newState();
 		NFAState right = newState();
+		left.associatedASTNode = associatedAST;
+		right.associatedASTNode = associatedAST;
 		transitionBetweenStates(left, right, label);
 		StateCluster g = new StateCluster(left, right);
 		return g;
 	}
 
-    /** From set build single edge graph o->o-set->o.  To conform to
+	public StateCluster build_Atom(GrammarAST atomAST) {
+		int tokenType = nfa.grammar.getTokenType(atomAST.getText());
+		return build_Atom(tokenType, atomAST);
+	}
+
+	/** From set build single edge graph o->o-set->o.  To conform to
      *  what an alt block looks like, must have extra state on left.
      */
-	public StateCluster build_Set(IntSet set) {
-        //NFAState start = newState();
+	public StateCluster build_Set(IntSet set, GrammarAST associatedAST) {
         NFAState left = newState();
-        //transitionBetweenStates(start, left, Label.EPSILON);
         NFAState right = newState();
-        Transition e = new Transition(new Label(set),right);
+		left.associatedASTNode = associatedAST;
+		right.associatedASTNode = associatedAST;
+		Label label = new Label(set);
+		Transition e = new Transition(label,right);
         left.addTransition(e);
 		StateCluster g = new StateCluster(left, right);
         return g;
@@ -153,7 +164,8 @@ public class NFAFactory {
     public StateCluster build_Range(int a, int b) {
         NFAState left = newState();
         NFAState right = newState();
-        Transition e = new Transition(new Label(IntervalSet.of(a,b)),right);
+		Label label = new Label(IntervalSet.of(a, b));
+		Transition e = new Transition(label,right);
         left.addTransition(e);
         StateCluster g = new StateCluster(left, right);
         return g;
@@ -161,9 +173,9 @@ public class NFAFactory {
 
 	/** From char 'c' build StateCluster o-intValue(c)->o
 	 */
-	public StateCluster build_CharLiteralAtom(String charLiteral) {
-        int c = Grammar.getCharValueFromGrammarCharLiteral(charLiteral);
-		return build_Atom(c);
+	public StateCluster build_CharLiteralAtom(GrammarAST charLiteralAST) {
+        int c = Grammar.getCharValueFromGrammarCharLiteral(charLiteralAST.getText());
+		return build_Atom(c, charLiteralAST);
 	}
 
 	/** From char 'c' build StateCluster o-intValue(c)->o
@@ -183,10 +195,10 @@ public class NFAFactory {
      *  the DFA.  Machine== o-'f'->o-'o'->o-'g'->o and has n+1 states
      *  for n characters.
      */
-    public StateCluster build_StringLiteralAtom(String stringLiteral) {
+    public StateCluster build_StringLiteralAtom(GrammarAST stringLiteralAST) {
         if ( nfa.grammar.type==Grammar.LEXER ) {
 			StringBuffer chars =
-				Grammar.getUnescapedStringFromGrammarStringLiteral(stringLiteral);
+				Grammar.getUnescapedStringFromGrammarStringLiteral(stringLiteralAST.getText());
             NFAState first = newState();
             NFAState last = null;
             NFAState prev = first;
@@ -200,8 +212,8 @@ public class NFAFactory {
         }
 
         // a simple token reference in non-Lexers
-        int tokenType = nfa.grammar.getTokenType(stringLiteral);
-        return build_Atom(tokenType);
+        int tokenType = nfa.grammar.getTokenType(stringLiteralAST.getText());
+		return build_Atom(tokenType, stringLiteralAST);
     }
 
     /** For reference to rule r, build
@@ -219,16 +231,13 @@ public class NFAFactory {
      *
      *  TODO add to codegen: collapse alt blks that are sets into single matchSet
      */
-    public StateCluster build_RuleRef(int ruleIndex, NFAState ruleStart) {
-        /*
-        System.out.println("building ref to rule "+ruleIndex+": "+
-                nfa.getGrammar().getRuleName(ruleIndex));
-        */
+    public StateCluster build_RuleRef(Rule refDef, NFAState ruleStart) {
+        //System.out.println("building ref to rule "+nfa.grammar.name+"."+refDef.name);
         NFAState left = newState();
         // left.setDescription("ref to "+ruleStart.getDescription());
         NFAState right = newState();
         // right.setDescription("NFAState following ref to "+ruleStart.getDescription());
-        Transition e = new RuleClosureTransition(ruleIndex,ruleStart,right);
+        Transition e = new RuleClosureTransition(refDef,ruleStart,right);
         left.addTransition(e);
         StateCluster g = new StateCluster(left, right);
         return g;
@@ -243,24 +252,37 @@ public class NFAFactory {
         return g;
     }
 
-    /** Build what amounts to an epsilon transition with a semantic
-     *  predicate action.  The pred is a pointer into the AST of
-     *  the SEMPRED token.
-     */
-    public StateCluster build_SemanticPredicate(GrammarAST pred) {
+	/** Build what amounts to an epsilon transition with a semantic
+	 *  predicate action.  The pred is a pointer into the AST of
+	 *  the SEMPRED token.
+	 */
+	public StateCluster build_SemanticPredicate(GrammarAST pred) {
 		// don't count syn preds
 		if ( !pred.getText().toUpperCase()
-			    .startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
+				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
 		{
 			nfa.grammar.numberOfSemanticPredicates++;
 		}
 		NFAState left = newState();
-        NFAState right = newState();
-        Transition e = new Transition(new Label(pred), right);
-        left.addTransition(e);
-        StateCluster g = new StateCluster(left, right);
-        return g;
-    }
+		NFAState right = newState();
+		Transition e = new Transition(new PredicateLabel(pred), right);
+		left.addTransition(e);
+		StateCluster g = new StateCluster(left, right);
+		return g;
+	}
+
+	/** Build what amounts to an epsilon transition with an action.
+	 *  The action goes into NFA though it is ignored during analysis.
+	 *  It slows things down a bit, but I must ignore predicates after
+	 *  having seen an action (5-5-2008).
+	 */
+	public StateCluster build_Action(GrammarAST action) {
+		NFAState left = newState();
+		NFAState right = newState();
+		Transition e = new Transition(new ActionLabel(action), right);
+		left.addTransition(e);
+		return new StateCluster(left, right);
+	}
 
 	/** add an EOF transition to any rule end NFAState that points to nothing
      *  (i.e., for all those rules not invoked by another rule).  These
@@ -274,26 +296,15 @@ public class NFAFactory {
 		int numberUnInvokedRules = 0;
         for (Iterator iterator = rules.iterator(); iterator.hasNext();) {
 			Rule r = (Rule) iterator.next();
-			String ruleName = r.name;
-			NFAState endNFAState = nfa.grammar.getRuleStopState(ruleName);
+			NFAState endNFAState = r.stopState;
             // Is this rule a start symbol?  (no follow links)
-            if ( endNFAState.transition(0)==null ) {
-                // if so, then don't let algorithm fall off the end of
-                // the rule, make it hit EOF/EOT.
-				/*
-				if ( nfa.grammar.type==Grammar.LEXER ) {
-					return; // 11/28/2005: try having only Tokens with EOT transition
-				}
-                if ( nfa.grammar.type!=Grammar.LEXER ||
-					 ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
-				{
-					build_EOFState(endNFAState);
-				}
-				*/
+			if ( endNFAState.transition[0] ==null ) {
+				// if so, then don't let algorithm fall off the end of
+				// the rule, make it hit EOF/EOT.
 				build_EOFState(endNFAState);
 				// track how many rules have been invoked by another rule
 				numberUnInvokedRules++;
-            }
+			}
         }
 		return numberUnInvokedRules;
     }
@@ -309,14 +320,14 @@ public class NFAFactory {
             label = Label.EOT;
 			end.setEOTTargetState(true);
         }
-        /*
+		/*
 		System.out.println("build "+nfa.grammar.getTokenDisplayName(label)+
 						   " loop on end of state "+endNFAState.getDescription()+
 						   " to state "+end.stateNumber);
-        */
+		*/
 		Transition toEnd = new Transition(label, end);
-        endNFAState.addTransition(toEnd);
-    }
+		endNFAState.addTransition(toEnd);
+	}
 
     /** From A B build A-e->B (that is, build an epsilon arc from right
      *  of A to left of B).
@@ -658,9 +669,11 @@ public class NFAFactory {
 	 */
 
     /** Build an atom with all possible values in its label */
-    public StateCluster build_Wildcard() {
+    public StateCluster build_Wildcard(GrammarAST associatedAST) {
         NFAState left = newState();
         NFAState right = newState();
+        left.associatedASTNode = associatedAST;
+        right.associatedASTNode = associatedAST;
         Label label = new Label(nfa.grammar.getTokenTypes()); // char or tokens
         Transition e = new Transition(label,right);
         left.addTransition(e);
@@ -668,6 +681,34 @@ public class NFAFactory {
         return g;
     }
 
+    /** Build a subrule matching ^(. .*) (any tree or node). Let's use
+     *  (^(. .+) | .) to be safe.
+     */
+    public StateCluster build_WildcardTree(GrammarAST associatedAST) {
+        StateCluster wildRoot = build_Wildcard(associatedAST);
+
+        StateCluster down = build_Atom(Label.DOWN, associatedAST);
+        wildRoot = build_AB(wildRoot,down); // hook in; . DOWN
+
+        // make .+
+        StateCluster wildChildren = build_Wildcard(associatedAST);
+        wildChildren = build_Aplus(wildChildren);
+        wildRoot = build_AB(wildRoot,wildChildren); // hook in; . DOWN .+
+
+        StateCluster up = build_Atom(Label.UP, associatedAST);
+        wildRoot = build_AB(wildRoot,up); // hook in; . DOWN .+ UP
+
+        // make optional . alt
+        StateCluster optionalNodeAlt = build_Wildcard(associatedAST);
+
+        List alts = new ArrayList();
+        alts.add(wildRoot);
+        alts.add(optionalNodeAlt);
+        StateCluster blk = build_AlternativeBlock(alts);
+
+        return blk;
+    }
+
     /** Given a collapsed block of alts (a set of atoms), pull out
      *  the set and return it.
      */
diff --git a/src/org/antlr/tool/NameSpaceChecker.java b/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
similarity index 81%
rename from src/org/antlr/tool/NameSpaceChecker.java
rename to tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
index 3a528a5..75c6e14 100644
--- a/src/org/antlr/tool/NameSpaceChecker.java
+++ b/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,11 @@ public class NameSpaceChecker {
 	}
 
 	public void checkConflicts() {
-		for (int i = 0; i < grammar.ruleIndexToRuleList.size(); i++) {
-			String ruleName = (String) grammar.ruleIndexToRuleList.elementAt(i);
-			if ( ruleName==null ) {
+		for (int i = CompositeGrammar.MIN_RULE_INDEX; i < grammar.composite.ruleIndexToRuleList.size(); i++) {
+			Rule r = grammar.composite.ruleIndexToRuleList.elementAt(i);
+			if ( r==null ) {
 				continue;
 			}
-			Rule r = grammar.getRule(ruleName);
 			// walk all labels for Rule r
 			if ( r.labelNameSpace!=null ) {
 				Iterator it = r.labelNameSpace.values().iterator();
@@ -98,7 +97,9 @@ public class NameSpaceChecker {
 		String ruleName = r.name;
 		antlr.Token ruleToken = r.tree.getToken();
 		int msgID = 0;
-		if ( grammar.type==Grammar.PARSER && Character.isUpperCase(ruleName.charAt(0)) ) {
+		if ( (grammar.type==Grammar.PARSER||grammar.type==Grammar.TREE_PARSER) &&
+			 Character.isUpperCase(ruleName.charAt(0)) )
+		{
 			msgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
         }
         else if ( grammar.type==Grammar.LEXER &&
@@ -116,6 +117,8 @@ public class NameSpaceChecker {
 	}
 
 	/** If ref to undefined rule, give error at first occurrence.
+	 * 
+	 *  Give error if you cannot find the scope override on a rule reference.
 	 *
 	 *  If you ref ID in a combined grammar and don't define ID as a lexer rule
 	 *  it is an error.
@@ -123,11 +126,16 @@ public class NameSpaceChecker {
 	protected void lookForReferencesToUndefinedSymbols() {
 		// for each rule ref, ask if there is a rule definition
 		for (Iterator iter = grammar.ruleRefs.iterator(); iter.hasNext();) {
-			Token tok = (Token) iter.next();
+			GrammarAST refAST = (GrammarAST)iter.next();
+			Token tok = refAST.token;
 			String ruleName = tok.getText();
-			if ( grammar.getRule(ruleName)==null &&
-			     grammar.getTokenType(ruleName)!=Label.EOF )
-			{
+			Rule localRule = grammar.getLocallyDefinedRule(ruleName);
+			Rule rule = grammar.getRule(ruleName);
+			if ( localRule==null && rule!=null ) { // imported rule?
+				grammar.delegatedRuleReferences.add(rule);
+				rule.imported = true;
+			}
+			if ( rule==null && grammar.getTokenType(ruleName)!=Label.EOF ) {
 				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
 										  grammar,
 										  tok,
@@ -135,10 +143,12 @@ public class NameSpaceChecker {
 			}
         }
 		if ( grammar.type==Grammar.COMBINED ) {
+			// if we're a combined grammar, we know which token IDs have no
+			// associated lexer rule.
 			for (Iterator iter = grammar.tokenIDRefs.iterator(); iter.hasNext();) {
 				Token tok = (Token) iter.next();
 				String tokenID = tok.getText();
-				if ( !grammar.lexerRules.contains(tokenID) &&
+				if ( !grammar.composite.lexerRules.contains(tokenID) &&
 					 grammar.getTokenType(tokenID)!=Label.EOF )
 				{
 					ErrorManager.grammarWarning(ErrorManager.MSG_NO_TOKEN_DEFINITION,
@@ -148,6 +158,30 @@ public class NameSpaceChecker {
 				}
 			}
 		}
+		// check scopes and scoped rule refs
+		for (Iterator it = grammar.scopedRuleRefs.iterator(); it.hasNext();) {
+			GrammarAST scopeAST = (GrammarAST)it.next(); // ^(DOT ID atom)
+			Grammar scopeG = grammar.composite.getGrammar(scopeAST.getText());
+			GrammarAST refAST = scopeAST.getChild(1);
+			String ruleName = refAST.getText();
+			if ( scopeG==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_NO_SUCH_GRAMMAR_SCOPE,
+										  grammar,
+										  scopeAST.getToken(),
+										  scopeAST.getText(),
+										  ruleName);
+			}
+			else {
+				Rule rule = grammar.getRule(scopeG.name, ruleName);
+				if ( rule==null ) {
+					ErrorManager.grammarError(ErrorManager.MSG_NO_SUCH_RULE_IN_SCOPE,
+											  grammar,
+											  scopeAST.getToken(),
+											  scopeAST.getText(),
+											  ruleName);
+				}
+			}
+		}
 	}
 
 	protected void checkForGlobalScopeTokenConflict(AttributeScope scope) {
diff --git a/src/org/antlr/tool/NonRegularDecisionMessage.java b/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
similarity index 90%
rename from src/org/antlr/tool/NonRegularDecisionMessage.java
rename to tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
index 884fd65..fc0d9d1 100644
--- a/src/org/antlr/tool/NonRegularDecisionMessage.java
+++ b/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -35,9 +35,9 @@ import java.util.*;
 /** More a single alternative recurses so this decision is not regular. */
 public class NonRegularDecisionMessage extends Message {
 	public DecisionProbe probe;
-	public Set altsWithRecursion;
+	public Set<Integer> altsWithRecursion;
 
-	public NonRegularDecisionMessage(DecisionProbe probe, Set altsWithRecursion) {
+	public NonRegularDecisionMessage(DecisionProbe probe, Set<Integer> altsWithRecursion) {
 		super(ErrorManager.MSG_NONREGULAR_DECISION);
 		this.probe = probe;
 		this.altsWithRecursion = altsWithRecursion;
@@ -53,7 +53,7 @@ public class NonRegularDecisionMessage extends Message {
 		}
 
 		StringTemplate st = getMessageTemplate();
-		String ruleName = probe.dfa.getNFADecisionStartState().getEnclosingRule();
+		String ruleName = probe.dfa.getNFADecisionStartState().enclosingRule.name;
 		st.setAttribute("ruleName", ruleName);
 		List sortedAlts = new ArrayList();
 		sortedAlts.addAll(altsWithRecursion);
diff --git a/tool/src/main/java/org/antlr/tool/RandomPhrase.java b/tool/src/main/java/org/antlr/tool/RandomPhrase.java
new file mode 100644
index 0000000..73b8e41
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/RandomPhrase.java
@@ -0,0 +1,222 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.tool;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.Utils;
+import org.antlr.misc.IntervalSet;
+import org.antlr.Tool;
+
+import java.util.*;
+import java.io.FileReader;
+import java.io.BufferedReader;
+
+/** Generate a random phrase given a grammar.
+ *  Usage:
+ *     java org.antlr.tool.RandomPhrase grammarFile.g startRule [seed]
+ *
+ *  For example:
+ *     java org.antlr.tool.RandomPhrase simple.g program 342
+ *
+ *  The seed acts like a unique identifier so you can get the same random
+ *  phrase back during unit testing, for example.
+ *
+ *  If you do not specify a seed then the current time in milliseconds is used
+ *  guaranteeing that you'll never see that seed again.
+ *
+ *  NOTE: this does not work well for large grammars...it tends to recurse
+ *  too much and build really long strings.  I need throttle control; later.
+ */
+public class RandomPhrase {
+	public static final boolean debug = false;
+
+	protected static Random random;
+
+	/** an experimental method to generate random phrases for a given
+	 *  grammar given a start rule.  Return a list of token types.
+	 */
+	protected static void randomPhrase(Grammar g, List<Integer> tokenTypes, String startRule) {
+		NFAState state = g.getRuleStartState(startRule);
+		NFAState stopState = g.getRuleStopState(startRule);
+
+		Stack ruleInvocationStack = new Stack();
+		while ( true ) {
+			if ( state==stopState && ruleInvocationStack.size()==0 ) {
+				break;
+			}
+			if ( debug ) System.out.println("state "+state);
+			if ( state.getNumberOfTransitions()==0 ) {
+				if ( debug ) System.out.println("dangling state: "+state);
+				return;
+			}
+			// end of rule node
+			if ( state.isAcceptState() ) {
+				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
+				if ( debug ) System.out.println("pop invoking state "+invokingState);
+				//System.out.println("leave "+state.enclosingRule.name);
+				RuleClosureTransition invokingTransition =
+					(RuleClosureTransition)invokingState.transition[0];
+				// move to node after state that invoked this rule
+				state = invokingTransition.followState;
+				continue;
+			}
+			if ( state.getNumberOfTransitions()==1 ) {
+				// no branching, just take this path
+				Transition t0 = state.transition[0];
+				if ( t0 instanceof RuleClosureTransition ) {
+					ruleInvocationStack.push(state);
+					if ( debug ) System.out.println("push state "+state);
+					//System.out.println("call "+((RuleClosureTransition)t0).rule.name);
+					//System.out.println("stack depth="+ruleInvocationStack.size());
+				}
+				else if ( t0.label.isSet() || t0.label.isAtom() ) {
+					tokenTypes.add( getTokenType(t0.label) );
+				}
+				state = (NFAState)t0.target;
+				continue;
+			}
+
+			int decisionNumber = state.getDecisionNumber();
+			if ( decisionNumber==0 ) {
+				System.out.println("weird: no decision number but a choice node");
+				continue;
+			}
+			// decision point, pick ith alternative randomly
+			int n = g.getNumberOfAltsForDecisionNFA(state);
+			int randomAlt = random.nextInt(n) + 1;
+			if ( debug ) System.out.println("randomAlt="+randomAlt);
+			NFAState altStartState =
+				g.getNFAStateForAltOfDecision(state, randomAlt);
+			Transition t = altStartState.transition[0];
+			state = (NFAState)t.target;
+		}
+	}
+
+	protected static Integer getTokenType(Label label) {
+		if ( label.isSet() ) {
+			// pick random element of set
+			IntervalSet typeSet = (IntervalSet)label.getSet();
+			int randomIndex = random.nextInt(typeSet.size());
+			return typeSet.get(randomIndex);
+		}
+		else {
+			return Utils.integer(label.getAtom());
+		}
+		//System.out.println(t0.label.toString(g));
+	}
+
+	/** Used to generate random strings */
+	public static void main(String[] args) {
+		if ( args.length < 2 ) {
+			System.err.println("usage: java org.antlr.tool.RandomPhrase grammarfile startrule");
+			return;
+		}
+		String grammarFileName = args[0];
+		String startRule = args[1];
+		long seed = System.currentTimeMillis(); // use random seed unless spec.
+		if ( args.length==3 ) {
+			String seedStr = args[2];
+			seed = Long.parseLong(seedStr);
+		}
+		try {
+			random = new Random(seed);
+
+			CompositeGrammar composite = new CompositeGrammar();
+			Grammar parser = new Grammar(new Tool(), grammarFileName, composite);
+			composite.setDelegationRoot(parser);
+
+			FileReader fr = new FileReader(grammarFileName);
+			BufferedReader br = new BufferedReader(fr);
+			parser.parseAndBuildAST(br);
+			br.close();
+
+			parser.composite.assignTokenTypes();
+			parser.composite.defineGrammarSymbols();
+			parser.composite.createNFAs();
+
+			List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
+			if ( leftRecursiveRules.size()>0 ) {
+				return;
+			}
+
+			if ( parser.getRule(startRule)==null ) {
+				System.out.println("undefined start rule "+startRule);
+				return;
+			}
+
+			String lexerGrammarText = parser.getLexerGrammar();
+			Grammar lexer = new Grammar();
+			lexer.importTokenVocabulary(parser);
+			lexer.fileName = grammarFileName;
+			if ( lexerGrammarText!=null ) {
+				lexer.setGrammarContent(lexerGrammarText);
+			}
+			else {
+				System.err.println("no lexer grammar found in "+grammarFileName);
+			}
+			lexer.buildNFA();
+			leftRecursiveRules = lexer.checkAllRulesForLeftRecursion();
+			if ( leftRecursiveRules.size()>0 ) {
+				return;
+			}
+			//System.out.println("lexer:\n"+lexer);
+
+			List<Integer> tokenTypes = new ArrayList<Integer>(100);
+			randomPhrase(parser, tokenTypes, startRule);
+			System.out.println("token types="+tokenTypes);
+			for (int i = 0; i < tokenTypes.size(); i++) {
+				Integer ttypeI = (Integer) tokenTypes.get(i);
+				int ttype = ttypeI.intValue();
+				String ttypeDisplayName = parser.getTokenDisplayName(ttype);
+				if ( Character.isUpperCase(ttypeDisplayName.charAt(0)) ) {
+					List<Integer> charsInToken = new ArrayList<Integer>(10);
+					randomPhrase(lexer, charsInToken, ttypeDisplayName);
+					System.out.print(" ");
+					for (int j = 0; j < charsInToken.size(); j++) {
+						java.lang.Integer cI = (java.lang.Integer) charsInToken.get(j);
+						System.out.print((char)cI.intValue());
+					}
+				}
+				else { // it's a literal
+					String literal =
+						ttypeDisplayName.substring(1,ttypeDisplayName.length()-1);
+					System.out.print(" "+literal);
+				}
+			}
+			System.out.println();
+		}
+		catch (Error er) {
+			System.err.println("Error walking "+grammarFileName+" rule "+startRule+" seed "+seed);
+			er.printStackTrace(System.err);
+		}
+		catch (Exception e) {
+			System.err.println("Exception walking "+grammarFileName+" rule "+startRule+" seed "+seed);
+			e.printStackTrace(System.err);
+		}
+	}
+}
diff --git a/src/org/antlr/tool/RecursionOverflowMessage.java b/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
similarity index 98%
rename from src/org/antlr/tool/RecursionOverflowMessage.java
rename to tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
index 221d9f1..1998fde 100644
--- a/src/org/antlr/tool/RecursionOverflowMessage.java
+++ b/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/src/org/antlr/tool/Rule.java b/tool/src/main/java/org/antlr/tool/Rule.java
similarity index 85%
rename from src/org/antlr/tool/Rule.java
rename to tool/src/main/java/org/antlr/tool/Rule.java
index 47f615a..2a77212 100644
--- a/src/org/antlr/tool/Rule.java
+++ b/tool/src/main/java/org/antlr/tool/Rule.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,11 +29,13 @@ package org.antlr.tool;
 
 import antlr.CommonToken;
 import org.antlr.analysis.NFAState;
+import org.antlr.analysis.LookaheadSet;
 import org.antlr.codegen.CodeGenerator;
 
 import java.util.*;
+import org.antlr.grammar.v2.ANTLRParser;
 
-/** Combine the info associated with a rule */
+/** Combine the info associated with a rule. */
 public class Rule {
 	public String name;
 	public int index;
@@ -45,7 +47,12 @@ public class Rule {
 	protected Map options;
 
 	public static final Set legalOptions =
-			new HashSet() {{add("k"); add("greedy"); add("memoize"); add("backtrack");}};
+			new HashSet() {
+                {
+                    add("k"); add("greedy"); add("memoize");
+                    add("backtrack"); add("strategy");
+                }
+            };
 
 	/** The AST representing the whole rule */
 	public GrammarAST tree;
@@ -58,6 +65,12 @@ public class Rule {
 
 	public GrammarAST EORNode;
 
+	/** The set of all tokens reachable from the start state w/o leaving
+	 *  via the accept state.  If it reaches the accept state, FIRST
+	 *  includes EOR_TOKEN_TYPE.
+	 */
+	public LookaheadSet FIRST;
+
 	/** The return values of a rule and predefined rule attributes */
 	public AttributeScope returnScope;
 
@@ -69,8 +82,14 @@ public class Rule {
 	/** A list of scope names (String) used by this rule */
 	public List useScopes;
 
-	/** A list of all LabelElementPair attached to tokens like id=ID */
-	public LinkedHashMap tokenLabels;
+    /** A list of all LabelElementPair attached to tokens like id=ID */
+    public LinkedHashMap tokenLabels;
+
+    /** A list of all LabelElementPair attached to tokens like x=. in tree grammar */
+    public LinkedHashMap wildcardTreeLabels;
+
+    /** A list of all LabelElementPair attached to tokens like x+=. in tree grammar */
+    public LinkedHashMap wildcardTreeListLabels;
 
 	/** A list of all LabelElementPair attached to single char literals like x='a' */
 	public LinkedHashMap charLabels;
@@ -142,6 +161,8 @@ public class Rule {
 
 	public boolean isSynPred = false;
 
+	public boolean imported = false;
+
 	public Rule(Grammar grammar,
 				String ruleName,
 				int ruleIndex,
@@ -165,34 +186,32 @@ public class Rule {
 		pair.type = type;
 		labelNameSpace.put(label.getText(), pair);
 		switch ( type ) {
-			case Grammar.TOKEN_LABEL :
-				if ( tokenLabels==null ) {
-					tokenLabels = new LinkedHashMap();
-				}
-				tokenLabels.put(label.getText(), pair);
-				break;
+            case Grammar.TOKEN_LABEL :
+                if ( tokenLabels==null ) tokenLabels = new LinkedHashMap();
+                tokenLabels.put(label.getText(), pair);
+                break;
+            case Grammar.WILDCARD_TREE_LABEL :
+                if ( wildcardTreeLabels==null ) wildcardTreeLabels = new LinkedHashMap();
+                wildcardTreeLabels.put(label.getText(), pair);
+                break;
+            case Grammar.WILDCARD_TREE_LIST_LABEL :
+                if ( wildcardTreeListLabels==null ) wildcardTreeListLabels = new LinkedHashMap();
+                wildcardTreeListLabels.put(label.getText(), pair);
+                break;
 			case Grammar.RULE_LABEL :
-				if ( ruleLabels==null ) {
-					ruleLabels = new LinkedHashMap();
-				}
+				if ( ruleLabels==null ) ruleLabels = new LinkedHashMap();
 				ruleLabels.put(label.getText(), pair);
 				break;
 			case Grammar.TOKEN_LIST_LABEL :
-				if ( tokenListLabels==null ) {
-					tokenListLabels = new LinkedHashMap();
-				}
+				if ( tokenListLabels==null ) tokenListLabels = new LinkedHashMap();
 				tokenListLabels.put(label.getText(), pair);
 				break;
 			case Grammar.RULE_LIST_LABEL :
-				if ( ruleListLabels==null ) {
-					ruleListLabels = new LinkedHashMap();
-				}
+				if ( ruleListLabels==null ) ruleListLabels = new LinkedHashMap();
 				ruleListLabels.put(label.getText(), pair);
 				break;
 			case Grammar.CHAR_LABEL :
-				if ( charLabels==null ) {
-					charLabels = new LinkedHashMap();
-				}
+				if ( charLabels==null ) charLabels = new LinkedHashMap();
 				charLabels.put(label.getText(), pair);
 				break;
 		}
@@ -338,6 +357,11 @@ public class Rule {
 	}
 
 	public boolean hasRewrite(int i) {
+		if ( i >= altsWithRewrites.length ) {
+			ErrorManager.internalError("alt "+i+" exceeds number of "+name+
+									   "'s alts ("+altsWithRewrites.length+")");
+			return false;
+		}
 		return altsWithRewrites[i];
 	}
 
@@ -530,6 +554,12 @@ public class Rule {
 		if ( options==null ) {
 			options = new HashMap();
 		}
+        if ( key.equals("memoize") && value.toString().equals("true") ) {
+            grammar.atLeastOneRuleMemoizes = true;
+        }
+        if ( key.equals("backtrack") && value.toString().equals("true") ) {
+            grammar.composite.getRootGrammar().atLeastOneBacktrackOption = true;
+        }
 		if ( key.equals("k") ) {
 			grammar.numberOfManualLookaheadOptions++;
 		}
@@ -553,10 +583,20 @@ public class Rule {
 		}
 	}
 
+	/** Used during grammar imports to see if sets of rules intersect... This
+	 *  method and hashCode use the String name as the key for Rule objects.
+	public boolean equals(Object other) {
+		return this.name.equals(((Rule)other).name);
+	}
+	 */
+
+	/** Used during grammar imports to see if sets of rules intersect...
+	public int hashCode() {
+		return name.hashCode();
+	}
+	 * */
+
 	public String toString() { // used for testing
-		if ( modifier!=null ) {
-			return modifier+" "+name;
-		}
-		return name;
+		return "["+grammar.name+"."+name+",index="+index+",line="+tree.getToken().getLine()+"]";
 	}
 }
diff --git a/src/org/antlr/tool/RuleLabelScope.java b/tool/src/main/java/org/antlr/tool/RuleLabelScope.java
similarity index 96%
rename from src/org/antlr/tool/RuleLabelScope.java
rename to tool/src/main/java/org/antlr/tool/RuleLabelScope.java
index 256320b..842f704 100644
--- a/src/org/antlr/tool/RuleLabelScope.java
+++ b/tool/src/main/java/org/antlr/tool/RuleLabelScope.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -62,6 +62,7 @@ public class RuleLabelScope extends AttributeScope {
 			addAttribute("channel", null);
 			addAttribute("start", null);
 			addAttribute("stop", null);
+			addAttribute("int", null);
 			isPredefinedLexerRuleScope = true;
 		}};
 
@@ -70,7 +71,7 @@ public class RuleLabelScope extends AttributeScope {
 			null,
 			predefinedLexerRulePropertiesScope,	// LEXER
 			predefinedRulePropertiesScope,		// PARSER
-			predefinedTreeRulePropertiesScope,		// TREE_PARSER
+			predefinedTreeRulePropertiesScope,	// TREE_PARSER
 			predefinedRulePropertiesScope,		// COMBINED
 		};
 
diff --git a/tool/src/main/java/org/antlr/tool/Strip.java b/tool/src/main/java/org/antlr/tool/Strip.java
new file mode 100644
index 0000000..26422f0
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Strip.java
@@ -0,0 +1,239 @@
+package org.antlr.tool;
+
+import org.antlr.grammar.v3.ANTLRv3Lexer;
+import org.antlr.grammar.v3.ANTLRv3Parser;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeWizard;
+
+import java.util.List;
+
+/** A basic action stripper. */
+public class Strip {
+    protected String filename;
+    protected TokenRewriteStream tokens;
+    protected boolean tree_option = false;
+    protected String args[];
+
+    public static void main(String args[]) throws Exception {
+        Strip s = new Strip(args);
+        s.parseAndRewrite();
+        System.out.println(s.tokens);
+    }
+
+    public Strip(String[] args) { this.args = args; }
+
+    public TokenRewriteStream getTokenStream() { return tokens; }
+
+    public void parseAndRewrite() throws Exception {
+        processArgs(args);
+        CharStream input = null;
+        if ( filename!=null ) input = new ANTLRFileStream(filename);
+        else input = new ANTLRInputStream(System.in);
+        // BUILD AST
+        ANTLRv3Lexer lex = new ANTLRv3Lexer(input);
+        tokens = new TokenRewriteStream(lex);
+        ANTLRv3Parser g = new ANTLRv3Parser(tokens);
+        ANTLRv3Parser.grammarDef_return r = g.grammarDef();
+        CommonTree t = (CommonTree)r.getTree();
+        if (tree_option) System.out.println(t.toStringTree());
+        rewrite(g.getTreeAdaptor(),t,g.getTokenNames());
+    }
+
+    public void rewrite(TreeAdaptor adaptor, CommonTree t, String[] tokenNames) throws Exception {
+        TreeWizard wiz = new TreeWizard(adaptor, tokenNames);
+
+        // ACTIONS STUFF
+        wiz.visit(t, ANTLRv3Parser.ACTION,
+           new TreeWizard.Visitor() {
+               public void visit(Object t) { ACTION(tokens, (CommonTree)t); }
+           });
+
+        wiz.visit(t, ANTLRv3Parser.AT,  // ^('@' id ACTION) rule actions
+            new TreeWizard.Visitor() {
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  CommonTree action = null;
+                  if ( a.getChildCount()==2 ) action = (CommonTree)a.getChild(1);
+                  else if ( a.getChildCount()==3 ) action = (CommonTree)a.getChild(2);
+                  if ( action.getType()==ANTLRv3Parser.ACTION ) {
+                      tokens.delete(a.getTokenStartIndex(),
+                                    a.getTokenStopIndex());
+                      killTrailingNewline(tokens, action.getTokenStopIndex());
+                  }
+              }
+            });
+        wiz.visit(t, ANTLRv3Parser.ARG, // wipe rule arguments
+                  new TreeWizard.Visitor() {
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  a = (CommonTree)a.getChild(0);
+                  tokens.delete(a.token.getTokenIndex());
+                  killTrailingNewline(tokens, a.token.getTokenIndex());
+              }
+            });
+        wiz.visit(t, ANTLRv3Parser.RET, // wipe rule return declarations
+            new TreeWizard.Visitor() {
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    CommonTree ret = (CommonTree)a.getChild(0);
+                    tokens.delete(a.token.getTokenIndex(),
+                                  ret.token.getTokenIndex());
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.SEMPRED, // comment out semantic predicates
+            new TreeWizard.Visitor() {
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    tokens.replace(a.token.getTokenIndex(), "/*"+a.getText()+"*/");
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.GATED_SEMPRED, // comment out semantic predicates
+            new TreeWizard.Visitor() {
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    String text = tokens.toString(a.getTokenStartIndex(),
+                                                  a.getTokenStopIndex());
+                    tokens.replace(a.getTokenStartIndex(),
+                                   a.getTokenStopIndex(),
+                                   "/*"+text+"*/");
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.SCOPE, // comment scope specs
+            new TreeWizard.Visitor() {
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    tokens.delete(a.getTokenStartIndex(),
+                                  a.getTokenStopIndex());
+                    killTrailingNewline(tokens, a.getTokenStopIndex());
+                }
+            });        
+        wiz.visit(t, ANTLRv3Parser.ARG_ACTION, // args r[x,y] -> ^(r [x,y])
+            new TreeWizard.Visitor() {
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    if ( a.getParent().getType()==ANTLRv3Parser.RULE_REF ) {
+                        tokens.delete(a.getTokenStartIndex(),
+                                      a.getTokenStopIndex());
+                    }
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.LABEL_ASSIGN, // ^('=' id ^(RULE_REF [arg])), ...
+            new TreeWizard.Visitor() {
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    if ( !a.hasAncestor(ANTLRv3Parser.OPTIONS) ) { // avoid options
+                        CommonTree child = (CommonTree)a.getChild(0);
+                        tokens.delete(a.token.getTokenIndex());     // kill "id="
+                        tokens.delete(child.token.getTokenIndex());
+                    }
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.LIST_LABEL_ASSIGN, // ^('+=' id ^(RULE_REF [arg])), ...
+            new TreeWizard.Visitor() {
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  CommonTree child = (CommonTree)a.getChild(0);
+                  tokens.delete(a.token.getTokenIndex());     // kill "id+="
+                  tokens.delete(child.token.getTokenIndex());
+              }
+            });
+
+
+        // AST STUFF
+        wiz.visit(t, ANTLRv3Parser.REWRITE,
+            new TreeWizard.Visitor() {
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  CommonTree child = (CommonTree)a.getChild(0);
+                  int stop = child.getTokenStopIndex();
+                  if ( child.getType()==ANTLRv3Parser.SEMPRED ) {
+                      CommonTree rew = (CommonTree)a.getChild(1);
+                      stop = rew.getTokenStopIndex();
+                  }
+                  tokens.delete(a.token.getTokenIndex(), stop);
+                  killTrailingNewline(tokens, stop);
+              }
+            });
+        wiz.visit(t, ANTLRv3Parser.ROOT,
+           new TreeWizard.Visitor() {
+               public void visit(Object t) {
+                   tokens.delete(((CommonTree)t).token.getTokenIndex());
+               }
+           });
+        wiz.visit(t, ANTLRv3Parser.BANG,
+           new TreeWizard.Visitor() {
+               public void visit(Object t) {
+                   tokens.delete(((CommonTree)t).token.getTokenIndex());
+               }
+           });
+    }
+
+    public static void ACTION(TokenRewriteStream tokens, CommonTree t) {
+        CommonTree parent = (CommonTree)t.getParent();
+        int ptype = parent.getType();
+        if ( ptype==ANTLRv3Parser.SCOPE || // we have special rules for these
+             ptype==ANTLRv3Parser.AT )
+        {
+            return;
+        }
+        //System.out.println("ACTION: "+t.getText());
+        CommonTree root = (CommonTree)t.getAncestor(ANTLRv3Parser.RULE);
+        if ( root!=null ) {
+            CommonTree rule = (CommonTree)root.getChild(0);
+            //System.out.println("rule: "+rule);
+            if ( !Character.isUpperCase(rule.getText().charAt(0)) ) {
+                tokens.delete(t.getTokenStartIndex(),t.getTokenStopIndex());
+                killTrailingNewline(tokens, t.token.getTokenIndex());
+            }
+        }
+    }
+
+    private static void killTrailingNewline(TokenRewriteStream tokens, int index) {
+        List all = tokens.getTokens();
+        Token tok = (Token)all.get(index);
+        Token after = (Token)all.get(index+1);
+        String ws = after.getText();
+        if ( ws.startsWith("\n") ) {
+            //System.out.println("killing WS after action");
+            if ( ws.length()>1 ) {
+                int space = ws.indexOf(' ');
+                int tab = ws.indexOf('\t');
+                if ( ws.startsWith("\n") &&
+                     space>=0 || tab>=0 )
+                {
+                    return; // do nothing if \n + indent
+                }
+                // otherwise kill all \n
+                ws = ws.replaceAll("\n", "");
+                tokens.replace(after.getTokenIndex(), ws);
+            }
+            else {
+                tokens.delete(after.getTokenIndex());
+            }
+        }
+    }
+
+    public void processArgs(String[] args) {
+		if ( args==null || args.length==0 ) {
+			help();
+			return;
+		}
+		for (int i = 0; i < args.length; i++) {
+			if (args[i].equals("-tree")) tree_option = true;
+			else {
+				if (args[i].charAt(0) != '-') {
+					// Must be the grammar file
+                    filename = args[i];
+				}
+			}
+		}
+	}
+
+    private static void help() {
+        System.err.println("usage: java org.antlr.tool.Strip [args] file.g");
+        System.err.println("  -tree      print out ANTLR grammar AST");
+    }
+
+}
diff --git a/src/org/antlr/tool/ToolMessage.java b/tool/src/main/java/org/antlr/tool/ToolMessage.java
similarity index 98%
rename from src/org/antlr/tool/ToolMessage.java
rename to tool/src/main/java/org/antlr/tool/ToolMessage.java
index 6506a44..74d9ac7 100644
--- a/src/org/antlr/tool/ToolMessage.java
+++ b/tool/src/main/java/org/antlr/tool/ToolMessage.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
diff --git a/tool/src/main/java/org/antlr/tool/serialize.g b/tool/src/main/java/org/antlr/tool/serialize.g
new file mode 100644
index 0000000..2755e0d
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/serialize.g
@@ -0,0 +1,238 @@
+header {
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+	package org.antlr.tool;
+	import java.util.*;
+	import org.antlr.analysis.*;
+	import org.antlr.misc.*;
+	import java.io.*;
+}
+
+class SerializerWalker extends TreeParser;
+
+options {
+	importVocab = ANTLR;
+	ASTLabelType = "GrammarAST";
+    codeGenBitsetTestThreshold=999;
+}
+
+{
+    public void reportError(RecognitionException ex) {
+		Token token = null;
+		if ( ex instanceof MismatchedTokenException ) {
+			token = ((MismatchedTokenException)ex).token;
+		}
+		else if ( ex instanceof NoViableAltException ) {
+			token = ((NoViableAltException)ex).token;
+		}
+        ErrorManager.syntaxError(
+            ErrorManager.MSG_SYNTAX_ERROR,
+            grammar,
+            token,
+            "serialize: "+ex.toString(),
+            ex);
+    }
+
+protected Grammar grammar;
+protected String currentRuleName;
+protected GrammarSerializer out;
+}
+
+grammar[GrammarSerializer out]
+{
+	this.out = out;
+}
+    :   #( LEXER_GRAMMAR 	grammarSpec[#grammar.getType()] )
+	|   #( PARSER_GRAMMAR   grammarSpec[#grammar.getType()] )
+	|   #( TREE_GRAMMAR     grammarSpec[#grammar.getType()] )
+	|   #( COMBINED_GRAMMAR grammarSpec[#grammar.getType()] )
+    ;
+
+grammarSpec[int gtokentype]
+	:	id:ID {out.grammar(gtokentype, #id.getText());}
+		(cmt:DOC_COMMENT)?
+		(optionsSpec)?
+        (delegateGrammars)?
+        (tokensSpec)?
+        (attrScope)*
+        (AMPERSAND)* // skip actions
+        rules
+	;
+
+attrScope
+	:	#( "scope" ID ACTION )
+	;
+
+optionsSpec
+    :   #( OPTIONS (option)+ )
+    ;
+
+option
+    :   #( ASSIGN ID optionValue )
+    ;
+
+optionValue 
+    :   id:ID
+    |   s:STRING_LITERAL
+    |   c:CHAR_LITERAL
+    |   i:INT
+    ;
+
+charSet
+	:   #( CHARSET charSetElement )
+	;
+
+charSetElement
+	:   c:CHAR_LITERAL
+	|   #( OR c1:CHAR_LITERAL c2:CHAR_LITERAL )
+	|   #( RANGE c3:CHAR_LITERAL c4:CHAR_LITERAL )
+	;
+
+delegateGrammars
+	:	#( "import"
+            (   #(ASSIGN ID ID)
+            |   ID
+            )+
+        )
+	;
+
+tokensSpec
+	:	#( TOKENS ( tokenSpec )+ )
+	;
+
+tokenSpec
+	:	t:TOKEN_REF
+	|	#( ASSIGN
+		   t2:TOKEN_REF      
+		   ( s:STRING_LITERAL
+		   | c:CHAR_LITERAL 
+		   )
+		 )
+	;
+
+rules
+    :   ( rule )+
+    ;
+
+rule
+    :   #( RULE id:ID           {out.rule(#id.getText());}
+           (m:modifier)?
+           (ARG (ARG_ACTION)?)
+           (RET (ARG_ACTION)?)
+           (optionsSpec)?
+           (ruleScopeSpec)?
+       	   (AMPERSAND)*
+           b:block
+           (exceptionGroup)?
+           EOR                  {out.endRule();}
+         )
+    ;
+
+modifier
+	:	"protected"
+	|	"public"
+	|	"private"
+	|	"fragment"
+	;
+
+ruleScopeSpec
+ 	:	#( "scope" (ACTION)? ( ID )* )
+ 	;
+
+block
+    :   #(  BLOCK {out.block(#BLOCK.getNumberOfChildrenWithType(ALT));}
+            (optionsSpec)?
+            ( alternative rewrite )+
+            EOB   
+         )
+    ;
+
+alternative
+    :   #( ALT {out.alt(#alternative);} (element)+ EOA {out.endAlt();} )
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    #("catch" ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    #("finally" ACTION)
+    ;
+
+rewrite
+	:	( #( REWRITE (SEMPRED)? (ALT|TEMPLATE|ACTION|ETC) ) )*
+	;
+
+element
+    :   #(ROOT element)
+    |   #(BANG element)
+    |   atom
+    |   #(NOT {out.not();} element)
+    |   #(RANGE atom atom)
+    |   #(CHAR_RANGE {out.range();} atom atom)
+    |	#(ASSIGN ID element)
+    |	#(PLUS_ASSIGN ID element)
+    |   ebnf
+    |   tree
+    |   #( SYNPRED block ) 
+    |   FORCED_ACTION
+    |   ACTION
+    |   SEMPRED
+    |   SYN_SEMPRED
+    |   BACKTRACK_SEMPRED
+    |   GATED_SEMPRED
+    |   EPSILON
+    ;
+
+ebnf:   block
+    |   #( OPTIONAL block ) 
+    |   #( CLOSURE block )  
+    |   #( POSITIVE_CLOSURE block ) 
+    ;
+
+tree:   #(TREE_BEGIN  element (element)*  )
+    ;
+
+atom
+    :   #( rr:RULE_REF (rarg:ARG_ACTION)? )     {out.ruleRef(#rr);}
+    |   #( t:TOKEN_REF (targ:ARG_ACTION )? )    {out.token(#t);}
+    |   c:CHAR_LITERAL                          {out.charLiteral(#c);}
+    |   s:STRING_LITERAL                        {out.charLiteral(#s);}
+    |   WILDCARD                                {out.wildcard(#WILDCARD);}
+    |   #(DOT ID atom) // scope override on rule
+    ;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
diff --git a/tool/src/main/resources/org/antlr/antlr.properties b/tool/src/main/resources/org/antlr/antlr.properties
new file mode 100644
index 0000000..45eb421
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/antlr.properties
@@ -0,0 +1,7 @@
+# This properties file is used by ANTLR to pick up any resources
+# and configuration options that may or may not be produced by
+# the build process. For instance the Tool version string is
+# filtered into this resource file so that we don't need to
+# hard code this into Tool.java and so on.
+#
+antlr.version=${project.version} ${buildNumber}
diff --git a/src/org/antlr/codegen/templates/ANTLRCore.sti b/tool/src/main/resources/org/antlr/codegen/templates/ANTLRCore.sti
similarity index 95%
rename from src/org/antlr/codegen/templates/ANTLRCore.sti
rename to tool/src/main/resources/org/antlr/codegen/templates/ANTLRCore.sti
index 89117a0..e4e5a15 100644
--- a/src/org/antlr/codegen/templates/ANTLRCore.sti
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ANTLRCore.sti
@@ -34,7 +34,7 @@ interface ANTLRCore;
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass, literals);
@@ -44,13 +44,13 @@ optional
 headerFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass, literals);
 
 lexer(grammar, name, tokens, scopes, rules, numRules, labelType,
-      filterMode);
+      filterMode, superClass);
 
 parser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
        bitsets, ASTLabelType, superClass,
@@ -61,7 +61,7 @@ parser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
  */
 treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
            numRules, bitsets, labelType, ASTLabelType,
-           superClass, members);
+           superClass, members, filterMode);
 
 /** A simpler version of a rule template that is specific to the imaginary
  *  rules created for syntactic predicates.  As they never have return values
@@ -122,15 +122,15 @@ optionalBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,
 optionalBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description);
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt);
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew);
 
 // E L E M E N T S
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex);
+tokenRef(token,label,elementIndex,hetero);
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex);
+tokenRefAndListLabel(token,label,elementIndex,hetero);
 
 listLabel(label,elem);
 
@@ -160,22 +160,23 @@ wildcardCharListLabel(label, elementIndex);
 /** Match a rule reference by invoking it possibly with arguments
  *  and a return value or values.
  */
-ruleRef(rule,label,elementIndex,args);
+ruleRef(rule,label,elementIndex,args,scope);
 
 /** ids+=ID */
-ruleRefAndListLabel(rule,label,elementIndex,args);
+ruleRefAndListLabel(rule,label,elementIndex,args,scope);
 
 /** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex);
+lexerRuleRef(rule,label,args,elementIndex,scope);
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex);
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope);
 
 /** EOF in the lexer */
 lexerMatchEOF(label,elementIndex);
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList);
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel);
 
 /** Every predicate is used as a validating predicate (even when it is
  *  also hoisted into a prediction expression).
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg
new file mode 100644
index 0000000..9392c5b
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg
@@ -0,0 +1,404 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+import org.antlr.runtime.tree.*;<\n>
+<endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected var adaptor:TreeAdaptor = new CommonTreeAdaptor();<\n>
+override public function set treeAdaptor(adaptor:TreeAdaptor):void {
+    this.adaptor = adaptor;
+    <grammar.directDelegates:{g|<g:delegateName()>.treeAdaptor = this.adaptor;}>
+}
+override public function get treeAdaptor():TreeAdaptor {
+    return adaptor;
+}
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+<ASTLabelType> tree;
+public function get tree():Object { return tree; }
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+var root_0:<ASTLabelType> = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{var <it.label.text>_tree:<ASTLabelType>=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{var <it.label.text>_tree:<ASTLabelType>=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = <ASTLabelType>(adaptor.nil());<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+    referencedWildcardLabels,
+    referencedWildcardListLabels,	
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = <ASTLabelType>(adaptor.nil());
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
+input.replaceChildren(adaptor.getParent(retval.start),
+                      adaptor.getChildIndex(retval.start),
+                      adaptor.getChildIndex(_last),
+                      retval.tree);
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;
+<endif>
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{var  stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.tree:null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>",list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.hasNext}; separator="||"> ) {
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.hasNext}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.hasNext}; separator="||">) ) {
+    throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.hasNext}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+var root_<treeLevel>:<ASTLabelType> = <ASTLabelType>(adaptor.nil());
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>));<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>));<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.addChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<action>, root_<treeLevel>));<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+<ASTLabelType>(adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>))
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg
new file mode 100644
index 0000000..d344cfb
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg
@@ -0,0 +1,190 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+retval.tree = <ASTLabelType>(adaptor.errorNode(input, Token(retval.start), input.LT(-1), re));
+<! trace("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_0));
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=<labelType>(input.LT(1));<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = <ASTLabelType>(adaptor.becomeRoot(<createNodeFromToken(...)>, root_0));})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <label>.tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>.tree, root_0));
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <ASTLabelType>(adaptor.create(<label>));
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <ASTLabelType>(adaptor.create(<label>));
+root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_0));
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+<ASTLabelType>(adaptor.create(<label>))
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
+adaptor.setTokenBoundaries(retval.tree, Token(retval.start), Token(retval.stop));
+<if(backtracking)>}<endif>
+>>
\ No newline at end of file
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg
new file mode 100644
index 0000000..34a5b65
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg
@@ -0,0 +1,296 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+var _first_0:<ASTLabelType> = null;
+var _last:<ASTLabelType> = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = <ASTLabelType>(_first_0);
+if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
+    retval.tree = <ASTLabelType>(adaptor.getParent(retval.tree));
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+{
+var _save_last_<treeLevel>:<ASTLabelType> = _last;
+var _first_<treeLevel>:<ASTLabelType> = null;
+<if(!rewriteMode)>
+var root_<treeLevel>:<ASTLabelType> = <ASTLabelType>(adaptor.nil());
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==TokenConstants.DOWN ) {
+    matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_<treeLevel>));
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.dupTree(<label>);
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_<treeLevel>));
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<treeLevel>, <label>.tree);
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>.tree, root_<treeLevel>));
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
+<if(backtracking)>}<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/Java/Java.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
similarity index 55%
copy from src/org/antlr/codegen/templates/Java/Java.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
index 92f6b75..8d13c4d 100644
--- a/src/org/antlr/codegen/templates/Java/Java.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
@@ -25,17 +25,13 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-group Java implements ANTLRCore;
+group ActionScript implements ANTLRCore;
 
-javaTypeInitMap ::= [
+asTypeInitMap ::= [
 	"int":"0",
-	"long":"0",
-	"float":"0.0",
-	"double":"0.0",
-	"boolean":"false",
-	"byte":"0",
-	"short":"0",
-	"char":"0",
+	"uint":"0",
+	"Number":"0.0",
+	"Boolean":"false",
 	default:"null" // anything other than an atomic type
 ]
 
@@ -45,46 +41,56 @@ javaTypeInitMap ::= [
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass, literals) ::=
 <<
 // $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<actions.(actionScope).header>
-
-<@imports>
+package<if(actions.(actionScope).package)> <actions.(actionScope).package><endif> {
+    <actions.(actionScope).header>
+    <@imports>
 import org.antlr.runtime.*;
 <if(TREE_PARSER)>
-import org.antlr.runtime.tree.*;
-<endif>
-import java.util.Stack;
-import java.util.List;
-import java.util.ArrayList;
-<if(backtracking)>
-import java.util.Map;
-import java.util.HashMap;
+    import org.antlr.runtime.tree.*;
 <endif>
-<@end>
+    <@end>
 
-<docComment>
-<recognizer>
+    <docComment>
+    <recognizer>
+}
 >>
 
 lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-public class <name> extends Lexer {
-    <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
+      filterMode, superClass="Lexer") ::= <<
+public class <grammar.recognizerName> extends <if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else><@superClassName><superClass><@end><endif> {
+    <tokens:{public static const <it.name>:int=<it.type>;}; separator="\n">
     <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
     <actions.lexer.members>
-    public <name>() {;} <! needed by subclasses !>
-    public <name>(CharStream input) {
-        super(input);
-<if(backtracking)>
-        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+
+    // delegates
+    <grammar.delegates:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    <last(grammar.delegators):{g|public var gParent:<g.recognizerName>;}>
+
+    public function <grammar.recognizerName>(<grammar.delegators:{g|<g:delegateName()>:<g.recognizerName>, }>input:CharStream = null, state:RecognizerSharedState = null) {
+        super(input, state);
+        <cyclicDFAs:cyclicDFACtor()>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        this.state.ruleMemo = new Array(<numRules>+1);<\n> <! index from 1..n !>
 <endif>
+<endif>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>this, input, this.state);}; separator="\n">
+        <grammar.delegators:
+         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
     }
-    public String getGrammarFileName() { return "<fileName>"; }
+    public override function get grammarFileName():String { return "<fileName>"; }
 
 <if(filterMode)>
     <filteringNextToken()>
@@ -93,7 +99,6 @@ public class <name> extends Lexer {
 
     <synpreds:{p | <lexerSynpred(p)>}>
 
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
     <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
 
 }
@@ -106,87 +111,110 @@ public class <name> extends Lexer {
  *  at backtracking==1.
  */
 filteringNextToken() ::= <<
-public Token nextToken() {
+public override function nextToken():Token {
     while (true) {
-        if ( input.LA(1)==CharStream.EOF ) {
-            return Token.EOF_TOKEN;
+        if ( input.LA(1)==CharStreamConstants.EOF ) {
+            return TokenConstants.EOF_TOKEN;
         }
-        token = null;
-	channel = Token.DEFAULT_CHANNEL;
-        tokenStartCharIndex = input.index();
-        tokenStartCharPositionInLine = input.getCharPositionInLine();
-        tokenStartLine = input.getLine();
-	text = null;
+        this.state.token = null;
+	    this.state.channel = TokenConstants.DEFAULT_CHANNEL;
+        this.state.tokenStartCharIndex = input.index;
+        this.state.tokenStartCharPositionInLine = input.charPositionInLine;
+        this.state.tokenStartLine = input.line;
+	    this.state.text = null;
         try {
-            int m = input.mark();
-            backtracking=1; <! means we won't throw slow exception !>
-            failed=false;
+            var m:int = input.mark();
+            this.state.backtracking=1; <! means we won't throw slow exception !>
+            this.state.failed=false;
             mTokens();
-            backtracking=0;
+            this.state.backtracking=0;
             <! mTokens backtracks with synpred at backtracking==2
                and we set the synpredgate to allow actions at level 1. !>
-            if ( failed ) {
-                input.rewind(m);
+            if ( this.state.failed ) {
+                input.rewindTo(m);
                 input.consume(); <! advance one char and try again !>
             }
             else {
                 emit();
-                return token;
+                return this.state.token;
             }
         }
-        catch (RecognitionException re) {
+        catch (re:RecognitionException) {
             // shouldn't happen in backtracking mode, but...
             reportError(re);
             recover(re);
         }
     }
+    // Not reached - For ActionScript compiler
+    throw new Error();
 }
 
-public void memoize(IntStream input,
-		int ruleIndex,
-		int ruleStartIndex)
+public override function memoize(input:IntStream,
+		ruleIndex:int,
+		ruleStartIndex:int):void
 {
-if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+if ( this.state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
 }
 
-public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+public override function alreadyParsedRule(input:IntStream, ruleIndex:int):Boolean {
+if ( this.state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
 return false;
 }
 >>
 
-filteringActionGate() ::= "backtracking==1"
+actionGate() ::= "this.state.backtracking==0"
+
+filteringActionGate() ::= "this.state.backtracking==1"
 
 /** How to generate a parser */
 genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              ASTLabelType="Object", labelType, members) ::= <<
-public class <name> extends <@superClassName><superClass><@end> {
-    public static final String[] tokenNames = new String[] {
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="Object", labelType, members, rewriteElementType) ::= <<
+public class <grammar.recognizerName> extends <if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else><@superClassName><superClass><@end><endif> {
+<if(grammar.grammarIsRoot)>
+    public static const tokenNames:Array = [
         "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-    };
-    <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
+    ];<\n>
+<endif>
+    <tokens:{public static const <it.name>:int=<it.type>;}; separator="\n">
+
+    // delegates
+    <grammar.delegates:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    <last(grammar.delegators):{g|public var gParent:<g.recognizerName>;}>
+
     <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
     <@members>
    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-    public <name>(<inputStreamType> input) {
-        super(input);
-<if(backtracking)>
-        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
+    public function <grammar.recognizerName>(<grammar.delegators:{g|<g:delegateName()>:<g.recognizerName>, }>input:<inputStreamType>, state:RecognizerSharedState = null) {
+        super(input, state);
+        <cyclicDFAs:cyclicDFACtor()>
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>this, input, this.state);}; separator="\n">
+        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
     }
     <@end>
 
-    public String[] getTokenNames() { return tokenNames; }
-    public String getGrammarFileName() { return "<fileName>"; }
+    public override function get tokenNames():Array { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
+    public override function get grammarFileName():String { return "<fileName>"; }
 
     <members>
 
     <rules; separator="\n\n">
 
+    <! generate rule/method definitions for imported rules so they
+       appear to be defined in this recognizer. !>
+       // Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor|
+        public function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):<returnType()> \{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}; separator="\n">
+
     <synpreds:{p | <synpred(p)>}>
 
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
     <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
 
     <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
@@ -194,15 +222,25 @@ public class <name> extends <@superClassName><superClass><@end> {
 }
 >>
 
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", ...)>
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new Array(<length(grammar.allImportedRules)>+1);<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
 >>
 
 /** How to generate a tree parser; same as parser except the input
  *  stream is a different type.
  */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", ...)>
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
 >>
 
 /** A simpler version of a rule template that is specific to the imaginary
@@ -215,7 +253,8 @@ treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRu
 synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 <<
 // $ANTLR start <ruleName>
-public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {   
+public final function <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):void {
+    <ruleLabelDefs()>
 <if(trace)>
     traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
     try {
@@ -232,20 +271,20 @@ public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterSc
 >>
 
 synpred(name) ::= <<
-public final boolean <name>() {
-    backtracking++;
+public final function <name>():Boolean {
+    this.state.backtracking++;
     <@start()>
-    int start = input.mark();
+    var start:int = input.mark();
     try {
         <name>_fragment(); // can never throw exception
-    } catch (RecognitionException re) {
-        System.err.println("impossible: "+re);
+    } catch (re:RecognitionException) {
+        trace("impossible: "+re);
     }
-    boolean success = !failed;
-    input.rewind(start);
+    var success:Boolean = !this.state.failed;
+    input.rewindTo(start);
     <@stop()>
-    backtracking--;
-    failed=false;
+    this.state.backtracking--;
+    this.state.failed=false;
     return success;
 }<\n>
 >>
@@ -256,18 +295,18 @@ lexerSynpred(name) ::= <<
 
 ruleMemoization(name) ::= <<
 <if(memoize)>
-if ( backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+if ( this.state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
 <endif>
 >>
 
 /** How to test for failure and return from rule */
 checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+<if(backtracking)>if (this.state.failed) return <ruleReturnValue()>;<endif>
 >>
 
 /** This rule has failed, exit indicating failure during backtrack */
 ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (backtracking>0) {failed=true; return <ruleReturnValue()>;}<endif>
+<if(backtracking)>if (this.state.backtracking>0) {this.state.failed=true; return <ruleReturnValue()>;}<endif>
 >>
 
 /** How to generate code for a rule.  This includes any return type
@@ -275,11 +314,9 @@ ruleBacktrackFailure() ::= <<
  */
 rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
 <ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
 // $ANTLR start <ruleName>
 // <fileName>:<description>
-public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
+public final function <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):<returnType()> {
     <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
     <ruleScopeSetUp()>
     <ruleDeclarations()>
@@ -299,9 +336,10 @@ public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterS
 <if(actions.(actionScope).rulecatch)>
     <actions.(actionScope).rulecatch>
 <else>
-    catch (RecognitionException re) {
+    catch (re:RecognitionException) {
         reportError(re);
-        recover(input,re);
+        recoverStream(input,re);
+        <@setErrorReturnValue()>
     }<\n>
 <endif>
 <endif>
@@ -326,21 +364,21 @@ catch (<e.decl>) {
 
 ruleDeclarations() ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = new <returnType()>();
+var retval:<returnType()> = new <returnType()>();
 retval.start = input.LT(1);<\n>
 <else>
 <ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+var <a.name>:<a.type> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
 }>
 <endif>
 <if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.index();
+var <ruleDescriptor.name>_StartIndex:int = input.index;
 <endif>
 >>
 
 ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
+<ruleDescriptor.useScopes:{<it>_stack.push(new Object());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.push(new Object());}; separator="\n">
 >>
 
 ruleScopeCleanUp() ::= <<
@@ -349,27 +387,27 @@ ruleScopeCleanUp() ::= <<
 >>
 
 ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{<labelType> <it.label.text>=null;}; separator="\n"
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{var <it.label.text>:<labelType>=null;}; separator="\n"
 >
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{List list_<it.label.text>=null;}; separator="\n"
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{var list_<it.label.text>:Array=null;}; separator="\n"
 >
 <ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text>:RuleReturnScope = null;}; separator="\n">
 >>
 
 lexerRuleLabelDefs() ::= <<
 <[ruleDescriptor.tokenLabels,
   ruleDescriptor.tokenListLabels,
   ruleDescriptor.ruleLabels]
-    :{<labelType> <it.label.text>=null;}; separator="\n"
+    :{var <it.label.text>:<labelType>=null;}; separator="\n"
 >
-<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<ruleDescriptor.charLabels:{var <it.label.text>:int;}; separator="\n">
 <[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
   ruleDescriptor.ruleListLabels]
-    :{List list_<it.label.text>=null;}; separator="\n"
+    :{var list_<it.label.text>:Array=null;}; separator="\n"
 >
 >>
 
@@ -396,7 +434,7 @@ retval.stop = input.LT(-1);<\n>
 memoize() ::= <<
 <if(memoize)>
 <if(backtracking)>
-if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+if ( this.state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
 <endif>
 <endif>
 >>
@@ -406,7 +444,7 @@ if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.n
  */
 lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
 // $ANTLR start <ruleName>
-public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
+public final function m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):void {
     <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
     <ruleScopeSetUp()>
     <ruleDeclarations()>
@@ -417,13 +455,15 @@ public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scop
         <ruleDescriptor.actions.init>
         <block><\n>
 <else>
-        int _type = <ruleName>;
+        var _type:int = <ruleName>;
+        var _channel:int = DEFAULT_TOKEN_CHANNEL;
         <ruleMemoization(name=ruleName)>
         <lexerRuleLabelDefs()>
         <ruleDescriptor.actions.init>
         <block>
         <ruleCleanUp()>
-        this.type = _type;
+        this.state.type = _type;
+        this.state.channel = _channel;
         <(ruleDescriptor.actions.after):execAction()>
 <endif>
     }
@@ -440,7 +480,7 @@ public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scop
  *  that chooses between lexer rules.
  */
 tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-public void mTokens() throws RecognitionException {
+public override function mTokens():void {
     <block><\n>
 }
 >>
@@ -450,7 +490,7 @@ public void mTokens() throws RecognitionException {
 /** A (...) subrule with multiple alternatives */
 block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
 // <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
+var alt<decisionNumber>:int=<maxAlt>;
 <decls>
 <@predecision()>
 <decision>
@@ -465,7 +505,7 @@ switch (alt<decisionNumber>) {
 /** A rule block with multiple alternatives */
 ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
 // <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
+var alt<decisionNumber>:int=<maxAlt>;
 <decls>
 <@predecision()>
 <decision>
@@ -495,12 +535,12 @@ blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber
 /** A (..)+ block with 1 or more alternatives */
 positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
 // <fileName>:<description>
-int cnt<decisionNumber>=0;
+var cnt<decisionNumber>:int=0;
 <decls>
 <@preloop()>
 loop<decisionNumber>:
 do {
-    int alt<decisionNumber>=<maxAlt>;
+    var alt<decisionNumber>:int=<maxAlt>;
     <@predecision()>
     <decision>
     <@postdecision()>
@@ -509,10 +549,8 @@ do {
 	default :
 	    if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
 	    <ruleBacktrackFailure()>
-            EarlyExitException eee =
-                new EarlyExitException(<decisionNumber>, input);
-            <@earlyExitException()>
-            throw eee;
+            throw new EarlyExitException(<decisionNumber>, input);
+            <! Need to add support for earlyExitException debug hook !>
     }
     cnt<decisionNumber>++;
 } while (true);
@@ -528,7 +566,7 @@ closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,m
 <@preloop()>
 loop<decisionNumber>:
 do {
-    int alt<decisionNumber>=<maxAlt>;
+    var alt<decisionNumber>:int=<maxAlt>;
     <@predecision()>
     <decision>
     <@postdecision()>
@@ -562,15 +600,21 @@ case <i> :
 >>
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
 // <fileName>:<description>
 {
 <@declarations()>
 <elements:element()>
+<rew>
 <@cleanup()>
 }
 >>
 
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
 // E L E M E N T S
 
 /** Dump the elements one per line */
@@ -580,22 +624,19 @@ element() ::= <<
 >>
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=<labelType>(<endif>matchStream(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)>)<endif>; <checkRuleBacktrackFailure()>
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRef(...)>
 <listLabel(elem=label,...)>
 >>
 
 listLabel(label,elem) ::= <<
-if (list_<label>==null) list_<label>=new ArrayList();
-list_<label>.add(<elem>);<\n>
+if (list_<label>==null) list_<label>=new Array();
+list_<label>.push(<elem>);<\n>
 >>
 
 /** match a character */
@@ -620,31 +661,33 @@ matchSet(s,label,elementIndex,postmatchCode="") ::= <<
 <if(LEXER)>
 <label>= input.LA(1);<\n>
 <else>
-<label>=(<labelType>)input.LT(1);<\n>
+<label>=<labelType>(input.LT(1));<\n>
 <endif>
 <endif>
 if ( <s> ) {
     input.consume();
     <postmatchCode>
 <if(!LEXER)>
-    errorRecovery=false;
+    this.state.errorRecovery=false;
 <endif>
-    <if(backtracking)>failed=false;<endif>
+    <if(backtracking)>this.state.failed=false;<endif>
 }
 else {
     <ruleBacktrackFailure()>
-    MismatchedSetException mse =
-        new MismatchedSetException(null,input);
     <@mismatchedSetException()>
 <if(LEXER)>
-    recover(mse);
+    throw recover(new MismatchedSetException(null,input));<\n>
 <else>
+    throw new MismatchedSetException(null,input);
+    <! use following code to make it recover inline; remove throw mse;
     recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
 <endif>
-    throw mse;
 }<\n>
 >>
 
+matchRuleBlockSet ::= matchSet
+
 matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 <matchSet(...)>
 <listLabel(elem=label,...)>
@@ -653,17 +696,17 @@ matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 /** Match a string literal */
 lexerStringRef(string,label) ::= <<
 <if(label)>
-int <label>Start = getCharIndex();
-match(<string>); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
+var <label>Start:int = charIndex;
+matchString(<string>); <checkRuleBacktrackFailure()>
+<label> = CommonToken.createFromStream(input, TokenConstants.INVALID_TOKEN_TYPE, TokenConstants.DEFAULT_CHANNEL, <label>Start, charIndex-1);
 <else>
-match(<string>); <checkRuleBacktrackFailure()><\n>
+matchString(<string>); <checkRuleBacktrackFailure()><\n>
 <endif>
 >>
 
 wildcard(label,elementIndex) ::= <<
 <if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
+<label>=<labelType>(input.LT(1));<\n>
 <endif>
 matchAny(input); <checkRuleBacktrackFailure()>
 >>
@@ -687,38 +730,46 @@ wildcardCharListLabel(label, elementIndex) ::= <<
 >>
 
 /** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ *
+ * GMS: Note:  do not use post-decrement operator!  ASC produces bad code for exceptions in this case.
+ *      See: https://bugs.adobe.com/jira/browse/ASC-3625
  */
-ruleRef(rule,label,elementIndex,args) ::= <<
-pushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
-<if(label)>
-<label>=<rule>(<args; separator=", ">);<\n>
-<else>
-<rule>(<args; separator=", ">);<\n>
-<endif>
-_fsp--;
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+state._fsp = state._fsp - 1;
 <checkRuleBacktrackFailure()>
 >>
 
+
 /** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
 <listLabel(elem=label,...)>
 >>
 
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
 <if(label)>
-int <label>Start<elementIndex> = getCharIndex();
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+var <label>Start<elementIndex>:int = charIndex;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = CommonToken.createFromStream(input, TokenConstants.INVALID_TOKEN_TYPE, TokenConstants.DEFAULT_CHANNEL, <label>Start<elementIndex>, charIndex-1);
 <else>
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -726,28 +777,29 @@ lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
 /** EOF in the lexer */
 lexerMatchEOF(label,elementIndex) ::= <<
 <if(label)>
-int <label>Start<elementIndex> = getCharIndex();
+var <label>Start<elementIndex>:int = charIndex;
 match(EOF); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+var <label>:<labelType> = CommonToken.createFromStream(input, EOF, TokenConstants.DEFAULT_CHANNEL, <label>Start<elementIndex>, charIndex-1);
 <else>
 match(EOF); <checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
 <root:element()>
 <actionsAfterRoot:element()>
 <if(nullableChildList)>
-if ( input.LA(1)==Token.DOWN ) {
-    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+if ( input.LA(1)==TokenConstants.DOWN ) {
+    matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
     <children:element()>
-    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+    matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
 }
 <else>
-match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
 <children:element()>
-match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
 <endif>
 >>
 
@@ -764,17 +816,15 @@ if ( !(<evalPredicate(...)>) ) {
 // F i x e d  D F A  (if-then-else)
 
 dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
 <edges; separator="\nelse ">
 else {
 <if(eotPredictsAlt)>
     alt<decisionNumber>=<eotPredictsAlt>;
 <else>
     <ruleBacktrackFailure()>
-    NoViableAltException nvae =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
+    throw new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <! Need to add hook for noViableAltException() !>
 <endif>
 }
 >>
@@ -785,7 +835,7 @@ else {
  *  expect "if ( LA(1)==X ) match(X);" and that's it.
  */
 dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
 <edges; separator="\nelse ">
 >>
 
@@ -796,7 +846,7 @@ int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
  *  anything other than 'a' predicts exiting.
  */
 dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
 <edges; separator="\nelse "><\n>
 <if(eotPredictsAlt)>
 <if(!edges)>
@@ -835,10 +885,8 @@ default:
     alt<decisionNumber>=<eotPredictsAlt>;
 <else>
     <ruleBacktrackFailure()>
-    NoViableAltException nvae =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
+    throw new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <! Need to add hook for noViableAltException !>
 <endif>
 }<\n>
 >>
@@ -878,6 +926,15 @@ dfaDecision(decisionNumber,description) ::= <<
 alt<decisionNumber> = dfa<decisionNumber>.predict(input);
 >>
 
+cyclicDFACtor(dfa) ::= <<
+
+dfa<dfa.decisionNumber> = new DFA(this, <dfa.decisionNumber>,
+            "<dfa.description>",
+            DFA<dfa.decisionNumber>_eot, DFA<dfa.decisionNumber>_eof, DFA<dfa.decisionNumber>_min,
+            DFA<dfa.decisionNumber>_max, DFA<dfa.decisionNumber>_accept, DFA<dfa.decisionNumber>_special,
+            DFA<dfa.decisionNumber>_transition<if(dfa.specialStateSTs)>, DFA<dfa.decisionNumber>_specialStateTransition<endif>);
+
+>>
 /* Dump DFA tables as run-length-encoded Strings of octal values.
  * Can't use hex as compiler translates them before compilation.
  * These strings are split into multiple, concatenated strings.
@@ -887,82 +944,57 @@ alt<decisionNumber> = dfa<decisionNumber>.predict(input);
  * the encoding methods.
  */
 cyclicDFA(dfa) ::= <<
-static final String DFA<dfa.decisionNumber>_eotS =
-    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_eofS =
-    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_minS =
-    "<dfa.javaCompressedMin; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_maxS =
-    "<dfa.javaCompressedMax; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_acceptS =
-    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_specialS =
-    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>";
-static final String[] DFA<dfa.decisionNumber>_transitionS = {
-        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-};
-
-static final short[] DFA<dfa.decisionNumber>_eot = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS);
-static final short[] DFA<dfa.decisionNumber>_eof = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS);
-static final char[] DFA<dfa.decisionNumber>_min = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
-static final char[] DFA<dfa.decisionNumber>_max = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
-static final short[] DFA<dfa.decisionNumber>_accept = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
-static final short[] DFA<dfa.decisionNumber>_special = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS);
-static final short[][] DFA<dfa.decisionNumber>_transition;
-
-static {
-    int numStates = DFA<dfa.decisionNumber>_transitionS.length;
-    DFA<dfa.decisionNumber>_transition = new short[numStates][];
-    for (int i=0; i\<numStates; i++) {
-        DFA<dfa.decisionNumber>_transition[i] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
-    }
-}
 
-class DFA<dfa.decisionNumber> extends DFA {
-
-    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) {
-        this.recognizer = recognizer;
-        this.decisionNumber = <dfa.decisionNumber>;
-        this.eot = DFA<dfa.decisionNumber>_eot;
-        this.eof = DFA<dfa.decisionNumber>_eof;
-        this.min = DFA<dfa.decisionNumber>_min;
-        this.max = DFA<dfa.decisionNumber>_max;
-        this.accept = DFA<dfa.decisionNumber>_accept;
-        this.special = DFA<dfa.decisionNumber>_special;
-        this.transition = DFA<dfa.decisionNumber>_transition;
-    }
-    public String getDescription() {
-        return "<dfa.description>";
-    }
-    <@errorMethod()>
+private const DFA<dfa.decisionNumber>_eot:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedEOT; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_eof:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedEOF; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_min:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedMin; wrap="\"+\n    \"">", true);
+private const DFA<dfa.decisionNumber>_max:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedMax; wrap="\"+\n    \"">", true);
+private const DFA<dfa.decisionNumber>_accept:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedAccept; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_special:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedSpecial; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_transition:Array = [
+        <dfa.javaCompressedTransition:{s|DFA.unpackEncodedString("<s; wrap="\"+\n\"">")}; separator=",\n">
+];
 <if(dfa.specialStateSTs)>
-    public int specialStateTransition(int s, IntStream input) throws NoViableAltException {
-    	int _s = s;
+    private function DFA<dfa.decisionNumber>_specialStateTransition(dfa:DFA, s:int, _input:IntStream):int {
+        <if(LEXER)>
+        var input:IntStream = _input;
+        <endif>
+        <if(PARSER)>
+        var input:TokenStream = TokenStream(_input);
+        <endif>
+        <if(TREE_PARSER)>
+        var input:TreeNodeStream = TreeNodeStream(_input);
+        <endif>
+    	var _s:int = s;
         switch ( s ) {
         <dfa.specialStateSTs:{state |
         case <i0> : <! compressed special state numbers 0..n-1 !>
             <state>}; separator="\n">
         }
 <if(backtracking)>
-        if (backtracking>0) {failed=true; return -1;}<\n>
+        if (this.state.backtracking>0) {this.state.failed=true; return -1;}<\n>
 <endif>
-        NoViableAltException nvae =
-            new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input);
-        error(nvae);
-        throw nvae;
+        throw dfa.error(new NoViableAltException(dfa.description, <dfa.decisionNumber>, _s, input));
     }<\n>
 <endif>
-}<\n>
+
+protected var dfa<dfa.decisionNumber>:DFA;  // initialized in constructor
+
 >>
 
 /** A state in a cyclic DFA; it's a special state and part of a big switch on
  *  state.
  */
 cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+var LA<decisionNumber>_<stateNumber>:int = input.LA(1);<\n>
 <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-int index<decisionNumber>_<stateNumber> = input.index();
+var index<decisionNumber>_<stateNumber>:int = input.index;
 input.rewind();<\n>
 <endif>
 s = -1;
@@ -997,22 +1029,22 @@ orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
 
 notPredicate(pred) ::= "!(<evalPredicate(...)>)"
 
-evalPredicate(pred,description) ::= "<pred>"
+evalPredicate(pred,description) ::= "(<pred>)"
 
 evalSynPredicate(pred,description) ::= "<pred>()"
 
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atomAsInt>"
 
 /** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
  *  somewhere.  Must ask for the lookahead directly.
  */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atomAsInt>"
 
 lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+(LA<decisionNumber>_<stateNumber>\>=<lowerAsInt> && LA<decisionNumber>_<stateNumber>\<=<upperAsInt>)
 >>
 
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lowerAsInt> && input.LA(<k>)\<=<upperAsInt>)"
 
 setTest(ranges) ::= "<ranges; separator=\"||\">"
 
@@ -1020,25 +1052,21 @@ setTest(ranges) ::= "<ranges; separator=\"||\">"
 
 globalAttributeScope(scope) ::= <<
 <if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
+protected var <scope.name>_stack:Array = new Array();<\n>
 <endif>
 >>
 
 ruleAttributeScope(scope) ::= <<
 <if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
+protected var <scope.name>_stack:Array = new Array();<\n>
 <endif>
 >>
 
+returnStructName() ::= "<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope"
+
 returnType() ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.name>_return
+<returnStructName()>
 <else>
 <if(ruleDescriptor.hasSingleReturnValue)>
 <ruleDescriptor.singleValueReturnType>
@@ -1053,7 +1081,7 @@ void
  */
 ruleLabelType(referencedRule) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
+<returnStructName()>
 <else>
 <if(referencedRule.hasSingleReturnValue)>
 <referencedRule.singleValueReturnType>
@@ -1063,16 +1091,20 @@ void
 <endif>
 >>
 
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
 /** Using a type to init value map, try to init a type; if not in table
  *  must be an object, default value is "null".
  */
 initValue(typeName) ::= <<
-<javaTypeInitMap.(typeName)>
+<asTypeInitMap.(typeName)>
 >>
 
 /** Define a rule label including default value */
 ruleLabelDef(label) ::= <<
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+var <label.label.text>:<ruleLabelType(referencedRule=label.referencedRule)> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
 >>
 
 /** Define a return struct for a rule if the code needs to access its
@@ -1089,32 +1121,32 @@ public static class <returnType()> extends <if(TREE_PARSER)>Tree<else>Parser<end
 >>
 
 parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
+<scope.attributes:{<it.name>:<it.type>}; separator=", ">
 >>
 
 parameterAttributeRef(attr) ::= "<attr.name>"
 parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
 
 scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
-<else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name>
-<endif>
-<endif>
+ <if(negIndex)>
+ <scope>_stack[<scope>_stack.length-<negIndex>-1].<attr.name>
+ <else>
+ <if(index)>
+ <scope>_stack[<index>].<attr.name>
+ <else>
+ <scope>_stack[<scope>_stack.length-1].<attr.name>
+ <endif>
+ <endif>
 >>
 
 scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
 <if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name> =<expr>;
+<scope>_stack[<scope>_stack.length-<negIndex>-1].<attr.name> =<expr>;
 <else>
 <if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name> =<expr>;
+<scope>_stack[<index>].<attr.name> =<expr>;
 <else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name> =<expr>;
+<scope>_stack[<scope>_stack.length-1].<attr.name> =<expr>;
 <endif>
 <endif>
 >>
@@ -1128,7 +1160,7 @@ isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
 /** reference an attribute of rule; might only have single return value */
 ruleLabelRef(referencedRule,scope,attr) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
+(<scope>!=null?<scope>.values.<attr.name>:<initValue(attr.type)>)
 <else>
 <scope>
 <endif>
@@ -1136,7 +1168,7 @@ ruleLabelRef(referencedRule,scope,attr) ::= <<
 
 returnAttributeRef(ruleDescriptor,attr) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
+retval.values.<attr.name>
 <else>
 <attr.name>
 <endif>
@@ -1144,7 +1176,7 @@ retval.<attr.name>
 
 returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> =<expr>;
+retval.values.<attr.name> =<expr>;
 <else>
 <attr.name> =<expr>;
 <endif>
@@ -1159,93 +1191,99 @@ listLabelRef(label) ::= "list_<label>"
 
 // not sure the next are the right approach
 
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.text:null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.type:0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.line:0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.charPositionInLine:0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.channel:0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.tokenIndex:0)"
 tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int(<scope>.text):0)"
 
-ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?<labelType>(<scope>.start):null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?<labelType>(<scope>.stop):null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?<ASTLabelType>(<scope>.tree):null)"
 ruleLabelPropertyRef_text(scope,attr) ::= <<
 <if(TREE_PARSER)>
-input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))
+(<scope>!=null?(input.tokenStream.toStringWithRange(
+  input.treeAdaptor.getTokenStartIndex(<scope>.start),
+  input.treeAdaptor.getTokenStopIndex(<scope>.start))):null)
 <else>
-input.toString(<scope>.start,<scope>.stop)
+(<scope>!=null?input.toStringWithRange(<scope>.start,<scope>.stop):null)
 <endif>
 >>
 
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.st:null)"
 
 /** Isolated $RULE ref ok in lexer as it's a Token */
 lexerRuleLabel(label) ::= "<label>"
 
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(<scope>!=null?<scope>.type:0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(<scope>!=null?<scope>.lien:0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::=
+    "(<scope>!=null?<scope>.charPositionInLine:0)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(<scope>!=null?<scope>.channel:0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(<scope>!=null?<scope>.tokenIndex:0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(<scope>!=null?<scope>.text:null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(<scope>!=null?int(<scope>.text):0)"
 
 // Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
+rulePropertyRef_start(scope,attr) ::= "<labelType>(retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "<labelType>(retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "<ASTLabelType>(retval.tree)"
 rulePropertyRef_text(scope,attr) ::= <<
 <if(TREE_PARSER)>
-input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start))
+input.tokenStream.toStringWithRange(
+  input.treeAdaptor.getTokenStartIndex(retval.start),
+  input.treeAdaptor.getTokenStopIndex(retval.start))
 <else>
-input.toString(retval.start,input.LT(-1))
+input.toStringWithRange(retval.start,input.LT(-1))
 <endif>
 >>
 rulePropertyRef_st(scope,attr) ::= "retval.st"
 
-lexerRulePropertyRef_text(scope,attr) ::= "getText()"
+lexerRulePropertyRef_text(scope,attr) ::= "text"
 lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
 lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "channel"
-lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(charIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
 
 // setting $st and $tree is allowed in local rule. everything else
 // is flagged as error
 ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
 ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
 
-
-/** How to execute an action */
+/** How to execute an action (only when not backtracking) */
 execAction(action) ::= <<
 <if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
 if ( <actions.(actionScope).synpredgate> ) {
   <action>
 }
 <else>
-if ( backtracking==0 ) {
-  <action>
-}
-<endif>
-<else>
 <action>
 <endif>
 >>
 
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
 // M I S C (properties, etc...)
 
 bitset(name, words64) ::= <<
-public static final BitSet <name> = new BitSet(new long[]{<words64:{<it>L};separator=",">});<\n>
+public static const <name>:BitSet = new BitSet([<words64:{<it>};separator=", ">]);<\n>
 >>
 
-codeFileExtension() ::= ".java"
+codeFileExtension() ::= ".as"
 
 true() ::= "true"
 false() ::= "false"
diff --git a/src/org/antlr/codegen/templates/C/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg
similarity index 50%
rename from src/org/antlr/codegen/templates/C/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg
index e1035ac..c187557 100644
--- a/src/org/antlr/codegen/templates/C/AST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg
@@ -1,6 +1,9 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -35,14 +38,14 @@ pANTLR3_VECTOR_FACTORY		vectors;
 /* End @headerFile.members() */
 >>
 
-/** Install the tree adpator inteface pointer and anything else that 
+/** Install the tree adaptor interface pointer and anything else that 
  *  tree parsers and producers require.
  */
 @genericParser.apifuncs() ::= <<
 <if(PARSER)>
 ADAPTOR	= ANTLR3_TREE_ADAPTORNew(instream->tstream->tokenSource->strFactory);<\n>
 <endif>
-ctx->vectors	= antlr3VectorFactoryNew(64);
+ctx->vectors	= antlr3VectorFactoryNew(0);
 >>
 
 @genericParser.cleanup() ::= <<
@@ -55,15 +58,15 @@ ADAPTOR->free(ADAPTOR);
 >>
 
 @returnScope.ruleReturnMembers() ::= <<
-/* @returnScope.ruleReturnMembers() */
-pANTLR3_BASE_TREE	tree;
-/* End @returnScope.ruleReturnMembers() */
+
+<super.ASTLabelType()>	tree;
+
 >>
 
 /** Add a variable to track rule's return AST */
 ruleDeclarations() ::= <<
 <super.ruleDeclarations()>
-pANTLR3_BASE_TREE root_0;<\n>
+<ASTLabelType> root_0;<\n>
 >>
 
 ruleInitializations() ::= <<
@@ -73,22 +76,32 @@ root_0 = NULL;<\n>
 
 ruleLabelDefs() ::= <<
 <super.ruleLabelDefs()>
-<ruleDescriptor.tokenLabels:{pANTLR3_BASE_TREE <it.label.text>_tree;}; separator="\n">
-<ruleDescriptor.tokenListLabels:{pANTLR3_BASE_TREE <it.label.text>_tree;}; separator="\n">
+<ruleDescriptor.tokenLabels:{<ASTLabelType> <it.label.text>_tree;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree;}; separator="\n">
 <ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{pANTLR3_REWRITE_RULE_TOKEN_STREAM stream_<it>;}; separator="\n">
+    :{pANTLR3_REWRITE_RULE_<rewriteElementType>_STREAM stream_<it>;}; separator="\n">
 <ruleDescriptor.allRuleRefsInAltsWithRewrites
     :{pANTLR3_REWRITE_RULE_SUBTREE_STREAM stream_<it>;}; separator="\n">
 >>
 
+/* Note that we defer the actual creation of any rewrite streams we need here and just initialize
+ * them to NULL. This saves creating huge numbers of rewrite streams that cannot be used as only
+ * one alt will be taken in a rule, but we are declaring all the streams here. So we define
+ * a macro that conatins the create code, then use this macro later to check if the stream
+ * has been created yet. Checking for NULL is almost free in C.
+ */
 ruleLabelInitializations() ::= <<
 <super.ruleLabelInitializations()>
 <ruleDescriptor.tokenLabels:{<it.label.text>_tree   = NULL;}; separator="\n">
 <ruleDescriptor.tokenListLabels:{<it.label.text>_tree   = NULL;}; separator="\n">
+
 <ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{stream_<it>   = antlr3RewriteRuleTokenStreamNewAE(ADAPTOR,(pANTLR3_UINT8)"token <it>");}; separator="\n">
+:{stream_<it>   = NULL;
+#define CREATE_stream_<it>  if (stream_<it> == NULL) {stream_<it> = antlr3RewriteRule<rewriteElementType>StreamNewAE(ADAPTOR, RECOGNIZER, (pANTLR3_UINT8)"token <it>"); } }; separator="\n">
 <ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAE(ADAPTOR,(pANTLR3_UINT8)"rule <it>");}; separator="\n">
+:{stream_<it>   = NULL;
+#define CREATE_stream_<it>  if (stream_<it> == NULL) {stream_<it> = antlr3RewriteRuleSubtreeStreamNewAE(ADAPTOR, RECOGNIZER, (pANTLR3_UINT8)"rule <it>"); }}; separator="\n">
+
 <if(ruleDescriptor.hasMultipleReturnValues)>
 retval.tree  = NULL;
 <endif>
@@ -99,26 +112,6 @@ ruleLabelInitVal(label) ::= <<
 <super.ruleLabelInitVal(...)>
 <label.label.text>.tree = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
 >>
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(backtracking)>
-if ( BACKTRACKING==0 ) {<\n>
-<endif>
-<if(!ruleDescriptor.isSynPred)>
-	retval.stop = LT(-1);<\n>
-<endif>
-	retval.tree = ADAPTOR->rulePostProcessing(ADAPTOR, root_0);
-	ADAPTOR->setTokenBoundaries(ADAPTOR, retval.tree, retval.start, retval.stop);
-<if(backtracking)>
-}
-<endif>
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{stream_<it>->free(stream_<it>);}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{stream_<it>->free(stream_<it>);}; separator="\n">
-<endif>
->>
 
 /** When doing auto AST construction, we must define some variables;
  *  These should be turned off if doing rewrites.  This must be a "mode"
@@ -135,114 +128,75 @@ if ( BACKTRACKING==0 ) {<\n>
 @alt.initializations() ::= <<
 <if(autoAST)>
 <if(outerAlt)>
-root_0 = ADAPTOR->nil(ADAPTOR);<\n>
+<if(!rewriteMode)>
+root_0 = (<ASTLabelType>)(ADAPTOR->nilNode(ADAPTOR));<\n>
+<endif>
 <endif>
 <endif>
 >>
 
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
-<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
-ADAPTOR->addChild(ADAPTOR, root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
-<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
-root_0 = ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
 
+// T r a c k i n g  R u l e  E l e m e n t s
+//
 /** ID but track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) <endif>stream_<token>->add(stream_<token>, <label>, NULL);<\n>
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) { <endif>CREATE_stream_<token>; stream_<token>->add(stream_<token>, <label>, NULL);<if(backtracking)> }<endif><\n>
 >>
 
 /** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
  *  to the tracking list stream_ID for use in the rewrite.
  */
-tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRefTrack(...)>
 <listLabel(elem=label,...)>
 >>
 
-// SET AST
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>CREATE_stream_<token>; stream_<token>->add(stream_<token>, <label>, NULL);<if(backtracking)> }<endif><\n>
+>>
 
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated
-// rather than just added on code.  Investigate that refactoring when
-// I have more time.
+wildcardTrack(label,elementIndex) ::= <<
+<super.wildcard(...)>
+>> 
 
-matchSet(s,label,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( BACKTRACKING==0 ) <endif>ADAPTOR->addChild(ADAPTOR, root_0, ADAPTOR->create(ADAPTOR, <label>));})>
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) { <endif>CREATE_stream_<rule.name>; stream_<rule.name>->add(stream_<rule.name>, <label>.tree, NULL);<if(backtracking)> }<endif>
 >>
 
-matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabelTrack(...)>
+>>
 
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) { <endif>CREATE_stream_<rule.name>; stream_<rule.name>->add(stream_<rule.name>, <label>.tree, NULL);<if(backtracking)> }<endif>
+>>
 
-matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( BACKTRACKING==0 ) <endif>root_0 = ADAPTOR->becomeRoot(ADAPTOR, ADAPTOR->create(ADAPTOR, <label>), root_0);})>
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabelAST(...)>
 >>
 
+
 // RULE REF AST
 
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) <endif>ADAPTOR->addChild(ADAPTOR, root_0, <label>.tree);
->>
 
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
 
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) <endif>root_0 = ADAPTOR->becomeRoot(ADAPTOR, <label>.tree, root_0);
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
 >>
 
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) <endif>stream_<rule>->add(stream_<rule>, <label>.tree, NULL);
->>
 
 /* How to accumulate lists when we are doing rewrite tracking...
  */
@@ -263,7 +217,7 @@ list_<label>->add(list_<label>, <label>.tree, NULL);
  * Normally, we use inline structures (which the compiler lays down
  * code to copy from heap allocations. However, here we want to accumulate copies
  * of the returned structures because we are adding them to a list. This only makes sense if the
- * grammar is not rewriting the tree as a tree rewwrite onnly preserves the tree, not the object/structure
+ * grammar is not rewriting the tree as a tree rewrite only preserves the tree, not the object/structure
  * returned from the rule. The rewrite will extract the tree pointer. However, if we are not going to 
  * do a tree re-write, then the user may wish to iterate the structures returned by the rule in 
  * action code and will expect the user defined returns[] elements to be available when they do this.
@@ -279,56 +233,11 @@ if (list_<label> == NULL)
 {
     RETURN_TYPE_<label> * tcopy;
 
-    tcopy = ANTLR3_MALLOC(sizeof(RETURN_TYPE_<label>)); /* Note no memory allocation checks! */
-    ANTLR3_MEMMOVE((void *)(tcopy), (const void *)&<label>, sizeof(RETURN_TYPE_<label>));
-    list_<label>->add(list_<label>, tcopy, freeScope);  /* Add whatever the return type is */<\n>
+    tcopy = (RETURN_TYPE_<label> *)ANTLR3_MALLOC(sizeof(RETURN_TYPE_<label>)); /* Note no memory allocation checks! */
+    ANTLR3_MEMCPY((void *)(tcopy), (const void *)&<label>, sizeof(RETURN_TYPE_<label>));
+    list_<label>->add(list_<label>, (void *)tcopy, freeScope);  /* Add whatever the return type is */<\n>
 }
 >>
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefTrack(...)>
-<listLabelTrack(...)>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabelAST(...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefBang(...)>
-<listLabelAST(...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabelAST(...)>
->>
-
-// WILDCARD AST
-
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
-<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
-ADAPTOR->addChild(ADAPTOR, root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( BACKTRACKING==0 ) {<endif>
-<label>_tree = ADAPTOR->create(ADAPTOR, <label>);
-root_0 = ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-// there's got to be a better way
 
 // R e w r i t e
 
@@ -340,6 +249,8 @@ rewriteCode(
 	referencedTokenListLabels,
 	referencedRuleLabels,
 	referencedRuleListLabels,
+    referencedWildcardLabels,
+    referencedWildcardListLabels,
 	rewriteBlockLevel, 
 	enclosingTreeLevel, 
 	treeLevel) ::=
@@ -353,14 +264,24 @@ rewriteCode(
  * rule list labels  : <referencedRuleListLabels; separator=", ">
  */
 <if(backtracking)>
-if ( BACKTRACKING==0 ) <\n>
+if ( <actions.(actionScope).synpredgate> ) <\n>
 <endif>
 {
 	<rewriteCodeLabelsDecl()>
 	<rewriteCodeLabelsInit()>
-	root_0			    = ADAPTOR->nil(ADAPTOR);
+	root_0			    = (<ASTLabelType>)(ADAPTOR->nilNode(ADAPTOR));
 	<prevRuleRootRef()>.tree    = root_0;
 	<alts:rewriteAlt(); separator="else ">
+	<if(TREE_PARSER)>
+	<if(rewriteMode)>
+	<prevRuleRootRef()>.tree = (<ASTLabelType>)(ADAPTOR->rulePostProcessing(ADAPTOR, root_0));
+	INPUT->replaceChildren(INPUT, ADAPTOR->getParent(ADAPTOR, retval.start),
+                      ADAPTOR->getChildIndex(ADAPTOR, retval.start),
+                      ADAPTOR->getChildIndex(ADAPTOR, _last),
+                      retval.tree);
+	<endif>
+	<endif>
+	<prevRuleRootRef()>.tree = root_0; // set result root
 	<rewriteCodeLabelsFree()>
 
 }
@@ -368,11 +289,11 @@ if ( BACKTRACKING==0 ) <\n>
 
 rewriteCodeLabelsDecl() ::= <<
 <referencedTokenLabels
-    :{pANTLR3_REWRITE_RULE_TOKEN_STREAM stream_<it>;};
+    :{pANTLR3_REWRITE_RULE_<rewriteElementType>_STREAM stream_<it>;};
     separator="\n"
 >
 <referencedTokenListLabels
-    :{pANTLR3_REWRITE_RULE_TOKEN_STREAM stream_<it>;};
+    :{pANTLR3_REWRITE_RULE_<rewriteElementType>_STREAM stream_<it>;};
     separator="\n"
 >
 <referencedRuleLabels
@@ -387,38 +308,38 @@ rewriteCodeLabelsDecl() ::= <<
 
 rewriteCodeLabelsInit() ::= <<
 <referencedTokenLabels
-    :{stream_<it>=antlr3RewriteRuleTokenStreamNewAEE(ADAPTOR, (pANTLR3_UINT8)"token <it>", <it>);};
-    separator="\n"
+:{stream_<it>=antlr3RewriteRule<rewriteElementType>StreamNewAEE(ADAPTOR,  RECOGNIZER, (pANTLR3_UINT8)"token <it>", <it>);};
+separator="\n"
 >
 <referencedTokenListLabels
-    :{stream_<it>=antlr3RewriteRuleTokenStreamNewAEV(ADAPTOR, (pANTLR3_UINT8)"token <it>", list_<it>);};
-    separator="\n"
+:{stream_<it>=antlr3RewriteRule<rewriteElementType>StreamNewAEV(ADAPTOR,  RECOGNIZER, (pANTLR3_UINT8)"token <it>", list_<it>); };
+separator="\n"
 >
-<referencedRuleLabels 
-    :{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAEE(ADAPTOR, (pANTLR3_UINT8)"token <it>", <it>.tree != NULL ? <it>.tree : NULL);};
-    separator="\n"
+<referencedRuleLabels
+:{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAEE(ADAPTOR,  RECOGNIZER, (pANTLR3_UINT8)"token <it>", <it>.tree != NULL ? <it>.tree : NULL);};
+separator="\n"
 >
 <referencedRuleListLabels
-    :{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAEV(ADAPTOR, (pANTLR3_UINT8)"token <it>", list_<it>);};
-    separator="\n"
+:{stream_<it>=antlr3RewriteRuleSubtreeStreamNewAEV(ADAPTOR,  RECOGNIZER, (pANTLR3_UINT8)"token <it>", list_<it>);};
+separator="\n"
 >
 >>
 rewriteCodeLabelsFree() ::= <<
 <referencedTokenLabels
-    :{stream_<it>->free(stream_<it>);};
-    separator="\n"
+:{if (stream_<it> != NULL) stream_<it>->free(stream_<it>); };
+separator="\n"
 >
 <referencedTokenListLabels
-    :{stream_<it>->free(stream_<it>);};
-    separator="\n"
+:{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);};
+separator="\n"
 >
-<referencedRuleLabels 
-    :{stream_<it>->free(stream_<it>);};
-    separator="\n"
+<referencedRuleLabels
+:{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);};
+separator="\n"
 >
 <referencedRuleListLabels
-    :{stream_<it>->free(stream_<it>);};
-    separator="\n"
+:{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);};
+separator="\n"
 >
 >>
 
@@ -434,11 +355,11 @@ rewriteOptionalBlock(
 <<
 // <fileName>:<description>
 {
-	if ( <referencedElementsDeep:{el | stream_<el>->hasNext(stream_<el>)}; separator="||"> ) 
+	if ( <referencedElementsDeep:{el | (stream_<el> != NULL && stream_<el>->hasNext(stream_<el>)) }; separator="|| "> )
 	{
 		<alt>
 	}
-	<referencedElementsDeep:{el | stream_<el>->reset(stream_<el>);<\n>}>
+	<referencedElementsDeep:{el | if ( stream_<el> != NULL) stream_<el>->reset(stream_<el>);<\n>}>
 }<\n>
 >>
 
@@ -451,18 +372,18 @@ rewriteClosureBlock(
 <<
 // <fileName>:<description>
 {
-	while ( <referencedElements:{el | stream_<el>->hasNext(stream_<el>)}; separator="||"> ) 
+	while ( <referencedElements:{el | (stream_<el> != NULL && stream_<el>->hasNext(stream_<el>)) }; separator="|| "> )
 	{
 		<alt>
 	}
-	<referencedElements:{el | stream_<el>->reset(stream_<el>);<\n>}>
+	<referencedElements:{el | if (stream_<el> != NULL) stream_<el>->reset(stream_<el>);<\n>}>
 }<\n>
 >>
 RewriteEarlyExitException() ::=
 <<
 CONSTRUCTEX();
 EXCEPTION->type         = ANTLR3_REWRITE_EARLY_EXCEPTION;
-EXCEPTION->name         = ANTLR3_REWRITE_EARLY_EXCEPTION_NAME;
+EXCEPTION->name         = (void *)ANTLR3_REWRITE_EARLY_EXCEPTION_NAME;
 >>
 rewritePositiveClosureBlock(
 	alt,
@@ -471,13 +392,13 @@ rewritePositiveClosureBlock(
 	referencedElements,		// elements in immediately block; no nested blocks
 	description) ::=
 <<
-if ( !(<referencedElements:{el | stream_<el>->hasNext(stream_<el>)}; separator="||">) ) 
+if (<referencedElements:{el | (stream_<el> == NULL || !stream_<el>->hasNext(stream_<el>)) }; separator="|| "> )
 {
     <RewriteEarlyExitException()>
 }
 else
 {
-	while ( <referencedElements:{el | stream_<el>->hasNext(stream_<el>)}; separator="||"> ) {
+	while ( <referencedElements:{el | (stream_<el>->hasNext(stream_<el>)) }; separator="|| "> ) {
 		<alt>
 	}
 	<referencedElements:{el | stream_<el>->reset(stream_<el>);<\n>}>
@@ -504,7 +425,7 @@ rewriteEmptyAlt() ::= "root_0 = NULL; /* \<-- rewriteEmptyAlt()) */"
 rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
 // <fileName>:<description>
 {
-    pANTLR3_BASE_TREE root_<treeLevel> = ADAPTOR->nil(ADAPTOR);
+    <ASTLabelType> root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->nilNode(ADAPTOR));
     <root:rewriteElement()>
     <children:rewriteElement()>
     ADAPTOR->addChild(ADAPTOR, root_<enclosingTreeLevel>, root_<treeLevel>);
@@ -519,52 +440,40 @@ rewriteElement(e) ::= <<
 >>
 
 /** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,args) ::= <<
-<if(args)>
-ADAPTOR->addChildToken(ADAPTOR, root_<treeLevel>, ADAPTOR->createToken(ADAPTOR, <args; separator=", ">));<\n>
-<else>
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<token>->next(stream_<token>));<\n>
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
 <endif>
 >>
 
 /** Gen $label ... where defined via label=ID */
 rewriteTokenLabelRef(label,elementIndex) ::= <<
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label>->next(stream_<label>));<\n>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label> == NULL ? NULL : stream_<label>->nextNode(stream_<label>));<\n>
 >>
 
 /** Gen $label ... where defined via label+=ID */
 rewriteTokenListLabelRef(label,elementIndex) ::= <<
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label>->next(stream_<label>));<\n>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label> == NULL ? NULL : stream_<label>->nextNode(stream_<label>));<\n>
 >>
 
 /** Gen ^($label ...) */
 rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = ADAPTOR->becomeRootToken(ADAPTOR, stream_<label>->next(stream_<label>), root_<treeLevel>);<\n>
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRootToken(ADAPTOR, stream_<label> == NULL ? NULL : stream_<label>->nextToken(stream_<label>), root_<treeLevel>));<\n>
 >>
 
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
 /** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,args) ::= <<
-<if(args)>
-root_<treeLevel> = ADAPTOR->becomeRootToken(ADAPTOR, ADAPTOR->createToken(ADAPTOR, <args; separator=", ">), root_<treeLevel>);<\n>
-<else>
-root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, stream_<token>->next(stream_<token>), root_<treeLevel>);<\n>
-<endif>
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <createRewriteNodeFromElement(...)>, root_<treeLevel>));<\n>
 >>
 
-rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
-<if(args)>
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, ADAPTOR->createTypeTokenText(ADAPTOR, <token>, TOKTEXT(<args; separator=", ">)));<\n>
-<else>
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, ADAPTOR->createTypeText(ADAPTOR, <token>, (pANTLR3_UINT8)"<token>"));<\n>
-<endif>
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
 >>
 
-rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
-<if(args)>
-root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, ADAPTOR->createTypeTokenText(ADAPTOR, <token>, TOKTEXT(<args; separator=", ">)), root_<treeLevel>);<\n>
-<else>
-root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, ADAPTOR->createTypeText(ADAPTOR, <token>, (pANTLR3_UINT8)"<token>"), root_<treeLevel>);<\n>
-<endif>
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>));<\n>
 >>
 
 /** plain -> {foo} action */
@@ -580,11 +489,11 @@ root_0 = <action>;<\n>
 prevRuleRootRef() ::= "retval"
 
 rewriteRuleRef(rule,dup) ::= <<
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<rule>->next(stream_<rule>));<\n>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<rule> == NULL ? NULL : stream_<rule>->nextTree(stream_<rule>));<\n>
 >>
 
 rewriteRuleRefRoot(rule,dup) ::= <<
-root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, stream_<rule>->next(stream_<rule>), root_<treeLevel>);<\n>
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, stream_<rule> == NULL ? NULL : stream_<rule>->nextNode(stream_<rule>), root_<treeLevel>));<\n>
 >>
 
 rewriteNodeAction(action) ::= <<
@@ -592,25 +501,66 @@ ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <action>);<\n>
 >>
 
 rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, <action>, root_<treeLevel>);<\n>
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <action>, root_<treeLevel>));<\n>
 >>
 
 /** Gen $ruleLabel ... where defined via ruleLabel=rule */
 rewriteRuleLabelRef(label) ::= <<
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label>->next(stream_<label>));<\n>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label> == NULL ? NULL : stream_<label>->nextTree(stream_<label>));<\n>
 >>
 
 /** Gen $ruleLabel ... where defined via ruleLabel+=rule */
 rewriteRuleListLabelRef(label) ::= <<
-ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, (pANTLR3_BASE_TREE)(stream_<label>->next(stream_<label>)));<\n>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label> == NULL ? NULL : stream_<label>->nextTree(stream_<label>));<\n>
 >>
 
 /** Gen ^($ruleLabel ...) where ruleLabel=rule */
 rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = ADAPTOR->becomeRoot(ADAPTOR, stream_<label>->nextNode(stream_<label>), root_<treeLevel>);<\n>
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, stream_<label> == NULL ? NULL : stream_<label>->nextNode(stream_<label>), root_<treeLevel>));<\n>
 >>
 
 /** Gen ^($ruleLabel ...) where ruleLabel+=rule */
 rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = ADAPTOR->becomeRoot((pANTLR3_BASE_TREE)(stream_<label>->nextNode(stream_<label>)), root_<treeLevel>);<\n>
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, (<ASTLabelType>)(stream_<label> == NULL ? NULL : stream_<label>->nextNode(stream_<label>)), root_<treeLevel>));<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, stream_<label> == NULL ? NULL : stream_<label>->nextTree(stream_<label>));<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+<hetero>New(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)>
+
+#if <length(args)> == 2
+	(<ASTLabelType>)ADAPTOR->createTypeTokenText(ADAPTOR, <tokenType>, TOKTEXT(<args; separator=", ">))
+#else
+	(<ASTLabelType>)ADAPTOR->createTypeText(ADAPTOR, <tokenType>, (pANTLR3_UINT8)<args; separator=", ">)
+#endif
+
+<else>
+(<ASTLabelType>)ADAPTOR->createTypeText(ADAPTOR, <tokenType>, (pANTLR3_UINT8)"<tokenType>")
+<endif>
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+<hetero>New(stream_<token>->nextToken(stream_<token>)<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+
+#if	<length(args)> == 2
+ADAPTOR->createTypeTokenText(ADAPTOR, <token>->getType(<token>), TOKTEXT(<token>, <args; separator=", ">)) /* JIMI */
+#else
+ADAPTOR->createTypeToken(ADAPTOR, <token>->getType(<token>), <token>, <args; separator=", ">)
+#endif
+
+<else>
+stream_<token> == NULL ? NULL : stream_<token>->nextNode(stream_<token>)
+<endif>
+<endif>
 >>
diff --git a/src/org/antlr/codegen/templates/C/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg
similarity index 81%
rename from src/org/antlr/codegen/templates/C/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg
index 00f0525..c6727da 100644
--- a/src/org/antlr/codegen/templates/C/ASTDbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg
@@ -1,6 +1,9 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -29,11 +32,11 @@
 /** Template overrides to add debugging to AST stuff.  Dynamic inheritance
  *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
  */
-group CASTDbg;
+group ASTDbg;
 
 parserMembers() ::= <<
-protected TreeAdaptor adaptor =
-    new DebugTreeAdaptor(dbg,new CommonTreeAdaptor());
+protected DebugTreeAdaptor adaptor =
+    new DebugTreeAdaptor(null,new CommonTreeAdaptor());
 public void setTreeAdaptor(TreeAdaptor adaptor) {
     this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
 }
@@ -42,4 +45,14 @@ public TreeAdaptor getTreeAdaptor() {
 }<\n>
 >>
 
- at rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
+parserCtorBody() ::= <<
+>>
+
+createListenerAndHandshake() ::= <<
+<super.createListenerAndHandshake()>
+>>
+
+ctorForPredefinedListener() ::= <<
+>>
+
+ at rewriteElement.pregen() ::= ""
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg
new file mode 100644
index 0000000..2220253
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg
@@ -0,0 +1,206 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+retval.tree = (<ASTLabelType>)(ADAPTOR->errorNode(ADAPTOR, INPUT, retval.start, LT(-1), EXCEPTION));
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)(ADAPTOR->create(ADAPTOR, <label>));
+ADAPTOR->addChild(ADAPTOR, root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_0));
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>ADAPTOR->addChild(ADAPTOR, root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=(<labelType>)LT(1);<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <createNodeFromToken(...)>, root_0));})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>ADAPTOR->addChild(ADAPTOR, root_0, <label>.tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <label>.tree, root_0));
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabelAST(...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabelAST(...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabelAST(...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)(ADAPTOR->create(ADAPTOR, <label>));
+ADAPTOR->addChild(ADAPTOR, root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)(ADAPTOR->create(ADAPTOR, <label>));
+root_0 = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_0));
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+<hetero>New(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)(ADAPTOR->create(ADAPTOR, <label>))
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp(...)>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{<\n>
+<endif>
+<if(!ruleDescriptor.isSynPred)>
+	retval.stop = LT(-1);<\n>
+<endif>
+	retval.tree = (<ASTLabelType>)(ADAPTOR->rulePostProcessing(ADAPTOR, root_0));
+	ADAPTOR->setTokenBoundaries(ADAPTOR, retval.tree, retval.start, retval.stop);
+    <ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);}; separator="\n">
+    <ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);}; separator="\n">
+<if(backtracking)>
+}<\n>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg
new file mode 100644
index 0000000..702505a
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg
@@ -0,0 +1,327 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> _last;<\n>
+<ASTLabelType> _first_0;<\n>
+>>
+
+/** Add a variable to track last element matched */
+ruleInitializations() ::= <<
+<super.ruleInitializations()>
+_last = NULL;<\n>
+_first_0 = NULL;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( BACKTRACKING ==0 ) {<endif>
+<if(rewriteMode)>
+retval.tree = (<ASTLabelType>)_first_0;
+if ( ADAPTOR->getParent(ADAPTOR, retval.tree) != NULL && ADAPTOR->isNilNode(ADAPTOR, ADAPTOR->getParent(ADAPTOR, retval.tree) ) )
+{
+    retval.tree = (<ASTLabelType>)ADAPTOR->getParent(ADAPTOR, retval.tree);
+}
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = (<ASTLabelType>)LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel>;
+<ASTLabelType> _first_<treeLevel>;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel>;
+<endif>
+_save_last_<treeLevel> = _last;
+_first_<treeLevel>     = NULL;
+<if(!rewriteMode)>
+root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->nilNode(ADAPTOR));
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( BACKTRACKING ==0 ) {<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel> == NULL ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel> == NULL ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<if(backtracking)>}<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( LA(1)==ANTLR3_TOKEN_DOWN ) {
+    MATCHT(ANTLR3_TOKEN_DOWN, NULL); 
+    <children:element()>
+    MATCHT(ANTLR3_TOKEN_UP, NULL); 
+}
+<else>
+MATCHT(ANTLR3_TOKEN_DOWN, NULL); 
+<children:element()>
+MATCHT(ANTLR3_TOKEN_UP, NULL); 
+<endif>
+<if(!rewriteMode)>
+ADAPTOR->addChild(ADAPTOR, root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( BACKTRACKING ==0 ) {<endif>
+<if(hetero)>
+<label>_tree = <hetero>New(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)ADAPTOR->dupNode(ADAPTOR, <label>);
+<endif>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else>
+<if(backtracking)>if ( BACKTRACKING ==0 ) {<endif>
+if ( _first_<treeLevel> == NULL ) _first_<treeLevel> = <label>;
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( BACKTRACKING == 0 ) {<endif>
+<if(hetero)>
+<label>_tree = <hetero>New(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)ADAPTOR->dupNode(ADAPTOR, <label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)ADAPTOR->dupTree(ADAPTOR, <label>);
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel> == NULL ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( BACKTRACKING == 0 ) {<endif>
+<if(hetero)>
+<label>_tree = <hetero>New(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)ADAPTOR->dupNode(ADAPTOR, <label>);
+<endif><\n>
+ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ){<endif>
+<if(hetero)>
+<label>_tree = <hetero>New(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)ADAPTOR->dupNode(ADAPTOR, <label>);
+<endif>
+root_<treeLevel> = (<ASTLabelType>)ADAPTOR->becomeRoot(ADAPTOR, <label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )
+{
+<endif>
+<if(!rewriteMode)>
+	ADAPTOR->addChild(ADAPTOR, root_<treeLevel>, <label>.tree);
+<else>
+	if ( _first_<treeLevel> == NULL ) _first_<treeLevel> = <label>.tree;
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<super.listLabelAST(elem=label,...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( ( <actions.(actionScope).synpredgate> ) ) <endif>root_<treeLevel> = (<ASTLabelType>)(ADAPTOR->becomeRoot(ADAPTOR, <label>.tree, root_<treeLevel>));
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<super.listLabelAST(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+>>
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.ruleRefRuleRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+<hetero>New(stream_<token>->nextNode(stream_<token>))
+<else>
+stream_<token>->nextNode(stream_<token>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp(...)>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<if(!ruleDescriptor.isSynPred)>
+	retval.stop = LT(-1);<\n>
+<endif>
+	retval.tree = (<ASTLabelType>)ADAPTOR->rulePostProcessing(ADAPTOR, root_0);
+<if(backtracking)>
+}
+<endif>
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{if (stream_<it> != NULL) stream_<it>->free(stream_<it>);}; separator="\n">
+>>
diff --git a/src/org/antlr/codegen/templates/C/C.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
similarity index 69%
rename from src/org/antlr/codegen/templates/C/C.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
index c57e21b..1186308 100644
--- a/src/org/antlr/codegen/templates/C/C.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
@@ -1,7 +1,9 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
- Templates and C runtime Copyright (c) 2006-2007 Jim Idle
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -28,9 +30,9 @@
 */
 
 /*
- * This code generating template and the assocated C runtime was produced by:
+ * This code generating template and the associated C runtime was produced by:
  * Jim Idle jimi|hereisanat|idle|dotgoeshere|ws. 
- * If it does cause the destruction of the Universe, it will be pretty cool so long as 
+ * If it causes the destruction of the Universe, it will be pretty cool so long as 
  * I am in a different one at the time. 
  */
 group C implements ANTLRCore ;
@@ -38,8 +40,8 @@ group C implements ANTLRCore ;
 cTypeInitMap ::= [
 	"int"		    : "0",              // Integers     start out being 0
 	"long"		    : "0",              // Longs        start out being 0
-	"float"		    : "0.0",            // Floats       start out being 0
-	"double"	    : "0.0",            // Doubles      start out being 0
+	"float"		    : "0.0",           // Floats       start out being 0
+	"double"	    : "0.0",           // Doubles      start out being 0
 	"ANTLR3_BOOLEAN"    : "ANTLR3_FALSE",   // Booleans     start out being Antlr C for false
 	"byte"		    : "0",              // Bytes        start out being 0
 	"short"		    : "0",              // Shorts       start out being 0
@@ -68,7 +70,6 @@ leadIn(type) ::=
  *
  * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
  *
- * View this file with tabs set to 8 (:set ts=8 in gvim) and indent at 4 (:set sw=4 in gvim)
  *
 >>
 
@@ -90,7 +91,7 @@ outputFile( LEXER,
             bitsets,
             buildTemplate,
             buildAST,
-            rewrite,
+            rewriteMode,
             profile,
             backtracking,
             synpreds,
@@ -107,6 +108,35 @@ outputFile( LEXER,
 <<
 <leadIn("C source")>
 */
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
 <if(actions.(actionScope).header)>
 
 /* =============================================================================
@@ -139,16 +169,7 @@ outputFile( LEXER,
 <endif>
 
 
-/* Aids in accessing scopes for grammar programmers
- */
-#undef	SCOPE_TYPE
-#undef	SCOPE_STACK
-#undef	SCOPE_TOP
-#define	SCOPE_TYPE(scope)   p<name>_##scope##_SCOPE
-#define SCOPE_STACK(scope)  p<name>_##scope##Stack
-#define	SCOPE_TOP(scope)    ctx->p<name>_##scope##Top
-#define	SCOPE_SIZE(scope)			(ctx->SCOPE_STACK(scope)->size(ctx->SCOPE_STACK(scope)))
-#define SCOPE_INSTANCE(scope, i)	(ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope),i))
+
 
 /* MACROS that hide the C interface implementations from the
  * generated code, which makes it a little more understandable to the human eye.
@@ -156,12 +177,26 @@ outputFile( LEXER,
  * of code as you cannot see what is happening when single stepping in debuggers
  * and so on. The exception (in my book at least) is for generated code, where you are
  * not maintaining it, but may wish to read and understand it. If you single step it, you know that input()
- * hides some indirect calls, but is always refering to the input stream. This is
+ * hides some indirect calls, but is always referring to the input stream. This is
  * probably more readable than ctx->input->istream->input(snarfle0->blarg) and allows me to rejig
  * the runtime interfaces without changing the generated code too often, without
  * confusing the reader of the generated output, who may not wish to know the gory
- * details of the interface inheritence.
+ * details of the interface inheritance.
  */
+ 
+#define		CTX	ctx
+
+/* Aids in accessing scopes for grammar programmers
+ */
+#undef	SCOPE_TYPE
+#undef	SCOPE_STACK
+#undef	SCOPE_TOP
+#define	SCOPE_TYPE(scope)   p<name>_##scope##_SCOPE
+#define SCOPE_STACK(scope)  p<name>_##scope##Stack
+#define	SCOPE_TOP(scope)    ctx->p<name>_##scope##Top
+#define	SCOPE_SIZE(scope)		ctx->p<name>_##scope##Stack_limit
+#define SCOPE_INSTANCE(scope, i)	(ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope),i))
+
 <if(LEXER)>
  
 /* Macros for accessing things in a lexer
@@ -205,47 +240,52 @@ outputFile( LEXER,
 
 #define	    LEXER					ctx->pLexer
 #define	    RECOGNIZER			    LEXER->rec
-#define	    RULEMEMO				RECOGNIZER->ruleMemo
+#define		LEXSTATE				RECOGNIZER->state
+#define		TOKSOURCE				LEXSTATE->tokSource
 #define	    GETCHARINDEX()			LEXER->getCharIndex(LEXER)
 #define	    GETLINE()				LEXER->getLine(LEXER)
 #define	    GETTEXT()				LEXER->getText(LEXER)
 #define	    GETCHARPOSITIONINLINE() LEXER->getCharPositionInLine(LEXER)
-#define	    EMIT()					LEXER->type = _type; LEXER->emit(LEXER)
+#define	    EMIT()					LEXSTATE->type = _type; LEXER->emit(LEXER)
 #define	    EMITNEW(t)				LEXER->emitNew(LEXER, t)
 #define	    MATCHC(c)				LEXER->matchc(LEXER, c)
 #define	    MATCHS(s)				LEXER->matchs(LEXER, s)
 #define	    MATCHRANGE(c1,c2)	    LEXER->matchRange(LEXER, c1, c2)
 #define	    MATCHANY()				LEXER->matchAny(LEXER)
-#define	    LTOKEN  				LEXER->token
-#define	    HASFAILED()				(RECOGNIZER->failed == ANTLR3_TRUE)
-#define	    BACKTRACKING			RECOGNIZER->backtracking
-#define	    FAILEDFLAG				RECOGNIZER->failed
+#define	    LTOKEN  				LEXSTATE->token
+#define	    HASFAILED()				(LEXSTATE->failed == ANTLR3_TRUE)
+#define	    BACKTRACKING			LEXSTATE->backtracking
+#define	    FAILEDFLAG				LEXSTATE->failed
 #define	    INPUT					LEXER->input
 #define	    STRSTREAM				INPUT
-#define		INDEX()					INPUT->istream->index(INPUT->istream)
-#define		SEEK(n)					INPUT->istream->seek(INPUT->istream, n)
-#define	    EOF_TOKEN				&(LEXER->tokSource->eofToken)
-#define	    HASEXCEPTION()			(RECOGNIZER->error == ANTLR3_TRUE)
-#define	    EXCEPTION				RECOGNIZER->exception
+#define		ISTREAM					INPUT->istream
+#define		INDEX()					ISTREAM->index(ISTREAM)
+#define		SEEK(n)					ISTREAM->seek(ISTREAM, n)
+#define	    EOF_TOKEN				&(LEXSTATE->tokSource->eofToken)
+#define	    HASEXCEPTION()			(LEXSTATE->error == ANTLR3_TRUE)
+#define	    EXCEPTION				LEXSTATE->exception
 #define	    CONSTRUCTEX()			RECOGNIZER->exConstruct(RECOGNIZER)
 #define	    LRECOVER()				LEXER->recover(LEXER)
-#define	    MARK()					INPUT->istream->mark(INPUT->istream)
-#define	    REWIND(m)				INPUT->istream->rewind(INPUT->istream, m)
-#define	    REWINDLAST()			INPUT->istream->rewindLast(INPUT->istream)
+#define	    MARK()					ISTREAM->mark(ISTREAM)
+#define	    REWIND(m)				ISTREAM->rewind(ISTREAM, m)
+#define	    REWINDLAST()			ISTREAM->rewindLast(ISTREAM)
 #define		MEMOIZE(ri,si)			RECOGNIZER->memoize(RECOGNIZER, ri, si)
 #define		HAVEPARSEDRULE(r)		RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
 #define		PUSHSTREAM(str)			LEXER->pushCharStream(LEXER, str)
 #define		POPSTREAM()				LEXER->popCharStream(LEXER)
-#define		SETTEXT(str)			LEXER->text = str
-#define		USER1					LEXER->user1
-#define		USER2					LEXER->user2
-#define		USER3					LEXER->user3
-#define		CUSTOM					LEXER->custom
+#define		SETTEXT(str)			LEXSTATE->text = str
+#define		SKIP()					LEXSTATE->token = &(TOKSOURCE->skipToken)
+#define		USER1					LEXSTATE->user1
+#define		USER2					LEXSTATE->user2
+#define		USER3					LEXSTATE->user3
+#define		CUSTOM					LEXSTATE->custom
+#define		RULEMEMO				LEXSTATE->ruleMemo
+#define		DBG						RECOGNIZER->debugger
 
 /* If we have been told we can rely on the standard 8 bit or 16 bit input
  * stream, then we can define our macros to use the direct pointers
  * in the input object, which is much faster than indirect calls. This
- * is really only significant to lexers wiht a lot of fragment rules (which
+ * is really only significant to lexers with a lot of fragment rules (which
  * do not place LA(1) in a temporary at the moment) and even then
  * only if there is a lot of input (order of say 1M or so).
  */
@@ -268,14 +308,14 @@ outputFile( LEXER,
 # define	    LA(n) ((NEXTCHAR + n) > (DATAP + INPUT->sizeBuf) ? ANTLR3_CHARSTREAM_EOF : (ANTLR3_UCHAR)(*(NEXTCHAR + n - 1)))
 # define	    CONSUME()											\
 {																	\
-    if	(NEXTCHAR \< (DATAP + INPUT->sizeBuf))					\
+    if	(NEXTCHAR \< (DATAP + INPUT->sizeBuf))						\
     {																\
 		INPUT->charPositionInLine++;								\
 		if  ((ANTLR3_UCHAR)(*NEXTCHAR) == INPUT->newlineChar)		\
 		{															\
-			INPUT->line++;										\
+			INPUT->line++;											\
 			INPUT->charPositionInLine	= 0;						\
-			INPUT->currentLine		= (void *)(NEXTCHAR + 1);	\
+			INPUT->currentLine		= (void *)(NEXTCHAR + 1);		\
 		}															\
 		INPUT->nextChar = (void *)(NEXTCHAR + 1);					\
     }																\
@@ -325,39 +365,45 @@ outputFile( LEXER,
 #undef		INDEX
 #undef      ADAPTOR
 #undef		SEEK
+#undef	    RULEMEMO		    
+#undef		DBG
 
 #define	    PARSER							ctx->pParser  
 #define	    RECOGNIZER						PARSER->rec
+#define		PSRSTATE						RECOGNIZER->state
 #define	    HAVEPARSEDRULE(r)				RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
 #define		MEMOIZE(ri,si)					RECOGNIZER->memoize(RECOGNIZER, ri, si)
 #define	    INPUT							PARSER->tstream
 #define	    STRSTREAM						INPUT
-#define		INDEX()							INPUT->istream->index(INPUT->istream)
-#define	    HASEXCEPTION()					(RECOGNIZER->error == ANTLR3_TRUE)
-#define	    EXCEPTION						RECOGNIZER->exception
+#define		ISTREAM							INPUT->istream
+#define		INDEX()							ISTREAM->index(INPUT->istream)
+#define	    HASEXCEPTION()					(PSRSTATE->error == ANTLR3_TRUE)
+#define	    EXCEPTION						PSRSTATE->exception
 #define	    MATCHT(t, fs)					RECOGNIZER->match(RECOGNIZER, t, fs)
 #define	    MATCHANYT()						RECOGNIZER->matchAny(RECOGNIZER)
-#define	    FOLLOWSTACK					    RECOGNIZER->following
+#define	    FOLLOWSTACK					    PSRSTATE->following
 #define	    FOLLOWPUSH(x)					FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
 #define	    FOLLOWPOP()						FOLLOWSTACK->pop(FOLLOWSTACK)
 #define	    PRECOVER()						RECOGNIZER->recover(RECOGNIZER)
 #define	    PREPORTERROR()					RECOGNIZER->reportError(RECOGNIZER)
-#define	    LA(n)							INPUT->istream->_LA(INPUT->istream, n)
+#define	    LA(n)							INPUT->istream->_LA(ISTREAM, n)
 #define	    LT(n)							INPUT->_LT(INPUT, n)
 #define	    CONSTRUCTEX()					RECOGNIZER->exConstruct(RECOGNIZER)
-#define	    CONSUME()						INPUT->istream->consume(INPUT->istream)
-#define	    MARK()							INPUT->istream->mark(INPUT->istream)
-#define	    REWIND(m)						INPUT->istream->rewind(INPUT->istream, m)
-#define	    REWINDLAST()					INPUT->istream->rewindLast(INPUT->istream)
-#define		SEEK(n)							INPUT->istream->seek(INPUT->istream, n)
-#define	    PERRORRECOVERY					RECOGNIZER->errorRecovery
-#define	    _fsp							RECOGNIZER->_fsp
-#define	    FAILEDFLAG						RECOGNIZER->failed
+#define	    CONSUME()						ISTREAM->consume(ISTREAM)
+#define	    MARK()							ISTREAM->mark(ISTREAM)
+#define	    REWIND(m)						ISTREAM->rewind(ISTREAM, m)
+#define	    REWINDLAST()					ISTREAM->rewindLast(ISTREAM)
+#define		SEEK(n)							ISTREAM->seek(ISTREAM, n)
+#define	    PERRORRECOVERY					PSRSTATE->errorRecovery
+#define	    FAILEDFLAG						PSRSTATE->failed
 #define	    HASFAILED()						(FAILEDFLAG == ANTLR3_TRUE)
-#define	    BACKTRACKING					RECOGNIZER->backtracking
+#define	    BACKTRACKING					PSRSTATE->backtracking
 #define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
 #define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, s)
 #define     ADAPTOR                         ctx->adaptor
+#define		RULEMEMO						PSRSTATE->ruleMemo
+#define		DBG								RECOGNIZER->debugger
+
 <endif>
 
 <if(TREE_PARSER)>
@@ -392,36 +438,47 @@ outputFile( LEXER,
 #undef	    RECOVERFROMMISMATCHEDELEMENT
 #undef	    BACKTRACKING
 #undef      ADAPTOR
+#undef	    RULEMEMO		
+#undef		SEEK    
+#undef		INDEX
+#undef		DBG
 
 #define	    PARSER							ctx->pTreeParser  
 #define	    RECOGNIZER						PARSER->rec
+#define		PSRSTATE						RECOGNIZER->state
 #define	    HAVEPARSEDRULE(r)				RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
 #define	    INPUT							PARSER->ctnstream
+#define		ISTREAM							INPUT->tnstream->istream
 #define	    STRSTREAM						INPUT->tnstream
-#define	    HASEXCEPTION()					(RECOGNIZER->error == ANTLR3_TRUE)
-#define	    EXCEPTION						RECOGNIZER->exception
+#define	    HASEXCEPTION()					(PSRSTATE->error == ANTLR3_TRUE)
+#define	    EXCEPTION						PSRSTATE->exception
 #define	    MATCHT(t, fs)					RECOGNIZER->match(RECOGNIZER, t, fs)
 #define	    MATCHANYT()						RECOGNIZER->matchAny(RECOGNIZER)
-#define	    FOLLOWSTACK					    RECOGNIZER->following
+#define	    FOLLOWSTACK					    PSRSTATE->following
 #define	    FOLLOWPUSH(x)					FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
 #define	    FOLLOWPOP()						FOLLOWSTACK->pop(FOLLOWSTACK)
 #define	    PRECOVER()						RECOGNIZER->recover(RECOGNIZER)
 #define	    PREPORTERROR()					RECOGNIZER->reportError(RECOGNIZER)
-#define	    LA(n)							INPUT->tnstream->istream->_LA(INPUT->tnstream->istream, n)
+#define	    LA(n)							ISTREAM->_LA(ISTREAM, n)
 #define	    LT(n)							INPUT->tnstream->_LT(INPUT->tnstream, n)
 #define	    CONSTRUCTEX()					RECOGNIZER->exConstruct(RECOGNIZER)
-#define	    CONSUME()						INPUT->tnstream->istream->consume(INPUT->tnstream->istream)
-#define	    MARK()							INPUT->tnstream->istream->mark(INPUT->tnstream->istream)
-#define	    REWIND(m)						INPUT->tnstream->istream->rewind(INPUT->tnstream->istream, m)
-#define	    REWINDLAST(m)					INPUT->tnstream->istream->rewindLast(INPUT->tnstream->istream)
-#define	    PERRORRECOVERY					RECOGNIZER->errorRecovery
-#define	    _fsp							RECOGNIZER->_fsp
-#define	    FAILEDFLAG						RECOGNIZER->failed
+#define	    CONSUME()						ISTREAM->consume(ISTREAM)
+#define	    MARK()							ISTREAM->mark(ISTREAM)
+#define	    REWIND(m)						ISTREAM->rewind(ISTREAM, m)
+#define	    REWINDLAST()					ISTREAM->rewindLast(ISTREAM)
+#define	    PERRORRECOVERY					PSRSTATE->errorRecovery
+#define	    FAILEDFLAG						PSRSTATE->failed
 #define	    HASFAILED()						(FAILEDFLAG == ANTLR3_TRUE)
-#define	    BACKTRACKING					RECOGNIZER->backtracking
+#define	    BACKTRACKING					PSRSTATE->backtracking
 #define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
 #define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, s)
 #define     ADAPTOR                         INPUT->adaptor
+#define		RULEMEMO						PSRSTATE->ruleMemo
+#define		SEEK(n)							ISTREAM->seek(ISTREAM, n)
+#define		INDEX()							ISTREAM->index(ISTREAM)
+#define		DBG								RECOGNIZER->debugger
+
+
 <endif>
 
 #define		TOKTEXT(tok, txt)				tok, (pANTLR3_UINT8)txt
@@ -483,7 +540,7 @@ headerFile( LEXER,
             bitsets,
             buildTemplate,
             buildAST,
-            rewrite,
+            rewriteMode,
             profile,
             backtracking, 
             synpreds, 
@@ -492,21 +549,21 @@ headerFile( LEXER,
             fileName,
             ANTLRVersion,
             generatedTimestamp,
-            scopes,
-	    superClass,
             trace,
+            scopes,
+			superClass,
             literals
         ) ::=
 <<
 <leadIn("C header")>
 <if(PARSER)>
- * The parser <mainName()>
+ * The parser <mainName()> 
 <endif>
 <if(LEXER)>
- * The lexer <mainName()>
+ * The lexer <mainName()> 
 <endif>
 <if(TREE_PARSER)>
- * The tree parser <mainName()>
+ * The tree parser <mainName()> 
 <endif>
 has the callable functions (rules) shown below,
  * which will invoke the code for the associated rule in the source grammar
@@ -523,16 +580,45 @@ has the callable functions (rules) shown below,
  *
 <if(LEXER)>
  * As this is a generated lexer, it is unlikely you will call it 'manually'. However
- * the entry points are provided anyway.
+ * the methods are provided anyway.
  *
 <endif>
- * The entry points for <name> are  as follows:
+ * The methods in p<name> are  as follows:
  *
  * <rules: {r | <if(!r.ruleDescriptor.isSynPred)> - <headerReturnType(ruleDescriptor=r.ruleDescriptor,...)>      p<name>-><r.ruleDescriptor.name>(p<name>)<endif>}; separator="\n * ">
  *
  * The return type for any particular rule is of course determined by the source
  * grammar file.
  */
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
 #ifndef	_<name>_H
 #define _<name>_H
 <actions.(actionScope).preincludes>
@@ -544,15 +630,36 @@ has the callable functions (rules) shown below,
 /* End of standard antlr 3 runtime definitions
  * =============================================================================
  */
- <actions.(actionScope).includes>
+<actions.(actionScope).includes>
+ 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Forward declare the context typedef so that we can use it before it is
+// properly defined. Delegators and delegates (from import statements) are
+// interdependent and their context structures contain pointers to each other
+// C only allows such things to be declared if you pre-declare the typedef.
+//
+typedef struct <name>_Ctx_struct <name>, * p<name>;
+
+<if(recognizer.grammar.delegates)>
+// Include delegate definition header files
+//
+<recognizer.grammar.delegates: {g|#include	\<<g.recognizerName>.h>}; separator="\n">
+         
+<endif>
+
+
 <actions.(actionScope).header>
 
-#ifdef	WIN32
-// Disable: Unreferenced parameter,                - Rules with parameters that are not used
-//          constant conditional,                  - ANTLR realizes that a prediction is always true (synpred usually)
-//          initialized but unused variable        - tree rewrite vairables declared but not needed
-//          Unreferenced local variable            - lexer rulle decalres but does not always use _type
-//          potentially unitialized variable used  - retval always returned from a rule 
+#ifdef	ANTLR3_WINDOWS
+// Disable: Unreferenced parameter,							- Rules with parameters that are not used
+//          constant conditional,							- ANTLR realizes that a prediction is always true (synpred usually)
+//          initialized but unused variable					- tree rewrite variables declared but not needed
+//          Unreferenced local variable						- lexer rule declares but does not always use _type
+//          potentially unitialized variable used			- retval always returned from a rule 
+//			unreferenced local function has been removed	- susually getTokenNames or freeScope, they can go without warnigns
 //
 // These are only really displayed at warning level /W4 but that is the code ideal I am aiming at
 // and the codegen must generate some of these warnings by necessity, apart from 4100, which is
@@ -563,6 +670,7 @@ has the callable functions (rules) shown below,
 #pragma warning( disable : 4101 )
 #pragma warning( disable : 4127 )
 #pragma warning( disable : 4189 )
+#pragma warning( disable : 4505 )
 #pragma warning( disable : 4701 )
 #endif
 <if(backtracking)>
@@ -576,14 +684,17 @@ has the callable functions (rules) shown below,
 <rules:{r |<headerReturnScope(ruleDescriptor=r.ruleDescriptor,...)>}>
 
 <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(scope=it)><endif>}>
-<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(scope=it)><endif>}>
 <rules:{r |<ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)>}>
-<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>
-
+<if(recognizer.grammar.delegators)>
+// Include delegator definition header files
+//
+<recognizer.grammar.delegators: {g|#include	\<<g.recognizerName>.h>}; separator="\n">
+         
+<endif>
 
 /** Context tracking structure for <mainName()>
  */
-typedef struct <name>_Ctx_struct
+struct <name>_Ctx_struct
 {
     /** Built in ANTLR3 context tracker contains all the generic elements
      *  required for context tracking.
@@ -597,6 +708,15 @@ typedef struct <name>_Ctx_struct
 <if(TREE_PARSER)>
     pANTLR3_TREE_PARSER	    pTreeParser;
 <endif>
+
+<if(recognizer.grammar.delegates)>
+	<recognizer.grammar.delegates:
+         {g|p<g.recognizerName>	<g:delegateName()>;}; separator="\n">
+<endif>
+<if(recognizer.grammar.delegators)>
+	<recognizer.grammar.delegators:
+         {g|p<g.recognizerName>	<g:delegateName()>;}; separator="\n">
+<endif>
 <scopes:{<if(it.isDynamicGlobalScope)>
     <globalAttributeScopeDef(scope=it)>
 <endif>}; separator="\n\n">
@@ -607,38 +727,31 @@ typedef struct <name>_Ctx_struct
 <if(LEXER)>
     <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*m<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);<endif>}; separator="\n";>
 <endif>
-<if(PARSER)>
-    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
-<endif>
-<if(TREE_PARSER)>
+<if(!LEXER)>
     <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<recognizer.grammar.delegatedRules:{ruleDescriptor|
+    <headerReturnType(ruleDescriptor)> (*<ruleDescriptor.name>)(struct <name>_Ctx_struct * ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
 <endif>
-    unsigned char * (*getGrammarFileName)();
+    const char * (*getGrammarFileName)();
     void	    (*free)   (struct <name>_Ctx_struct * ctx);
     <@members>
     <@end>
     <actions.(actionScope).context>
-}
-    <name>, * p<name>;
+};
 
-<if(LEXER)>
-/* Function protoypes for the lexer functions that external translation units
- * may wish to call.
- */
-ANTLR3_API p<name> <name>New         (pANTLR3_INPUT_STREAM     instream);
-<endif>
-<if(PARSER)>
-/* Function protoypes for the parser functions that external translation units
- * may wish to call.
- */
-ANTLR3_API p<name> <name>New         (pANTLR3_COMMON_TOKEN_STREAM     instream);
-<endif>
-<if(TREE_PARSER)>
-/* Function protoypes for the treeparser functions that external translation units
- * may wish to call.
- */
-ANTLR3_API p<name> <name>New         (pANTLR3_COMMON_TREE_NODE_STREAM     instream);
+// Function protoypes for the constructor functions that external translation units
+// such as delegators and delegates may wish to call.
+//
+ANTLR3_API p<name> <name>New         (<inputType()> instream<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>);
+ANTLR3_API p<name> <name>NewSSD      (<inputType()> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>);
+<if(!recognizer.grammar.grammarIsRoot)>
+extern pANTLR3_UINT8   <recognizer.grammar.composite.rootGrammar.recognizerName>TokenNames[];
 <endif>
+
+
 /** Symbolic definitions of all the tokens that the <grammarType()> will work with.
  * \{
  *
@@ -659,14 +772,35 @@ ANTLR3_API p<name> <name>New         (pANTLR3_COMMON_TREE_NODE_STREAM     instre
 #define	EOF	ANTLR3_TOKEN_EOF
 #endif
 
+#ifndef TOKENSOURCE
+#define TOKENSOURCE(lxr) lxr->pLexer->rec->state->tokSource
+#endif
+
 /* End of token definitions for <name>
  * =============================================================================
  */
 /** \} */
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif
-/* END - Note:Keep extra linefeed to satisfy UNIX systems */
 
+/* END - Note:Keep extra line feed to satisfy UNIX systems */
+
+>>
+
+inputType() ::=<<
+<if(LEXER)>
+pANTLR3_INPUT_STREAM
+<endif>
+<if(PARSER)>
+pANTLR3_COMMON_TOKEN_STREAM
+<endif>
+<if(TREE_PARSER)>
+pANTLR3_COMMON_TREE_NODE_STREAM
+<endif>
 >>
 
 grammarType() ::= <<
@@ -695,7 +829,17 @@ mainName() ::= <<
 
 headerReturnScope(ruleDescriptor) ::= "<returnScope(...)>"
 
-headerReturnType(ruleDescriptor) ::= "<returnType()>"
+headerReturnType(ruleDescriptor) ::= <<
+<if(LEXER)>
+<if(!r.ruleDescriptor.isSynPred)>
+ void
+<else>
+ <ruleDescriptor:returnType()>
+<endif>
+<else>
+ <ruleDescriptor:returnType()>
+<endif>
+>>
 
 // Produce the lexer output
 //
@@ -706,17 +850,18 @@ lexer(  grammar,
         rules, 
         numRules, 
         labelType="pANTLR3_COMMON_TOKEN",
-        filterMode) ::= <<
+        filterMode,
+        superClass) ::= <<
 
 <if(filterMode)>
 /* Forward declare implementation function for ANTLR3_TOKEN_SOURCE interface when
- * this is a fliter mode lexer.
+ * this is a filter mode lexer.
  */
 static pANTLR3_COMMON_TOKEN <name>NextToken   (pANTLR3_TOKEN_SOURCE toksource);
 
 /* Override the normal MEMOIZE and HAVEALREADYPARSED macros as this is a filtering
  * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather
- * than just BACKTRACKING. IN some cases this might generate code akin to:
+ * than just BACKTRACKING. In some cases this might generate code akin to:
  *   if (BACKTRACKING) if (BACKTRACKING > 1) memoize.
  * However, I assume that the C compilers/optimizers are smart enough to work this one out
  * these days - Jim
@@ -745,20 +890,32 @@ static void
 <name>Free  (p<name> ctx)
 {
 <if(memoize)>
-    RULEMEMO->free(RULEMEMO);
+	if	(RULEMEMO != NULL)
+	{
+		RULEMEMO->free(RULEMEMO);
+		RULEMEMO = NULL;
+	}
+<endif>
+<if(grammar.directDelegates)>
+	// Free the lexers that we delegated to
+	// functions to. NULL the state so we only free it once.
+	//
+	<grammar.directDelegates:
+         {g|ctx-><g:delegateName()>->pLexer->rec->state = NULL;
+         ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
 <endif>
     LEXER->free(LEXER);
     
     ANTLR3_FREE(ctx);
 }
 
-/** \brief Name of the gramar file that generated this code
+/** \brief Name of the grammar file that generated this code
  */
-static unsigned char fileName[] = "<fileName>";
+static const char fileName[] = "<fileName>";
 
 /** \brief Return the name of the grammar file that generated this code.
  */
-static unsigned char * getGrammarFileName()
+static const char * getGrammarFileName()
 {
 	return fileName;
 }
@@ -769,27 +926,43 @@ static unsigned char * getGrammarFileName()
 
 /** \brief Create a new lexer called <name>
  *
- * \param[in] instream Pointer to an initialized input stream
+ * \param[in]    instream Pointer to an initialized input stream
+ * \return 
+ *     - Success p<name> initialized for the lex start
+ *     - Fail NULL
+ */
+ANTLR3_API p<name> <name>New         
+(<inputType()> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
+{
+	// See if we can create a new lexer with the standard constructor
+	//
+	return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+/** \brief Create a new lexer called <name>
  *
+ * \param[in]    instream Pointer to an initialized input stream
+ * \param[state] state Previously created shared recognizer stat
  * \return 
  *     - Success p<name> initialized for the lex start
- *     - Fail (p<name>)(ANTLR3_ERR_NOMEM)
+ *     - Fail NULL
  */
-ANTLR3_API p<name> <name>New         (pANTLR3_INPUT_STREAM     instream)
+ANTLR3_API p<name> <name>NewSSD         
+(pANTLR3_INPUT_STREAM instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
 {
-    p<name> lexCtx; /* Context structure we will build and return   */
+    p<name> ctx; // Context structure we will build and return
 
-    lexCtx = (p<name>) ANTLR3_MALLOC(sizeof(<name>));
+    ctx = (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));
 
-    if  (lexCtx == NULL)
+    if  (ctx == NULL)
     {
-        /* Failed to allocate memory for lexer context */
-        return  (p<name>)ANTLR3_ERR_NOMEM;
+        // Failed to allocate memory for lexer context
+        return  NULL;
     }
 
     /* -------------------------------------------------------------------
      * Memory for basic structure is allocated, now to fill in
-     * in base ANTLR3 structures. We intialize the function pointers
+     * in base ANTLR3 structures. We initialize the function pointers
      * for the standard ANTLR3 lexer function set, but upon return
      * from here, the programmer may set the pointers to provide custom
      * implementations of each function. 
@@ -800,49 +973,64 @@ ANTLR3_API p<name> <name>New         (pANTLR3_INPUT_STREAM     instream)
     
     /* Create a base lexer, using the supplied input stream
      */
-    lexCtx->pLexer	= antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream);
-
+    ctx->pLexer	= antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream, state);
+    
     /* Check that we allocated the memory correctly
      */
-    if	(lexCtx->pLexer == (pANTLR3_LEXER)ANTLR3_ERR_NOMEM)
+    if	(ctx->pLexer == NULL)
     {
-	ANTLR3_FREE(lexCtx);
-	return  (p<name>)ANTLR3_ERR_NOMEM;
+		ANTLR3_FREE(ctx);
+		return  NULL;
     }
 <if(memoize)>
-    /* Create a LIST for recording rule memos.
-     */
-    lexCtx->pLexer->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */
+<if(grammar.grammarIsRoot)>
+    // Create a LIST for recording rule memos.
+    //
+    ctx->pLexer->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */
+<endif>
 <endif>
 
     /* Install the implementation of our <name> interface
      */
-    <rules:{r | <if(!r.ruleDescriptor.isSynPred)>lexCtx->m<r.ruleDescriptor.name>	= m<r.ruleDescriptor.name>;<endif>}; separator="\n";>
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)>ctx->m<r.ruleDescriptor.name>	= m<r.ruleDescriptor.name>;<endif>}; separator="\n";>
     
     /** When the nextToken() call is made to this lexer's pANTLR3_TOKEN_SOURCE
      *  it will call mTokens() in this generated code, and will pass it the ctx
      * pointer of this lexer, not the context of the base lexer, so store that now.
      */
-    lexCtx->pLexer->ctx	    = lexCtx;
+    ctx->pLexer->ctx	    = ctx;
     
-    /** Install the token matching function
+    /**Install the token matching function
      */
-    lexCtx->pLexer->mTokens = (void (*) (void *))(mTokens);
-    
-    lexCtx->getGrammarFileName	= getGrammarFileName;
-    lexCtx->free		= <name>Free;
+    ctx->pLexer->mTokens = (void (*) (void *))(mTokens);
     
+    ctx->getGrammarFileName	= getGrammarFileName;
+    ctx->free		= <name>Free;
+
+<if(grammar.directDelegates)>
+	// Initialize the lexers that we are going to delegate some
+	// functions to.
+	//
+	<grammar.directDelegates:
+         {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, ctx->pLexer->rec->state, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+<endif>    
+<if(grammar.delegators)>
+	// Install the pointers back to lexers that will delegate us to perform certain functions
+	// for them.
+	//
+	<grammar.delegators:
+         {g|ctx-><g:delegateName()>			= <g:delegateName()>;}; separator="\n">
+<endif>    
 <if(filterMode)>
     /* We have filter mode turned on, so install the filtering nextToken function
      */
-    lexCtx->pLexer->tokSource->nextToken = <name>NextToken;
+    ctx->pLexer->rec->state->tokSource->nextToken = <name>NextToken;
 <endif>
-
 	 <actions.lexer.apifuncs>
-	 
+
     /* Return the newly built lexer to the caller
      */
-    return  lexCtx;
+    return  ctx;
 }
 <if(cyclicDFAs)>
 
@@ -896,22 +1084,24 @@ static pANTLR3_COMMON_TOKEN
 <name>NextToken(pANTLR3_TOKEN_SOURCE toksource) 
 {
     pANTLR3_LEXER   lexer;
-
+	pANTLR3_RECOGNIZER_SHARED_STATE state;
+	
     lexer   = (pANTLR3_LEXER)(toksource->super);
+    state	= lexer->rec->state;
     
     /* Get rid of any previous token (token factory takes care of
      * any deallocation when this token is finally used up.
      */
-    lexer		->token	    = NULL;
-    lexer->rec	->error	    = ANTLR3_FALSE;	    /* Start out without an exception	*/
-    lexer->rec	->failed    = ANTLR3_FALSE;
+    state		->token	    = NULL;
+    state		->error	    = ANTLR3_FALSE;	    /* Start out without an exception	*/
+    state		->failed    = ANTLR3_FALSE;
 
     /* Record the start of the token in our input stream.
      */
-    lexer->tokenStartCharIndex			= lexer->input->istream->index(lexer->input->istream);
-    lexer->tokenStartCharPositionInLine	= lexer->input->getCharPositionInLine(lexer->input);
-    lexer->tokenStartLine				= lexer->input->getLine(lexer->input);
-    lexer->text							= NULL;
+    state->tokenStartCharIndex			= lexer->input->istream->index(lexer->input->istream);
+    state->tokenStartCharPositionInLine	= lexer->input->getCharPositionInLine(lexer->input);
+    state->tokenStartLine				= lexer->input->getLine(lexer->input);
+    state->text							= NULL;
 
     /* Now call the matching rules and see if we can generate a new token
      */
@@ -929,25 +1119,25 @@ static pANTLR3_COMMON_TOKEN
 			return  teof;
 		}
 		
-		lexer->token		= NULL;
-		lexer->rec->error	= ANTLR3_FALSE;	    /* Start out without an exception	*/
+		state->token		= NULL;
+		state->error		= ANTLR3_FALSE;	    /* Start out without an exception	*/
 		
 		{
-			ANTLR3_UINT64   m;
+			ANTLR3_MARKER   m;
 		    
-			m							= lexer->input->istream->mark(lexer->input->istream);
-			lexer->rec->backtracking	= 1;				/* No exceptions */
-			lexer->rec->failed			= ANTLR3_FALSE;
+			m						= lexer->input->istream->mark(lexer->input->istream);
+			state->backtracking		= 1;				/* No exceptions */
+			state->failed			= ANTLR3_FALSE;
 		 
 			/* Call the generated lexer, see if it can get a new token together.
 			 */
 			lexer->mTokens(lexer->ctx);   
-    		lexer->rec->backtracking	= 0;
+    		state->backtracking	= 0;
 	    	    
     		<! mTokens backtracks with synpred at BACKTRACKING==2
 				and we set the synpredgate to allow actions at level 1. !>
 	               
-			if	(lexer->rec->failed == ANTLR3_TRUE)
+			if	(state->failed == ANTLR3_TRUE)
 			{
 				lexer->input->istream->rewind(lexer->input->istream, m);
 				lexer->input->istream->consume(lexer->input->istream); <! advance one char and try again !>
@@ -955,13 +1145,15 @@ static pANTLR3_COMMON_TOKEN
 			else
 			{
 				lexer->emit(lexer);					/* Assemble the token and emit it to the stream */
-				return	lexer->token;
+				return	state->token;
 			}	
 		}
     }
 }
 >>
 
+actionGate() ::= "BACKTRACKING==0"
+
 filteringActionGate() ::= "BACKTRACKING==1"
 
 /** How to generate a parser */
@@ -977,13 +1169,16 @@ genericParser(  grammar,
                 superClass,
                 ASTLabelType="pANTLR3_BASE_TREE",
                 labelType,
-				members
+				members,
+				rewriteElementType, filterMode
               ) ::= <<
 
+
+<if(grammar.grammarIsRoot)>
 /** \brief Table of all token names in symbolic order, mainly used for
  *         error reporting.
  */
-static pANTLR3_UINT8   <name>TokenNames[]
+pANTLR3_UINT8   <name>TokenNames[<length(tokenNames)>+4]
      = {
         (pANTLR3_UINT8) "\<invalid>",       /* String to print to indicate an invalid token */
         (pANTLR3_UINT8) "\<EOR>",
@@ -991,19 +1186,28 @@ static pANTLR3_UINT8   <name>TokenNames[]
         (pANTLR3_UINT8) "\<UP>", 
         <tokenNames:{(pANTLR3_UINT8) <it>}; separator=",\n">
        };
+<endif>
 
     <@members>
 
     <@end>
+<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(scope=it)><endif>}>
 
-/* Forward declare the locally static matching functions we have generated.
- */
+// Forward declare the locally static matching functions we have generated.
+//
 <rules:{r | static <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
 static void	<name>Free(p<name> ctx);
+<if(!LEXER)>
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+<if(recognizer.grammar.delegatedRules)>
+// Delegated rules
+//
+<recognizer.grammar.delegatedRules:{ruleDescriptor|static <headerReturnType(ruleDescriptor)> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
 
-/* Function to initialize bitset APIs
- */
-static	void <name>LoadFollowSets();
+<endif>
+<endif>
 
 /* For use in tree output where we are accumulating rule labels via label += ruleRef
  * we need a function that knows how to free a return scope when the list is destroyed. 
@@ -1014,38 +1218,53 @@ static	void ANTLR3_CDECL freeScope(void * scope)
     ANTLR3_FREE(scope);
 }
 
-/** \brief Name of the gramar file that generated this code
+/** \brief Name of the grammar file that generated this code
  */
-static unsigned char fileName[] = "<fileName>";
+static const char fileName[] = "<fileName>";
 
 /** \brief Return the name of the grammar file that generated this code.
  */
-static unsigned char * getGrammarFileName()
+static const char * getGrammarFileName()
 {
 	return fileName;
 }
-/** \brief Create a new <name> parser and retrun a context for it.
+/** \brief Create a new <name> parser and return a context for it.
  *
  * \param[in] instream Pointer to an input stream interface.
  *
  * \return Pointer to new parser context upon success.
  */
 ANTLR3_API p<name>
-<name>New   (<inputStreamType> instream)
+<name>New   (<inputStreamType> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
+{
+	// See if we can create a new parser with the standard constructor
+	//
+	return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+/** \brief Create a new <name> parser and return a context for it.
+ *
+ * \param[in] instream Pointer to an input stream interface.
+ *
+ * \return Pointer to new parser context upon success.
+ */
+ANTLR3_API p<name>
+<name>NewSSD   (<inputStreamType> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
 {
     p<name> ctx;	    /* Context structure we will build and return   */
     
-    ctx	= (p<name>) ANTLR3_MALLOC(sizeof(<name>));
+    ctx	= (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));
     
     if	(ctx == NULL)
     {
-	/* Failed to allocate memory for parser context */
-        return  (p<name>)ANTLR3_ERR_NOMEM;
+		// Failed to allocate memory for parser context
+		//
+        return  NULL;
     }
     
     /* -------------------------------------------------------------------
      * Memory for basic structure is allocated, now to fill in
-     * the base ANTLR3 structures. We intialize the function pointers
+     * the base ANTLR3 structures. We initialize the function pointers
      * for the standard ANTLR3 parser function set, but upon return
      * from here, the programmer may set the pointers to provide custom
      * implementations of each function. 
@@ -1057,17 +1276,23 @@ ANTLR3_API p<name>
 <if(PARSER)>
     /* Create a base parser/recognizer, using the supplied token stream
      */
-    ctx->pParser	    = antlr3ParserNewStream(ANTLR3_SIZE_HINT, instream->tstream);
+    ctx->pParser	    = antlr3ParserNewStream(ANTLR3_SIZE_HINT, instream->tstream, state);
 <endif>
 <if(TREE_PARSER)>
     /* Create a base Tree parser/recognizer, using the supplied tree node stream
      */
-    ctx->pTreeParser		= antlr3TreeParserNewStream(ANTLR3_SIZE_HINT, instream);
+    ctx->pTreeParser		= antlr3TreeParserNewStream(ANTLR3_SIZE_HINT, instream, state);
 <endif>
 
     /* Install the implementation of our <name> interface
      */
     <rules:{r | ctx-><r.ruleDescriptor.name>	= <r.ruleDescriptor.name>;}; separator="\n";>
+<if(grammar.delegatedRules)>
+	// Install the delegated methods so that they appear to be a part of this 
+	// parser
+	//
+    <grammar.delegatedRules:{ruleDescriptor | ctx-><ruleDescriptor.name>	= <ruleDescriptor.name>;}; separator="\n";>
+<endif>
 
     ctx->free			= <name>Free;
     ctx->getGrammarFileName	= getGrammarFileName;
@@ -1083,25 +1308,34 @@ ANTLR3_API p<name>
     <@apifuncs>
 
     <@end>
-    
+<if(grammar.directDelegates)>
+	// Initialize the parsers that we are going to delegate some
+	// functions to.
+	//
+	<grammar.directDelegates:
+         {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, PSRSTATE, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+<endif>    
+<if(grammar.delegators)>
+	// Install the pointers back to parsers that will delegate us to perform certain functions
+	// for them.
+	//
+	<grammar.delegators:
+         {g|ctx-><g:delegateName()>			= <g:delegateName()>;}; separator="\n">
+<endif>
     <actions.parser.apifuncs>
     <actions.treeparser.apifuncs>
 <if(memoize)>
+<if(grammar.grammarIsRoot)>
     /* Create a LIST for recording rule memos.
      */
-<if(TREE_PARSER)>
-    ctx->pTreeParser->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
-<else>
-    ctx->pParser->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
+     RULEMEMO    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
 <endif>
 <endif>	
     /* Install the token table
      */
-    RECOGNIZER->tokenNames   = <name>TokenNames;
+    PSRSTATE->tokenNames   = <grammar.composite.rootGrammar.recognizerName>TokenNames;
     
-    /* Initialize the follow bit sets
-     */
-    <name>LoadFollowSets();
+    <@debugStuff()>
     
     /* Return the newly built parser to the caller
      */
@@ -1120,6 +1354,25 @@ ANTLR3_API p<name>
     
     <@cleanup>
     <@end>
+<if(grammar.directDelegates)>
+	// Free the parsers that we delegated to
+	// functions to.NULL the state so we only free it once.
+	//
+	<grammar.directDelegates:
+         {g| ctx-><g:delegateName()>-><if(TREE_PARSER)>pTreeParser<else>pParser<endif>->rec->state = NULL;
+         ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
+<endif>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+	if	(RULEMEMO != NULL)
+	{
+		RULEMEMO->free(RULEMEMO);
+		RULEMEMO = NULL;
+	}
+<endif>
+<endif>
+	// Free this parser
+	//
 <if(TREE_PARSER)>
     ctx->pTreeParser->free(ctx->pTreeParser);<\n>
 <else>
@@ -1141,7 +1394,7 @@ ANTLR3_API p<name>
  */
 static pANTLR3_UINT8    *getTokenNames() 
 {
-        return <name>TokenNames; 
+        return <grammar.composite.rootGrammar.recognizerName>TokenNames; 
 }
 
     <members>
@@ -1151,18 +1404,6 @@ static pANTLR3_UINT8    *getTokenNames()
 <bitsets:bitsetDeclare(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
                     words64=it.bits)>
      
-/** Load up the static bitsets for following set for error recovery.
- *  \remark
- *  These are static after the parser is generated, hence they are static
- *  delcarations in the parser and are thread safe after initialization.
- */
-static
-void <name>LoadFollowSets()
-{
-    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>
-    return;
-}
 
 <if(cyclicDFAs)>
 
@@ -1179,6 +1420,17 @@ void <name>LoadFollowSets()
  * Parsing rules
  */
 <rules; separator="\n\n">
+<if(grammar.delegatedRules)>
+	// Delegated methods that appear to be a part of this 
+	// parser
+	//
+<grammar.delegatedRules:{ruleDescriptor|
+    <returnType()> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope:parameterScope(scope=it)>) 
+    \{ 
+        <if(ruleDescriptor.hasReturnValue)>return <endif>ctx-><ruleDescriptor.grammar:delegateName()>-><ruleDescriptor.name>(ctx-><ruleDescriptor.grammar:delegateName()><if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); 
+	\}}; separator="\n">
+
+<endif>
 /* End of parsing rules
  * ==============================================
  */
@@ -1210,7 +1462,7 @@ parser(	grammar,
 		labelType="pANTLR3_COMMON_TOKEN", 
 		members={<actions.parser.members>}
 		) ::= <<
-<genericParser(inputStreamType="pANTLR3_COMMON_TOKEN_STREAM", ...)>
+<genericParser(inputStreamType="pANTLR3_COMMON_TOKEN_STREAM", rewriteElementType="TOKEN", ...)>
 >>
 
 /** How to generate a tree parser; same as parser except the input
@@ -1228,9 +1480,9 @@ treeParser(	grammar,
 			labelType={<ASTLabelType>}, 
 			ASTLabelType="pANTLR3_BASE_TREE", 
 			superClass="TreeParser", 
-			members={<actions.treeparser.members>}
+			members={<actions.treeparser.members>}, filterMode
 			) ::= <<
-<genericParser(inputStreamType="pANTLR3_COMMON_TREE_NODE_STREAM", ...)>
+<genericParser(inputStreamType="pANTLR3_COMMON_TREE_NODE_STREAM", rewriteElementType="NODE", ...)>
 >>
 
 /** A simpler version of a rule template that is specific to the imaginary
@@ -1244,11 +1496,13 @@ synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 <<
 // $ANTLR start <ruleName>
 static void <ruleName>_fragment(p<name> ctx <ruleDescriptor.parameterScope:parameterScope(scope=it)>) 
-{   
+{
+	<ruleLabelDefs()>
+	<ruleLabelInitializations()>
 <if(trace)>
-    printf("enter <ruleName> %d failed = %d, backtracking = %d\\n",input.LT(1),failed,BACKTRACKING);
+    ANTLR3_PRINTF("enter <ruleName> %d failed = %d, backtracking = %d\\n",LT(1),failed,BACKTRACKING);
     <block>
-    printf("exit <ruleName> %d, failed = %d, backtracking = %d\\n",input.LT(1),failed,BACKTRACKING);
+    ANTLR3_PRINTF("exit <ruleName> %d, failed = %d, backtracking = %d\\n",LT(1),failed,BACKTRACKING);
     
 <else>
     <block>
@@ -1261,7 +1515,7 @@ static void <ruleName>_fragment(p<name> ctx <ruleDescriptor.parameterScope:param
 synpred(predname) ::= <<
 static ANTLR3_BOOLEAN <predname>(p<name> ctx) 
 {
-    ANTLR3_UINT64   start;
+    ANTLR3_MARKER   start;
     ANTLR3_BOOLEAN  success;
 
     BACKTRACKING++;
@@ -1281,16 +1535,20 @@ lexerSynpred(predname) ::= <<
 <synpred(predname)>
 >>
 
-ruleMemoization(name) ::= <<
+ruleMemoization(rname) ::= <<
 <if(memoize)>
 if ( (BACKTRACKING>0) && (HAVEPARSEDRULE(<ruleDescriptor.index>)) )
 {
 <if(ruleDescriptor.hasMultipleReturnValues)>
 <if(!ruleDescriptor.isSynPred)>
-	retval.start = 0;
-	<scopeClean()><\n>
+	retval.start = 0;<\n>
 <endif>
 <endif>
+    <(ruleDescriptor.actions.after):execAfter()>
+    <finalCode(finalBlock=finally)>
+<if(!ruleDescriptor.isSynPred)>
+    <scopeClean()><\n>
+<endif>
     return <ruleReturnValue()>; 
 }
 <endif>
@@ -1298,6 +1556,10 @@ if ( (BACKTRACKING>0) && (HAVEPARSEDRULE(<ruleDescriptor.index>)) )
 
 /** How to test for failure and return from rule */
 checkRuleBacktrackFailure() ::= <<
+if  (HASEXCEPTION())
+{
+    goto rule<ruleDescriptor.name>Ex;
+}
 <if(backtracking)>
 if (HASFAILED())
 {
@@ -1312,7 +1574,7 @@ ruleBacktrackFailure() ::= <<
 <if(backtracking)>
 if (BACKTRACKING>0)
 {
-    FAILEDFLAG = <true()>; 
+    FAILEDFLAG = <true()>;
     <scopeClean()>
     return <ruleReturnValue()>;
 }
@@ -1330,13 +1592,13 @@ rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memo
 static <returnType()>
 <ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope(scope=it)>)
 {   
-    <if(trace)>printf("enter <ruleName> %s failed=%d, backtracking=%d\n", LT(1), BACKTRACKING);<endif>
+    <if(trace)>ANTLR3_PRINTF("enter <ruleName> %s failed=%d, backtracking=%d\n", LT(1), BACKTRACKING);<endif>
     <ruleDeclarations()>
     <ruleDescriptor.actions.declarations>
     <ruleLabelDefs()>
     <ruleInitializations()>
     <ruleDescriptor.actions.init>
-    <ruleMemoization(name=ruleName)>
+    <ruleMemoization(rname=ruleName)>
     <ruleLabelInitializations()>
     <@preamble()>
     {
@@ -1351,43 +1613,55 @@ static <returnType()>
     }
     else
     {
-	<(ruleDescriptor.actions.after):execAction()>
+	<(ruleDescriptor.actions.after):execAfter()>
     }
 <else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    if (HASEXCEPTION())
-    {
-        PREPORTERROR();
-        PRECOVER();
-    }
-<if(ruleDescriptor.actions.after)>
-    else
-    {
-	<(ruleDescriptor.actions.after):execAction()>
-    }<\n>
-<endif>
-<endif>
-<endif>
+    <if(!emptyRule)>
+        <if(actions.(actionScope).rulecatch)>
+            <actions.(actionScope).rulecatch>
+        <else>
+            if (HASEXCEPTION())
+            {
+                PREPORTERROR();
+                PRECOVER();
+                <@setErrorReturnValue()>
+            }
+            <if(ruleDescriptor.actions.after)>
+            else
+            {
+                <(ruleDescriptor.actions.after):execAfter()>
+            }<\n>
+            <endif>
+        <endif>
+    <endif>
 <endif>
-    <if(trace)>System.out.println("exit <ruleName> "+LT(1)+" failed="+failed+" backtracking="+BACKTRACKING);<endif>
+
+    <if(trace)>ANTLR3_PRINTF("exit <ruleName> %d failed=%s backtracking=%s\n", LT(1), failed, BACKTRACKING);<endif>
     <memoize()>
-    <finally>
+<if(finally)>
+    <finalCode(finalBlock=finally)>
+<endif>
+    <scopeClean()>
     <@postamble()>
     return <ruleReturnValue()>;
 }
 /* $ANTLR end <ruleName> */
 >>
 
+finalCode(finalBlock) ::= <<
+{
+    <finalBlock>
+}
+
+>>
+
 catch(decl,action) ::= <<
 /* catch(decl,action)
  */
-if  ((HASEXCEPTION()) && (EXCEPTION->type == <e.decl>) )
 {
     <e.action>
 }
+
 >>
 
 ruleDeclarations() ::= <<
@@ -1399,7 +1673,7 @@ ruleDeclarations() ::= <<
 }>
 <endif>
 <if(memoize)>
-ANTLR3_UINT64 <ruleDescriptor.name>_StartIndex;
+ANTLR3_UINT32 <ruleDescriptor.name>_StartIndex;
 <endif>
 >>
 
@@ -1437,7 +1711,7 @@ ruleLabelInitializations() ::= <<
 >
 <if(ruleDescriptor.hasMultipleReturnValues)>
 <if(!ruleDescriptor.isSynPred)>
-retval.start = LT(1);<\n>
+retval.start = LT(1); retval.stop = retval.start;<\n>
 <endif>
 <endif>
 >>
@@ -1508,7 +1782,6 @@ ruleCleanUp() ::= <<
 //
 goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
 rule<ruleDescriptor.name>Ex: ;
-<scopeClean()>
 <if(ruleDescriptor.hasMultipleReturnValues)>
 <if(!TREE_PARSER)>
 <if(!ruleDescriptor.isSynPred)>
@@ -1549,25 +1822,25 @@ void m<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDes
     <if(trace)>System.out.println("enter <ruleName> '"+(char)LA(1)+"' line="+GETLINE()+":"+GETCHARPOSITIONINLINE()+" failed="+failed+" backtracking="+BACKTRACKING);<endif>
 
 <if(nakedBlock)>
-    <ruleMemoization(name=ruleName)>
+    <ruleMemoization(rname=ruleName)>
     <lexerRuleLabelInit()>
     <ruleDescriptor.actions.init>
         
     <block><\n>
 <else>
-    <ruleMemoization(name=ruleName)>   
+    <ruleMemoization(rname=ruleName)>
     <lexerRuleLabelInit()>
     _type	    = <ruleName>;
        
     <ruleDescriptor.actions.init>
     
     <block>
-	LEXER->type = _type;
+	LEXSTATE->type = _type;
 <endif>
-    <if(trace)> fprintf(stderr, "exit <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),failed,BACKTRACKING);<endif>
+    <if(trace)> ANTLR3_FPRINTF(stderr, "exit <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),failed,BACKTRACKING);<endif>
     <ruleCleanUp()>
     <lexerRuleLabelFree()>
-    <(ruleDescriptor.actions.after):execAction()>
+    <(ruleDescriptor.actions.after):execAfter()>
     <memoize>
 }
 // $ANTLR end <ruleName>
@@ -1688,7 +1961,7 @@ earlyExitEx() ::= <<
  */
 CONSTRUCTEX();
 EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION;
-EXCEPTION->name = ANTLR3_EARLY_EXIT_NAME;
+EXCEPTION->name = (void *)ANTLR3_EARLY_EXIT_NAME;
 <\n>
 >>
 positiveClosureBlockSingleAlt ::= positiveClosureBlock
@@ -1739,17 +2012,22 @@ case <i>:
 >>
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
 // <fileName>:<description>
 {
     <@declarations()>
     <@initializations()>
     <elements:element()>
+    <rew>
     <@cleanup()>
 }
 >>
 
 // E L E M E N T S
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
 
 /** Dump the elements one per line */
 element() ::= <<
@@ -1758,16 +2036,13 @@ element() ::= <<
 >>
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label> = (<labelType>)LT(1);<\n>
-<endif>
-MATCHT(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>); 
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label> = (<labelType>)<endif> MATCHT(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>); 
 <checkRuleBacktrackFailure()>
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -1834,12 +2109,14 @@ else
 mismatchedSetEx() ::= <<
 CONSTRUCTEX();
 EXCEPTION->type         = ANTLR3_MISMATCHED_SET_EXCEPTION;
-EXCEPTION->name         = ANTLR3_MISMATCHED_SET_NAME;
+EXCEPTION->name         = (void *)ANTLR3_MISMATCHED_SET_NAME;
 <if(PARSER)>
 EXCEPTION->expectingSet = &FOLLOW_set_in_<ruleName><elementIndex>;
 <endif>
 >>
 
+matchRuleBlockSet ::= matchSet
+
 matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 <matchSet(...)>
 <listLabel(elem=label,...)>
@@ -1848,10 +2125,10 @@ matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 /** Match a string literal */
 lexerStringRef(string,label) ::= <<
 <if(label)>
-int <label>Start = GETCHARINDEX();
+<label>Start = GETCHARINDEX();
 MATCHS(<string>); 
 <checkRuleBacktrackFailure()>
-<labelType> <label> = LEXER->tokFactory->newToken(LEXER->tokFactory);
+<label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
 <label>->setType(<label>, ANTLR3_TOKEN_INVALID);
 <label>->setStartIndex(<label>, <label>Start);
 <label>->setStopIndex(<label>, GETCHARINDEX()-1);
@@ -1890,51 +2167,50 @@ wildcardCharListLabel(label, elementIndex) ::= <<
 >>
 
 /** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
- */
-ruleRef(rule,label,elementIndex,args) ::= <<
-FOLLOWPUSH(FOLLOW_<rule>_in_<ruleName><elementIndex>);
-<if(label)>
-<label>=<rule>(ctx<if(args)>, <args; separator=", "><endif>);<\n>
-<else>
-<rule>(ctx<if(args)>, <args; separator=", "><endif>);<\n>
-<endif>
+ *  and a return value or values. The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+FOLLOWPUSH(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)>ctx-><scope:delegateName()>-><endif><rule.name>(ctx<if(scope)>-><scope:delegateName()><endif><if(args)>, <args; separator=", "><endif>);<\n>
 FOLLOWPOP();
-if  (HASEXCEPTION())
-{
-    goto rule<ruleDescriptor.name>Ex;
-}
 <checkRuleBacktrackFailure()>
 >>
 
 /** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
 <listLabel(elem=label,...)>
 >>
 
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
+/** A lexer rule reference 
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
 /* <description> */
 <if(label)>
 {
-    ANTLR3_UINT64 <label>Start<elementIndex> = GETCHARINDEX();
-    m<rule>(ctx <if(args)>, <endif><args; separator=", ">); 
+    ANTLR3_MARKER <label>Start<elementIndex> = GETCHARINDEX();
+    <if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">); 
     <checkRuleBacktrackFailure()>
-    <label> = LEXER->tokFactory->newToken(LEXER->tokFactory);
+    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
     <label>->setType(<label>, ANTLR3_TOKEN_INVALID);
     <label>->setStartIndex(<label>, <label>Start<elementIndex>);
     <label>->setStopIndex(<label>, GETCHARINDEX()-1);
     <label>->input = INPUT;
 }
 <else>
-m<rule>(ctx <if(args)>, <endif><args; separator=", ">); 
+<if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">); 
 <checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -1943,12 +2219,12 @@ lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
 lexerMatchEOF(label,elementIndex) ::= <<
 <if(label)>
 {
-    ANTLR3_UINT64 <label>Start<elementIndex>;
+    ANTLR3_UINT32 <label>Start<elementIndex>;
     <labelType> <label>;
     <label>Start<elementIndex> = GETCHARINDEX();
     MATCHC(ANTLR3_CHARSTREAM_EOF); 
     <checkRuleBacktrackFailure()>
-    <label> = LEXER->tokFactory->newToken(LEXER->tokFactory);
+    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
     <label>->setType(<label>, ANTLR3_TOKEN_EOF);
     <label>->setStartIndex(<label>, <label>Start<elementIndex>);
     <label>->setStopIndex(<label>, GETCHARINDEX()-1);
@@ -1961,7 +2237,7 @@ lexerMatchEOF(label,elementIndex) ::= <<
 >>
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
 <root:element()>
 <actionsAfterRoot:element()>
 <if(nullableChildList)>
@@ -1995,8 +2271,8 @@ if ( !(<evalPredicate(...)>) )
 newFPE() ::= <<
     CONSTRUCTEX();
     EXCEPTION->type         = ANTLR3_FAILED_PREDICATE_EXCEPTION;
-    EXCEPTION->message      = "<description>";
-    EXCEPTION->ruleName	 = "<ruleName>";
+    EXCEPTION->message      = (void *)"<description>";
+    EXCEPTION->ruleName	 = (void *)"<ruleName>";
     <\n>
 >>
 
@@ -2014,7 +2290,7 @@ dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
 <else>
         <ruleBacktrackFailure()>
     
-        <newNVException()>    
+        <newNVException()>
         goto rule<ruleDescriptor.name>Ex;
 
 <endif>
@@ -2025,7 +2301,7 @@ dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
 newNVException() ::= <<
 CONSTRUCTEX();
 EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
-EXCEPTION->message      = "<description>";
+EXCEPTION->message      = (void *)"<description>";
 EXCEPTION->decisionNum  = <decisionNumber>;
 EXCEPTION->state        = <stateNumber>;
 <@noViableAltException()>
@@ -2142,7 +2418,8 @@ dfaEdgeSwitch(labels, targetState) ::= <<
  *  The <name> attribute is inherited via the parser, lexer, ...
  */
 dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, INPUT->istream, &cdfa<decisionNumber>);
+alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, ISTREAM, &cdfa<decisionNumber>);
+<checkRuleBacktrackFailure()>
 >>
 
 /* Dump DFA tables as static initialized arrays of shorts(16 bits)/characters(8 bits)
@@ -2220,7 +2497,7 @@ static ANTLR3_INT32 dfa<dfa.decisionNumber>_sst(p<name> ctx, pANTLR3_BASE_RECOGN
     
     CONSTRUCTEX();
     EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
-    EXCEPTION->message      = "<dfa.description>";
+    EXCEPTION->message      = (void *)"<dfa.description>";
     EXCEPTION->decisionNum  = <dfa.decisionNumber>;
     EXCEPTION->state        = _s;
     <@noViableAltException()>
@@ -2266,7 +2543,7 @@ ANTLR3_CYCLIC_DFA cdfa<dfa.decisionNumber>
 cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
 {
     ANTLR3_UINT32 LA<decisionNumber>_<stateNumber>;<\n>
-    ANTLR3_UINT32 index<decisionNumber>_<stateNumber>;<\n>
+    ANTLR3_MARKER index<decisionNumber>_<stateNumber>;<\n>
 
 	LA<decisionNumber>_<stateNumber> = LA(1);<\n>
     <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
@@ -2312,7 +2589,7 @@ orPredicates(operands) ::= "((<first(operands)>)<rest(operands):{o | ||(<o>)}>)"
 
 notPredicate(pred) ::= "!( <evalPredicate(...)> )"
 
-evalPredicate(pred,description) ::= "<pred>"
+evalPredicate(pred,description) ::= "(<pred>)"
 
 evalSynPredicate(pred,description) ::= "<pred>(ctx)"
 
@@ -2387,6 +2664,7 @@ globalAttributeScopeFuncDecl(scope) ::=
  * Function declaration for creating a <name>_<scope.name> scope set 
  */
 static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
+static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
 /* ----------------------------------------------------------------------------- */
 
 <endif>
@@ -2396,10 +2674,28 @@ globalAttributeScopeFuncMacro(scope) ::= <<
 <if(scope.attributes)>
 /* globalAttributeScopeFuncMacro(scope)
  */
-/** Macro for popping the top value from a <scopeStack(sname=scope.name)>
+/** Function  for popping the top value from a <scopeStack(sname=scope.name)>
  */
-#define <scopePopName(sname=scope.name,...)>()  SCOPE_TOP(<scope.name>) = ctx-><scopeStack(sname=scope.name,...)>->pop(ctx-><scopeStack(sname=scope.name,...)>)
+void
+<scopePopName(sname=scope.name,...)>(p<name> ctx)
+{
+    // First see if the user defined a function they want to be called when a
+    // scope is popped/freed.
+    //
+	// If the user supplied the scope entries with a free function,then call it first
+	//
+    if	(SCOPE_TOP(<scope.name>)->free != NULL)
+	{
+        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
+	}
 
+    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
+    // we want to reuse scope entries if we do continuous push and pops. Most scopes don't
+    // next too far so we don't want to keep freeing and allocating them
+    //
+    ctx-\><scopeStack(sname=scope.name,...)>_limit--;
+    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-\><scopeStack(sname=scope.name,...)>-\>get(ctx-\><scopeStack(sname=scope.name,...)>, ctx-\><scopeStack(sname=scope.name,...)>_limit - 1));
+}
 <endif>
 >>
 
@@ -2411,6 +2707,7 @@ ruleAttributeScopeFuncDecl(scope) ::= <<
  * Function declarations for creating a <name>_<scope.name> scope set 
  */
 static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
+static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
 /* ----------------------------------------------------------------------------- */
 
 <endif>
@@ -2420,13 +2717,33 @@ ruleAttributeScopeFuncMacro(scope) ::= <<
 <if(scope.attributes)>
 /* ruleAttributeScopeFuncMacro(scope)
  */
-/** Macro for popping the top value from a <scopeStack(sname=scope.name,...)>
+/** Function for popping the top value from a <scopeStack(sname=scope.name,...)>
  */
-#define <scopePopName(sname=scope.name,...)>()  SCOPE_TOP(<scope.name>) = ctx-><scopeStack(sname=scope.name,...)>->pop(ctx-><scopeStack(sname=scope.name)>)
+void
+<scopePopName(sname=scope.name,...)>(p<name> ctx)
+{
+    // First see if the user defined a function they want to be called when a
+    // scope is popped/freed.
+    //
+	// If the user supplied the scope entries with a free function,then call it first
+	//
+    if	(SCOPE_TOP(<scope.name>)->free != NULL)
+	{
+        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
+	}
+
+    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
+    // we want to reuse scope entries if we do continuous push and pops. Most scopes don't
+    // next too far so we don't want to keep freeing and allocating them
+    //
+    ctx-\><scopeStack(sname=scope.name,...)>_limit--;
+    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-\><scopeStack(sname=scope.name,...)>-\>get(ctx-\><scopeStack(sname=scope.name,...)>, ctx-\><scopeStack(sname=scope.name,...)>_limit - 1));
+}
 
 <endif>
 >>
-globalAttributeScopeDef(scope) ::= 
+
+globalAttributeScopeDef(scope) ::=
 <<
 /* globalAttributeScopeDef(scope)
  */
@@ -2435,6 +2752,7 @@ globalAttributeScopeDef(scope) ::=
  *  and <scopePopName(sname=scope.name,...)>()
  */
 pANTLR3_STACK <scopeStack(sname=scope.name)>;
+ANTLR3_UINT32 <scopeStack(sname=scope.name)>_limit;
 /** Pointer to the top of the stack for the global scope <scopeStack(sname=scope.name)>
  */
 <scopeType(sname=scope.name,...)>    (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
@@ -2451,6 +2769,7 @@ ruleAttributeScopeDef(scope) ::= <<
  *  and <scopePopName(sname=scope.name,...)>()
  */
 pANTLR3_STACK <scopeStack(sname=scope.name,...)>;
+ANTLR3_UINT32 <scopeStack(sname=scope.name,...)>_limit;
 <scopeType(sname=scope.name,...)>   (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
 <scopeType(sname=scope.name,...)>   <scopeTopDecl(sname=scope.name,...)>;
 
@@ -2475,21 +2794,23 @@ ruleAttributeScopeFuncs(scope) ::= <<
 
 globalAttributeScope(scope) ::= <<
 <if(scope.attributes)>
-/* globalAttributeScope(scope)  
+/* globalAttributeScope(scope)
  */
 ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
-ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(ANTLR3_SIZE_HINT);
+ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
+ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
 <scopeTop(sname=scope.name,...)>      = NULL;
 <endif>
 >>
 
-ruleAttributeScope(scope) ::= 
+ruleAttributeScope(scope) ::=
 <<
 <if(scope.attributes)>
 /* ruleAttributeScope(scope)
  */
 ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
-ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(ANTLR3_SIZE_HINT);
+ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
+ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
 <scopeTop(sname=scope.name,...)>      = NULL;
 <endif>
 >>
@@ -2519,7 +2840,7 @@ ctx-><scopeTopDecl(sname=sname,...)>
 >>
 
 scopePop(sname) ::= <<
-<scopePopName(sname=sname,...)>();
+<scopePopName(sname=sname,...)>(ctx);
 >>
 
 scopePush(sname) ::= <<
@@ -2551,9 +2872,9 @@ attributeFuncs(scope) ::= <<
 /* attributeFuncs(scope)
  */
 
-static void ANTLR3_CDECL <scope.name>Free(void * data)
+static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope)
 {
-    ANTLR3_FREE(data);
+    ANTLR3_FREE(scope);
 }
 
 /** \brief Allocate initial memory for a <name> <scope.name> scope variable stack entry and
@@ -2573,11 +2894,9 @@ static void ANTLR3_CDECL <scope.name>Free(void * data)
  *   void ANTLR3_CDECL myfunc( <scopeType(sname=scope.name)> ptr). 
  * \endcode
  *
- * It should perform any custom freeing stuff that you need (call ANTLR_FREE, not free()
- * then free the entry it is given with: 
- * \code
- *   ANTLR3_FREE(ptr);
- * \endcode
+ * It should perform any custom freeing stuff that you need (call ANTLR_FREE3, not free()
+ * NB: It should not free the pointer it is given, which is the scope stack entry itself
+ * and will be freed by the function that calls your custom free routine.
  * 
  */ 
 static <scopeType(sname=scope.name)>
@@ -2587,34 +2906,51 @@ static <scopeType(sname=scope.name)>
      */
     <scopeType(sname=scope.name)>      newAttributes;
 
-    /* Allocate the memory for a new structure
+    /* Allocate the memory for a new structure if we need one.
      */
-    newAttributes = (<scopeType(sname=scope.name)>) ANTLR3_MALLOC(sizeof(<scopeStruct(sname=scope.name)>));
-
-    if  (newAttributes != NULL)
+    if (ctx-\><scopeStack(sname=scope.name)>-\>size(ctx-\><scopeStack(sname=scope.name)>) \> ctx-\><scopeStack(sname=scope.name)>_limit)
     {
-	/* Standard ANTLR3 library implementation
-	 */
-	ctx-><scopeStack(sname=scope.name)>->push(ctx-><scopeStack(sname=scope.name)>, newAttributes, <scope.name>Free);
-	
+        // The current limit value was less than the number of scopes available on the stack so
+        // we can just reuse one. Our limit tracks the stack count, so the index of the entry we want
+        // is one less than that, or conveniently, the current value of limit.
+        //
+        newAttributes = (<scopeType(sname=scope.name)>)ctx-><scopeStack(sname=scope.name)>->get(ctx-><scopeStack(sname=scope.name)>, ctx-\><scopeStack(sname=scope.name)>_limit);
+    }
+    else
+    {
+        // Need a new allocation
+        //
+        newAttributes = (<scopeType(sname=scope.name)>) ANTLR3_MALLOC(sizeof(<scopeStruct(sname=scope.name)>));
+        if  (newAttributes != NULL)
+        {
+            /* Standard ANTLR3 library implementation
+             */
+            ctx-\><scopeStack(sname=scope.name)>-\>push(ctx-\><scopeStack(sname=scope.name)>, newAttributes, (void (*)(void *))<scope.name>Free);
+        }
+    }
+
+    // Blank out any previous free pointer, the user might or might install a new one.
+    //
+    newAttributes->free = NULL;
+
+    // Indicate the position in the available stack that the current level is at
+    //
+    ctx-><scopeStack(sname=scope.name)>_limit++;
+
 	/* Return value is the pointer to the new entry, which may be used locally
 	 * without de-referencing via the context.
-	 */
-    }
- 
-    /* Calling routine will throw an exeception if this
-     * fails and this pointer is NULL.
      */
     return  newAttributes;
 }<\n>
 
 <endif>
 >>
+returnStructName() ::= "<it.name>_return"
 
 returnType() ::= <<
 <if(!ruleDescriptor.isSynPred)>
 <if(ruleDescriptor.hasMultipleReturnValues)>
-<name>_<ruleDescriptor.name>_return
+<ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>
 <else>
 <if(ruleDescriptor.hasSingleReturnValue)>
 <ruleDescriptor.singleValueReturnType>
@@ -2632,7 +2968,7 @@ ANTLR3_BOOLEAN
  */
 ruleLabelType(referencedRule) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<name>_<referencedRule.name>_return
+<referencedRule.grammar.recognizerName>_<referencedRule.name>_return
 <else>
 <if(referencedRule.hasSingleReturnValue)>
 <referencedRule.singleValueReturnType>
@@ -2642,6 +2978,10 @@ void
 <endif>
 >>
 
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
 /** Using a type to init value map, try to init a type; if not in table
  *  must be an object, default value is "0".
  */
@@ -2662,6 +3002,8 @@ ruleLabelInitVal(label) ::= <<
 <endif>
 >>
 
+ASTLabelType() ::= "<if(recognizer.ASTLabelType)><recognizer.ASTLabelType><else>pANTLR3_BASE_TREE<endif>"
+
 /** Define a return struct for a rule if the code needs to access its
  *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
  *  subgroups to stick in members.
@@ -2669,7 +3011,7 @@ ruleLabelInitVal(label) ::= <<
 returnScope() ::= <<
 <if(!ruleDescriptor.isSynPred)>
 <if(ruleDescriptor.hasMultipleReturnValues)>
-typedef struct <returnType(...)>_struct
+typedef struct <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>_struct
 {
 <if(!TREE_PARSER)>
     /** Generic return elements for ANTLR3 rules that are not in tree parsers or returning trees
@@ -2677,12 +3019,13 @@ typedef struct <returnType(...)>_struct
     pANTLR3_COMMON_TOKEN    start;
     pANTLR3_COMMON_TOKEN    stop;
 <else>
-    pANTLR3_BASE_TREE       start;
+    <recognizer.ASTLabelType>       start;
+    <recognizer.ASTLabelType>       stop;
 <endif>
     <@ruleReturnMembers()>   
     <ruleDescriptor.returnScope.attributes:{<it.decl>;}; separator="\n">
 }
-    <returnType(...)>;<\n><\n>
+    <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>;<\n><\n>
 <endif>
 <endif>
 >>
@@ -2699,24 +3042,24 @@ parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;"
  */
 scopeAttributeRef(scope,attr,index,negIndex) ::= <<
 <if(negIndex)>
-((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), ctx->SCOPE_STACK(scope)->size(ctx->SCOPE_STACK(scope))-<negIndex>-1)-><attr.name>
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name>
 <else>
 <if(index)>
-((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), (ANTLR3_UINT64)<index>))-><attr.name>
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name>
 <else>
-SCOPE_TOP(<scope>)-><attr.name>
+	(SCOPE_TOP(<scope>))-><attr.name>
 <endif>
 <endif>
 >>
 
 scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
 <if(negIndex)>
-((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), ctx->SCOPE_STACK(scope)->size(ctx->SCOPE_STACK(scope))-<negIndex>-1)-><attr.name> = <expr>;
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name> = <expr>;
 <else>
 <if(index)>
-((SCOPE_TYPE(scope))ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope), (ANTLR3_UINT64)<index>))-><attr.name> = <expr>;
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name> = <expr>;
 <else>
-SCOPE_TOP(<scope>)-><attr.name>=<expr>;
+	(SCOPE_TOP(<scope>))-><attr.name>=<expr>;
 <endif>
 <endif>
 >>
@@ -2761,22 +3104,23 @@ listLabelRef(label) ::= "list_<label>"
 
 // not sure the next are the right approach
 //
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>->getText(<scope>)"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>->getType(<scope>)"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>->getLine(<scope>)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>->getCharPositionInLine(<scope>)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>->getChannel(<scope>)"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>->getTokenIndex(<scope>)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>->tree"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
+tokenLabelPropertyRef_tree(scope,attr) ::= "(<scope>->tree)"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>->getText(<scope>)->toInt32(<scope>->getText(<scope>)))"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>.stop)"
 ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)"
 ruleLabelPropertyRef_text(scope,attr) ::= <<
 <if(TREE_PARSER)>
-STRSTREAM->toStringSS(STRSTREAM, <scope>.start, <scope>.start)
+(STRSTREAM->toStringSS(STRSTREAM, <scope>.start, <scope>.start))
 <else>
-STRSTREAM->toStringTT(STRSTREAM, <scope>.start, <scope>.stop)
+(STRSTREAM->toStringTT(STRSTREAM, <scope>.start, <scope>.stop))
 <endif>
 >>
 
@@ -2785,17 +3129,17 @@ ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
 /** Isolated $RULE ref ok in lexer as it's a Token */
 lexerRuleLabel(label) ::= "<label>"
 
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>->getType(<scope>)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>->getLine(<scope>)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>->getCharPositionInLine(<scope>)"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>->getChannel(<scope>)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>->getTokenIndex(<scope>)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>->getText(<scope>)"
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
 
 // Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval).start"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval).stop"
-rulePropertyRef_tree(scope,attr) ::= "((<labelType>)retval).tree"
+rulePropertyRef_start(scope,attr) ::= "retval.start"
+rulePropertyRef_stop(scope,attr) ::= "retval.stop"
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
 rulePropertyRef_text(scope,attr) ::= <<
 <if(TREE_PARSER)>
 INPUT->toStringSS(INPUT, ADAPTOR->getTokenStartIndex(ADAPTOR, retval.start), ADAPTOR->getTokenStopIndex(ADAPTOR, retval.start))
@@ -2807,20 +3151,31 @@ rulePropertyRef_st(scope,attr) ::= "retval.st"
 
 lexerRulePropertyRef_text(scope,attr) ::= "LEXER->getText(LEXER)"
 lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "LEXER->tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "LEXER->tokenStartCharPositionInLine"
-lexerRulePropertyRef_channel(scope,attr) ::= "LEXER->channel"
-lexerRulePropertyRef_start(scope,attr) ::= "LEXER->tokenStartCharIndex"
+lexerRulePropertyRef_line(scope,attr) ::= "LEXSTATE->tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "LEXSTATE->tokenStartCharPositionInLine"
+lexerRulePropertyRef_channel(scope,attr) ::= "LEXSTATE->channel"
+lexerRulePropertyRef_start(scope,attr) ::= "LEXSTATE->tokenStartCharIndex"
 lexerRulePropertyRef_stop(scope,attr) ::= "(LEXER->getCharIndex(LEXER)-1)"
 lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_int(scope,attr) ::= "LEXER->getText(LEXER)->toInt32(LEXER->getText(LEXER))"
 
 
 // setting $st and $tree is allowed in local rule. everything else is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "((<labelType>)retval).tree=<expr>;"
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree=<expr>;"
 ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;"
 
 
-/** How to execute an action */
+/** How to deal with an @after for C targets. Because we cannot rely on
+ *  any garbage collection, after code is executed even in backtracking
+ *  mode. Must be documented clearly.
+ */
+execAfter(action) ::= <<
+{
+    <action>
+}
+>>
+
+/** How to execute an action (when not backtracking) */
 execAction(action) ::= <<
 <if(backtracking)>
 <if(actions.(actionScope).synpredgate)>
@@ -2847,7 +3202,7 @@ bitsetDeclare(name, words64) ::= <<
 
 /** Bitset defining follow set for error recovery in rule state: <name>  */
 static	ANTLR3_BITWORD <name>_bits[]	= { <words64:{ANTLR3_UINT64_LIT(<it>)}; separator=", "> };
-static  ANTLR3_BITSET <name>	= { <name>_bits, <length(words64)>	};
+static  ANTLR3_BITSET_LIST <name>	= { <name>_bits, <length(words64)>	};
 >>
 
 bitset(name, words64) ::= <<
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg
new file mode 100644
index 0000000..c56cb86
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg
@@ -0,0 +1,240 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal C output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+ at genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+const char * 
+ruleNames[] =
+	{
+		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n		", separator=", ">
+	};<\n>
+<endif>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+static ANTLR3_UINT32 ruleLevel = 0;
+static ANTLR3_UINT32 getRuleLevel() 
+{ 
+	return ruleLevel; 
+}
+static void incRuleLevel() 
+{ 
+	ruleLevel++; 
+}
+static void decRuleLevel() 
+{ 
+	ruleLevel--; 
+}
+<else> <! imported grammar !>
+static ANTLR3_UINT32 
+getRuleLevel() 
+{ 
+	return <grammar.delegators:{g| <g:delegateName()>}>->getRuleLevel(); 
+}
+static void incRuleLevel() 
+{ 
+	<grammar.delegators:{g| <g:delegateName()>}>->incRuleLevel(); 
+}
+static void 
+decRuleLevel() 
+{ 
+	<grammar.delegators:{g| <g:delegateName()>}>->decRuleLevel(); 
+}
+<endif>
+<if(profile)>
+// Profiling not yet implemented for C target
+//
+<endif>
+<if(grammar.grammarIsRoot)> 
+<ctorForPredefinedListener()>
+<else>
+<ctorForDelegateGrammar()>
+<endif>
+
+static ANTLR3_BOOLEAN 
+evalPredicate(p<name> ctx, ANTLR3_BOOLEAN result, const char * predicate) 
+{
+    DBG->semanticPredicate(DBG, result, predicate);
+    return result;
+}<\n>
+>>
+
+ at genericParser.debugStuff() ::= <<
+<if(grammar.grammarIsRoot)> 
+<createListenerAndHandshake()>
+<endif>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+>>
+
+/** Basically we don't want to set any dbg listeners as root will have it. */
+ctorForDelegateGrammar() ::= <<
+
+>>
+
+ctorForPredefinedListener() ::= <<
+
+>>
+
+createListenerAndHandshake() ::= <<
+{
+	// DEBUG MODE code
+	//
+	pANTLR3_DEBUG_EVENT_LISTENER	 proxy;
+	proxy = antlr3DebugListenerNew();
+	proxy->grammarFileName = INPUT->tokenSource->strFactory->newStr8(INPUT->tokenSource->strFactory, (pANTLR3_UINT8)ctx->getGrammarFileName());
+	
+<if(TREE_PARSER)>
+	proxy->adaptor = ADAPTOR;
+<endif>
+	PARSER->setDebugListener(PARSER, proxy);
+
+	// Try to connect to the debugger (waits forever for a connection)
+	//	
+	proxy->handshake(proxy);
+	
+	// End DEBUG MODE code
+	//
+}
+>>
+
+
+ at rule.preamble() ::= <<
+DBG->enterRule(DBG, getGrammarFileName(), (const char *)"<ruleName>");
+if ( getRuleLevel()==0 ) 
+{
+	DBG->commence(DBG);
+}
+incRuleLevel();
+DBG->location(DBG, <ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
+>>
+
+ at rule.postamble() ::= <<
+DBG->location(DBG, <ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
+DBG->exitRule(DBG, getGrammarFileName(), (const char *)"<ruleName>");
+decRuleLevel();
+if ( getRuleLevel()==0 ) 
+{
+	DBG->terminate(DBG);
+}
+<\n>
+>>
+
+ at synpred.start() ::= "DBG->beginBacktrack(DBG, BACKTRACKING);"
+
+ at synpred.stop() ::= "DBG->endBacktrack(DBG, BACKTRACKING, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "DBG->enterSubRule(DBG, <decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "DBG->exitSubRule(DBG, <decisionNumber>);<\n>"
+
+enterDecision() ::=
+    "DBG->enterDecision(DBG, <decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "DBG->exitDecision(DBG, <decisionNumber>);<\n>"
+
+enterAlt(n) ::= "DBG->enterAlt(DBG, <n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+ at block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+ at block.postdecision() ::= "<exitDecision()>"
+
+ at block.postbranch() ::= "<exitSubRule()>"
+
+ at ruleBlock.predecision() ::= "<enterDecision()>"
+
+ at ruleBlock.postdecision() ::= "<exitDecision()>"
+
+ at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+ at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+ at positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+ at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+ at positiveClosureBlock.earlyExitException() ::=
+    "DBG->recognitionException(DBG, EXCEPTION);<\n>"
+
+ at closureBlock.preloop() ::= "<enterSubRule()>"
+
+ at closureBlock.postloop() ::= "<exitSubRule()>"
+
+ at closureBlock.predecision() ::= "<enterDecision()>"
+
+ at closureBlock.postdecision() ::= "<exitDecision()>"
+
+ at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+ at element.prematch() ::=
+    "DBG->location(DBG, <it.line>, <it.pos>);"
+
+ at matchSet.mismatchedSetException() ::=
+    "DBG->recognitionException(DBG, EXCEPTION);"
+
+ at newNVException.noViableAltException() ::= "DBG->recognitionException(DBG, EXCEPTION);"
+
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, ISTREAM, &cdfa<decisionNumber>);
+if  (HASEXCEPTION())
+{
+	DBG->recognitionException(DBG, EXCEPTION);
+    goto rule<ruleDescriptor.name>Ex;
+}
+<checkRuleBacktrackFailure()>
+>>
+
+ at cyclicDFA.errorMethod() ::= <<
+//static void 
+//dfaError(p<name> ctx) 
+//{
+//    DBG->recognitionException(DBG, EXCEPTION);
+//}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+evalPredicate(ctx, <pred>, (const char *)"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/cpp/CPP.stg b/tool/src/main/resources/org/antlr/codegen/templates/CPP/CPP.stg
similarity index 99%
rename from src/org/antlr/codegen/templates/cpp/CPP.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CPP/CPP.stg
index 86b5b70..8b577f3 100644
--- a/src/org/antlr/codegen/templates/cpp/CPP.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CPP/CPP.stg
@@ -269,7 +269,7 @@ filteringActionGate() ::= "backtracking == 1"
 genericParser(
 	grammar, name, scopes, tokens, tokenNames, rules, numRules, cyclicDFAs, 
 	bitsets, inputStreamType, superClass, ASTLabelType="Object",
-	labelType, members
+	labelType, members, filterMode
 	) ::= <<
 // genericParser	
 class <name> : public <@superClassName><superClass><@end> {
@@ -321,7 +321,7 @@ treeParser(grammar, name, scopes, tokens, tokenNames, globalAction,
 	rules, numRules, 
 	bitsets,
 	labelType={<ASTLabelType>}, ASTLabelType="Object", 
-	superClass="TreeParser", members={<actions.treeparser.members>}
+	superClass="TreeParser", members={<actions.treeparser.members>}, filterMode
 	) ::= <<
 <genericParser(inputStreamType="TreeNodeStream", ...)>
 >>
diff --git a/src/org/antlr/codegen/templates/CSharp/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/AST.stg
similarity index 54%
copy from src/org/antlr/codegen/templates/CSharp/AST.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/CSharp/AST.stg
index 2f698b0..574e8d9 100644
--- a/src/org/antlr/codegen/templates/CSharp/AST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/AST.stg
@@ -1,465 +1,415 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group AST;
-
- at outputFile.imports() ::= <<
-<@super.imports()>
-<if(!TREE_PARSER)><! tree parser would already have imported !>
-using Antlr.Runtime.Tree;<\n>
-<endif>
->>
-
- at genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-protected ITreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
-public ITreeAdaptor TreeAdaptor
-{
-    get { return this.adaptor; }
-    set { this.adaptor = value; }
-}
->>
-
- at returnScope.ruleReturnMembers() ::= <<
-internal <ASTLabelType> tree;
-override public object Tree
-{
-	get { return tree; }
-}
->>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> root_0 = null;<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<ruleDescriptor.tokenLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
-<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor,"token <it>");}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(backtracking)>
-if ( backtracking==0 )
-{
-<endif>
-	retval.tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-	adaptor.SetTokenBoundaries(retval.Tree, retval.start, retval.stop);
-<if(backtracking)>
-}
-<endif>
-<endif>
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  These should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
- at alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-root_0 = (<ASTLabelType>)adaptor.GetNilNode();<\n>
-<endif>
-<endif>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ID but track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>stream_<token>.Add(<label>);<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated
-// rather than just added on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.AddChild(root_0, adaptor.Create(<label>));})>
->>
-
-matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(adaptor.Create(<label>), root_0);})>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.AddChild(root_0, <label>.Tree);
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>stream_<rule>.Add(<label>.Tree);
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label+".Tree",...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefBang(...)>
-<listLabel(elem=label+".Tree",...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(elem=label+".Tree",...)>
->>
-
-// WILDCARD AST
-
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-// TODO: ugh, am i really missing the combinations for Track and ListLabel?
-// there's got to be a better way
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-
-// AST REWRITE
-// elements:          <referencedElementsDeep; separator=", ">
-// token labels:      <referencedTokenLabels; separator=", ">
-// rule labels:       <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels:  <referencedRuleListLabels; separator=", ">
-<if(backtracking)>
-if ( backtracking==0 ) {<\n>
-<endif>
-<prevRuleRootRef()>.tree = root_0;
-<rewriteCodeLabels()>
-root_0 = (<ASTLabelType>)adaptor.GetNilNode();
-<alts:rewriteAlt(); separator="else ">
-<if(backtracking)>
-}
-<endif>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor, "token <it>", <it>);};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor,"token <it>", list_<it>);};
-    separator="\n"
->
-<referencedRuleLabels
-    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", (<it>!=null ? <it>.Tree : null));};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", list_<it>);};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather shallow like other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements,     // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if ( <referencedElementsDeep:{el | stream_<el>.HasNext()}; separator=" || "> )
-{
-    <alt>
-}
-<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements,     // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
-{
-    <alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements,     // elements in immediately block; no nested blocks
-	description) ::=
-<<
-if ( !(<referencedElements:{el | stream_<el>.HasNext()}; separator=" || ">) ) {
-    throw new RewriteEarlyExitException();
-}
-while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
-{
-    <alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>)
-{
-    <a.alt>
-}<\n>
-<else>
-{
-    <a.alt>
-}<\n>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = null;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
-<root:rewriteElement()>
-<children:rewriteElement()>
-adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-}<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen()>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,args) ::= <<
-adaptor.AddChild(root_<treeLevel>, <if(args)>adaptor.Create(<token>,<args; separator=", ">)<else>stream_<token>.Next()<endif>);<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.Next(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,args) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<if(args)>adaptor.Create(<token>,<args; separator=", ">)<else>stream_<token>.Next()<endif>, root_<treeLevel>);<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, adaptor.Create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>));<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(adaptor.Create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>), root_<treeLevel>);<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  let's us refer to $rule to mean previous value.  I am reusing the
- *  variable 'tree' sitting in retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assign will be to retval.Tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<rule>.Next());<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-adaptor.AddChild(root_<treeLevel>, <action>);<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, ((<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)stream_<label>.Next()).Tree);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+using Antlr.Runtime.Tree;<\n>
+<endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected ITreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
+public ITreeAdaptor TreeAdaptor
+{
+    get { return this.adaptor; }
+    set {
+	this.adaptor = value;
+	<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+}
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+private <ASTLabelType> tree;
+override public object Tree
+{
+	get { return tree; }
+	set { tree = (<ASTLabelType>) value; }
+}
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{RewriteRule<rewriteElementType>Stream stream_<it> = new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = (<ASTLabelType>)adaptor.GetNilNode();<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.Add(<label>.Tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.Add(<label>.Tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements:          <referencedElementsDeep; separator=", ">
+// token labels:      <referencedTokenLabels; separator=", ">
+// rule labels:       <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels:  <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.Tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.GetNilNode();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+input.ReplaceChildren(adaptor.GetParent(retval.Start),
+                      adaptor.GetChildIndex(retval.Start),
+                      adaptor.GetChildIndex(_last),
+                      retval.Tree);
+<endif>
+<endif>
+<! if parser or rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.Tree = root_0;
+<endif>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.Tree = root_0;
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{RewriteRule<rewriteElementType>Stream stream_<it> = new RewriteRule<rewriteElementType>Stream(adaptor, "token <it>", <it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{RewriteRule<rewriteElementType>Stream stream_<it> = new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "rule <it>", <it>!=null ? <it>.Tree : null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.HasNext()}; separator=" || ">) ) {
+    throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>)
+{
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.Tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.AddChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.Create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.NextNode()
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTDbg.stg
similarity index 56%
copy from src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTDbg.stg
index 5dc1610..1fe702b 100644
--- a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTDbg.stg
@@ -1,44 +1,97 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : C# by code generator.
- */
-group ASTDbg;
-
-parserMembers() ::= <<
-protected ITreeAdaptor adaptor = new DebugTreeAdaptor(dbg, new CommonTreeAdaptor());
-public ITreeAdaptor TreeAdaptor
-{
-	get { return this.adaptor; }
-	set { this.adaptor = new DebugTreeAdaptor(dbg, value); }
-}<\n>
->>
-
- at rewriteElement.pregen() ::= "dbg.Location(<e.line>,<e.pos>);"
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : C# by code generator.
+ */
+group ASTDbg;
+
+parserMembers() ::= <<
+protected DebugTreeAdaptor adaptor;
+public ITreeAdaptor TreeAdaptor
+{
+	get { 
+<if(grammar.grammarIsRoot)>
+		return this.adaptor;
+<else>
+		this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor 
+<endif><\n>
+    		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+	set { this.adaptor = new DebugTreeAdaptor(dbg, value); }
+}<\n>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy = new DebugEventSocketProxy(this, port, adaptor);
+DebugListener = proxy;
+<!
+Original line follows, replaced by the next two ifs:
+set<inputStreamType>(new Debug<inputStreamType>(input,proxy));
+ !>
+<if(PARSER)>
+TokenStream = new DebugTokenStream(input,proxy);<\n>
+<endif>
+<if(TREE_PARSER)>
+TokenStream = new DebugTreeNodeStream(input,proxy);<\n>
+<endif>
+try {
+    proxy.Handshake();
+} catch (IOException ioe) {
+    ReportError(ioe);
+}
+>>
+
+ at ctorForRootGrammar.finally() ::= <<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+proxy.TreeAdaptor = adap;
+>>
+
+ at ctorForProfilingRootGrammar.finally() ::=<<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+proxy.TreeAdaptor = adap;
+>>
+
+ at ctorForPredefinedListener.superClassRef() ::= "base(input, dbg)"
+
+ at ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;<\n>
+<endif>
+>>
+
+ at rewriteElement.pregen() ::= "dbg.Location(<e.line>,<e.pos>);"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTParser.stg
new file mode 100644
index 0000000..ef44412
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTParser.stg
@@ -0,0 +1,220 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+// Conversion of the second argument necessary, but harmless
+retval.Tree = (<ASTLabelType>)adaptor.ErrorNode(input, (IToken) retval.Start, input.LT(-1), re);
+<! System.Console.WriteLine("<ruleName> returns " + ((CommonTree)retval.Tree).ToStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = <createNodeFromToken(...)>;
+	adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>
+}
+<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = <createNodeFromToken(...)>;
+	root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>
+}
+<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( state.backtracking == 0 ) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( state.backtracking == 0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>adaptor.AddChild(root_0, <label>.Tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+	adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>
+}
+<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+	root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>
+}
+<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)adaptor.Create(<label>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+<endif>
+	retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(!TREE_PARSER)>
+	adaptor.SetTokenBoundaries(retval.Tree, (IToken) retval.Start, (IToken) retval.Stop);
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTTreeParser.stg
new file mode 100644
index 0000000..c835580
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ASTTreeParser.stg
@@ -0,0 +1,315 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> _first_0 = null;
+<ASTLabelType> _last = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.Tree = (<ASTLabelType>)_first_0;
+if ( adaptor.GetParent(retval.Tree)!=null && adaptor.IsNil( adaptor.GetParent(retval.Tree) ) )
+    retval.Tree = (<ASTLabelType>)adaptor.GetParent(retval.Tree);
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = null;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = (<ASTLabelType>) <root.el.label>.Tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1) == Token.DOWN )
+{
+    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+	<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+	adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>
+}
+<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+	<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+	root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>
+}
+<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<treeLevel>, <label>.Tree);
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = (<ASTLabelType>) <label>.Tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.NextNode())
+<else>
+stream_<token>.NextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+<endif>
+	retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(backtracking)>
+}
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/CSharp.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/CSharp.stg
similarity index 68%
copy from src/org/antlr/codegen/templates/CSharp/CSharp.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/CSharp/CSharp.stg
index b13a211..cc520fb 100644
--- a/src/org/antlr/codegen/templates/CSharp/CSharp.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/CSharp.stg
@@ -1,1368 +1,1456 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group CSharp implements ANTLRCore;
-
-csharpTypeInitMap ::= [
-	"int":"0",
-	"uint":"0",
-	"long":"0",
-	"ulong":"0",
-	"float":"0.0",
-	"double":"0.0",
-	"bool":"false",
-	"byte":"0",
-	"sbyte":"0",
-	"short":"0",
-	"ushort":"0",
-	"char":"char.MinValue",
-	default:"null" // anything other than an atomic type
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs, 
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
-	   backtracking, synpreds, memoize, numRules,
-	   fileName, ANTLRVersion, generatedTimestamp, trace,
-	   scopes, superClass, literals) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<if(actions.(actionScope).namespace)>
-namespace <actions.(actionScope).namespace>
-{
-<endif>
-
-<actions.(actionScope).header>
-
-<@imports>
-using System;
-using Antlr.Runtime;
-<if(TREE_PARSER)>
-using Antlr.Runtime.Tree;
-<endif>
-using IList 		= System.Collections.IList;
-using ArrayList 	= System.Collections.ArrayList;
-using Stack 		= Antlr.Runtime.Collections.StackList;
-
-<if(backtracking)>
-using IDictionary	= System.Collections.IDictionary;
-using Hashtable 	= System.Collections.Hashtable;
-<endif>
-
-
-<@end>
-
-<docComment>
-<recognizer>
-<if(actions.(actionScope).namespace)>
-}
-<endif>
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-public class <name> : Lexer 
-{
-    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <actions.lexer.members>
-
-    public <name>() <! needed by subclasses !>
-    {
-		InitializeCyclicDFAs();
-    }
-    public <name>(ICharStream input) 
-		: base(input)
-	{
-		InitializeCyclicDFAs();
-<if(backtracking)>
-        ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-    }
-    
-    override public string GrammarFileName
-    {
-    	get { return "<fileName>";} 
-    }
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator="\n\n">
-
-   	<synpreds:{p | <lexerSynpred(p)>}>
-
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-	private void InitializeCyclicDFAs()
-	{
-	    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
-	}
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-    
-}
->>
-
-/** A override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon error; just rewind, consume
- *  a token and then try again.  backtracking needs to be set as well.
- *
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-override public IToken NextToken() 
-{
-    while (true) 
-	{
-        if ( input.LA(1) == (int)CharStreamConstants.EOF ) 
-		{
-            return Token.EOF_TOKEN;
-        }
-
-	    token = null;
-		channel = Token.DEFAULT_CHANNEL;
-        tokenStartCharIndex = input.Index();
-        tokenStartCharPositionInLine = input.CharPositionInLine;
-        tokenStartLine = input.Line;
-	    text = null;
-        try 
-		{
-            int m = input.Mark();
-            backtracking = 1; <! means we won't throw slow exception !>
-            failed = false;
-            mTokens();
-            backtracking = 0;
-<!
-			mTokens backtracks with synpred at backtracking==2
-            and we set the synpredgate to allow actions at level 1. 
-!>
-            if ( failed ) 
-			{
-	            input.Rewind(m);
-                input.Consume(); <! // advance one char and try again !>
-            }
-            else 
-			{
-				Emit();
-                return token;
-            }
-        }
-        catch (RecognitionException re) 
-		{
-            // shouldn't happen in backtracking mode, but...
-            ReportError(re);
-            Recover(re);
-        }
-    }
-}
-
-override public void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
-{
-	if ( backtracking > 1 ) 
-		base.Memoize(input, ruleIndex, ruleStartIndex);
-}
-
-override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
-{
-	if ( backtracking>1 ) 
-		return base.AlreadyParsedRule(input, ruleIndex);
-	return false;
-}
->>
-
-filteringActionGate() ::= "(backtracking == 1)"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              ASTLabelType="object", labelType, members) ::= <<
-public class <name> : <@superClassName><superClass><@end> 
-{
-    public static readonly string[] tokenNames = new string[] 
-	{
-        "\<invalid>", 
-		"\<EOR>", 
-		"\<DOWN>", 
-		"\<UP>", 
-		<tokenNames; separator=", \n">
-    };
-
-    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <@members>
-   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-
-    public <name>(<inputStreamType> input) 
-		: base(input)
-	{
-		InitializeCyclicDFAs();
-<if(backtracking)>
-        ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-    }
-    <@end>
-
-    override public string[] TokenNames
-	{
-		get { return tokenNames; }
-	}
-
-    override public string GrammarFileName
-	{
-		get { return "<fileName>"; }
-	}
-
-    <members>
-
-    <rules; separator="\n\n">
-
-   	<synpreds:{p | <synpred(p)>}>
-
-   	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-	private void InitializeCyclicDFAs()
-	{
-    	<cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
-	}
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>
-}
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="IToken", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="ITokenStream", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="ITreeNodeStream", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc..., just give simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) //throws RecognitionException
-{   
-<if(trace)>
-    TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    try
-    {
-        <block>
-    }
-    finally
-    {
-        TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    }
-<else>
-    <block>
-<endif>
-}
-// $ANTLR end <ruleName>
->>
-
-synpredDecls(name) ::= <<
-SynPredPointer <name>;<\n>
->>
-
-synpred(name) ::= <<
-public bool <name>() 
-{
-    backtracking++;
-    <@start()>
-    int start = input.Mark();
-    try 
-    {
-        <name>_fragment(); // can never throw exception
-    }
-    catch (RecognitionException re) 
-    {
-        Console.Error.WriteLine("impossible: "+re);
-    }
-    bool success = !failed;
-    input.Rewind(start);
-    <@stop()>
-    backtracking--;
-    failed = false;
-    return success;
-}<\n>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( (backtracking > 0) && AlreadyParsedRule(input, <ruleDescriptor.index>) ) 
-{
-	return <ruleReturnValue()>; 
-}
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if ( backtracking > 0 ) {failed = true; return <ruleReturnValue()>;}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// $ANTLR start <ruleName>
-// <fileName>:<description>
-public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [1]
-{   
-    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try 
-	{
-	    <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    catch (RecognitionException re) 
-	{
-        ReportError(re);
-        Recover(input,re);
-    }<\n>
-<endif>
-<endif>
-<endif>
-    finally 
-	{
-        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-// $ANTLR end <ruleName>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) 
-{
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = new <returnType()>();
-retval.start = input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.Index();
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.Push(new <it>_scope());}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.Push(new <it.name>_scope());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.Pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.Pop();}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{<labelType> <it.label.text> = null;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{IList list_<it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{<labelType> <it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{IList list_<it.label.text> = null;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = input.LT(-1);<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( backtracking > 0 ) 
-{
-	Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); 
-}
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start <ruleName> 
-public void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [2]
-{
-    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleDeclarations()>
-    try 
-	{
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        int _type = <ruleName>;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        this.type = _type;
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    finally 
-	{
-        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-    }
-}
-// $ANTLR end <ruleName>
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-override public void mTokens() // throws RecognitionException 
-{
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber> = <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) 
-{
-    <alts:altSwitchCase()>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber> = <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) 
-{
-    <alts:altSwitchCase()>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int cnt<decisionNumber> = 0;
-<decls>
-<@preloop()>
-do 
-{
-    int alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) 
-	{
-		<alts:altSwitchCase()>
-		default:
-		    if ( cnt<decisionNumber> >= 1 ) goto loop<decisionNumber>;
-		    <ruleBacktrackFailure()>
-	            EarlyExitException eee =
-	                new EarlyExitException(<decisionNumber>, input);
-	            <@earlyExitException()>
-	            throw eee;
-    }
-    cnt<decisionNumber>++;
-} while (true);
-
-loop<decisionNumber>:
-	;	// Stops C# compiler whinging that label 'loop<decisionNumber>' has no statements
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-do 
-{
-    int alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) 
-	{
-		<alts:altSwitchCase()>
-		default:
-		    goto loop<decisionNumber>;
-    }
-} while (true);
-
-loop<decisionNumber>:
-	;	// Stops C# compiler whinging that label 'loop<decisionNumber>' has no statements
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) by before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-case <i> :
-    <@prealt()>
-    <it>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
-// <fileName>:<description>
-{
-	<@declarations()>
-	<elements:element()>
-	<@cleanup()>
-}
->>
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-<@prematch()>
-<it.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label> = (<labelType>)input.LT(1);<\n>
-<endif>
-Match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label> == null) list_<label> = new ArrayList();
-list_<label>.Add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-Match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= input.LA(1);<\n>
-<else>
-<label> = (<labelType>)input.LT(1);<\n>
-<endif>
-<endif>
-if ( <s> ) 
-{
-    input.Consume();
-    <postmatchCode>
-<if(!LEXER)>
-    errorRecovery = false;
-<endif>
-    <if(backtracking)>failed = false;<endif>
-}
-else 
-{
-    <ruleBacktrackFailure()>
-    MismatchedSetException mse =
-        new MismatchedSetException(null,input);
-    <@mismatchedSetException()>
-<if(LEXER)>
-    Recover(mse);
-<else>
-    RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
-<endif>
-    throw mse;
-}<\n>
->>
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label) ::= <<
-<if(label)>
-int <label>Start = CharIndex;
-Match(<string>); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, CharIndex-1);
-<else>
-Match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label> = (<labelType>)input.LT(1);<\n>
-<endif>
-MatchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
- */
-ruleRef(rule,label,elementIndex,args) ::= <<
-PushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
-<if(label)>
-<label> = <rule>(<args; separator=", ">);<\n>
-<else>
-<rule>(<args; separator=", ">);<\n>
-<endif>
-followingStackPointer_--;
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
-<else>
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;
-Match(EOF); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
-<else>
-Match(EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1) == Token.DOWN )
-{
-    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) 
-{
-    <ruleBacktrackFailure()>
-    throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else 
-{
-<if(eotPredictsAlt)>
-    alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else 
-{
-    alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter to the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>)
-{
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) 
-{
-<edges; separator="\n">
-	default:
-<if(eotPredictsAlt)>
-    	alt<decisionNumber> = <eotPredictsAlt>;
-    	break;
-<else>
-	    <ruleBacktrackFailure()>
-	    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
-	        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-	    <@noViableAltException()>
-	    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) 
-{
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) 
-{
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-	default:
-    	alt<decisionNumber> = <eotPredictsAlt>;
-    	break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{case <it>:}; separator="\n">
-	{
-    <targetState>
-    }
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
->>
-
-/* Dump DFA tables.
- */
-cyclicDFA(dfa) ::= <<
-static readonly short[] DFA<dfa.decisionNumber>_eot = {
-    <dfa.eot:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-static readonly short[] DFA<dfa.decisionNumber>_eof = {
-    <dfa.eof:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-static readonly int[] DFA<dfa.decisionNumber>_min = {
-    <dfa.min:{n|<n>}; null="0", wrap="\n", separator=", ">
-    };
-static readonly int[] DFA<dfa.decisionNumber>_max = {
-    <dfa.max:{n|<n>}; null="0", wrap="\n", separator=", ">
-    };
-static readonly short[] DFA<dfa.decisionNumber>_accept = {
-    <dfa.accept:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-static readonly short[] DFA<dfa.decisionNumber>_special = {
-    <dfa.special:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-
-static readonly short[] dfa<dfa.decisionNumber>_transition_null = null;
-<dfa.edgeTransitionClassMap.keys:{table |
-static readonly short[] dfa<dfa.decisionNumber>_transition<i0> = \{
-	<table; separator=", ", wrap="\n    ", null="-1">
-	\};}>
-
-static readonly short[][] DFA<dfa.decisionNumber>_transition = {
-	<dfa.transitionEdgeTables:{whichTable|dfa<dfa.decisionNumber>_transition<whichTable>}; null="_null", separator=",\n">
-    };
-
-protected class DFA<dfa.decisionNumber> : DFA
-{
-    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) 
-    {
-        this.recognizer = recognizer;
-        this.decisionNumber = <dfa.decisionNumber>;
-        this.eot = DFA<dfa.decisionNumber>_eot;
-        this.eof = DFA<dfa.decisionNumber>_eof;
-        this.min = DFA<dfa.decisionNumber>_min;
-        this.max = DFA<dfa.decisionNumber>_max;
-        this.accept     = DFA<dfa.decisionNumber>_accept;
-        this.special    = DFA<dfa.decisionNumber>_special;
-        this.transition = DFA<dfa.decisionNumber>_transition;
-    }
-
-    override public string Description
-    {
-        get { return "<dfa.description>"; }
-    }
-
-    <@errorMethod()>
-}<\n>
-<if(dfa.specialStateSTs)>
-
-protected internal int DFA<dfa.decisionNumber>_SpecialStateTransition(DFA dfa, int s, IIntStream input) //throws NoViableAltException
-{
-	int _s = s;
-    switch ( s )
-    {
-    <dfa.specialStateSTs:{state |
-       	case <i0> : <! compressed special state numbers 0..n-1 !>
-           	<state>}; separator="\n">
-    }
-<if(backtracking)>
-    if (backtracking > 0) {failed = true; return -1;}<\n>
-<endif>
-    NoViableAltException nvae =
-        new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
-    dfa.Error(nvae);
-    throw nvae;
-}<\n>
-<endif>
->>
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-int index<decisionNumber>_<stateNumber> = input.Index();
-input.Rewind();<\n>
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.Seek(index<decisionNumber>_<stateNumber>);<\n>
-<endif>
-if ( s >= 0 ) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>) { s = <targetStateNumber>; }<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left> && <right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | || <o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "<pred>"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>) == <atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber> \>= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) \>= <lower> && input.LA(<k>) \<= <upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\" || \">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected class <scope.name>_scope 
-{
-    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected class <scope.name>_scope 
-{
-    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.name>_return
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Generate the C# type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Using a type to init value map, try to init a type; if not in table
- *  must be an object, default value is "null".
- */
-initValue(typeName) ::= <<
-<csharpTypeInitMap.(typeName)>
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-public class <returnType()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope 
-{
-    <scope.attributes:{public <it.decl>;}; separator="\n">
-    <@ruleReturnMembers()>
-};
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name>
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack[<index>]).<attr.name>
-<else>
-((<scope>_scope)<scope>_stack.Peek()).<attr.name>
-<endif>
-<endif>
->>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name> = <expr>;
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack[<index>]).<attr.name> = <expr>;
-<else>
-((<scope>_scope)<scope>_stack.Peek()).<attr.name> = <expr>;
-<endif>
-<endif>
->>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> = <expr>;
-<else>
-<attr.name> = <expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.Text"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.Type"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.Line"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.CharPositionInLine"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.Channel"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.TokenIndex"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(<scope>.start),
-  input.TreeAdaptor.GetTokenStopIndex(<scope>.start) )
-<else>
-input.ToString(<scope>.start,<scope>.stop)
-<endif>
->>
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.Type"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.Line"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.CharPositionInLine"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.Channel"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.TokenIndex"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.Text"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(retval.start),
-  input.TreeAdaptor.GetTokenStopIndex(retval.start) )
-<else>
-input.ToString(retval.start,input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "Text"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "channel"
-lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree = <expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st = <expr>;"
-
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if ( <actions.(actionScope).synpredgate> )
-{
-  <action>
-}
-<else>
-if ( backtracking == 0 ) 
-{
-  <action>
-}
-<endif>
-<else>
-<action>
-<endif>
->>
-
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{<it>UL};separator=",">});<\n>
->>
-
-codeFileExtension() ::= ".cs"
-
-true() ::= "true"
-false() ::= "false"
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group CSharp implements ANTLRCore;
+
+csharpTypeInitMap ::= [
+	"int":"0",
+	"uint":"0",
+	"long":"0",
+	"ulong":"0",
+	"float":"0.0",
+	"double":"0.0",
+	"bool":"false",
+	"byte":"0",
+	"sbyte":"0",
+	"short":"0",
+	"ushort":"0",
+	"char":"char.MinValue",
+	default:"null" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs, 
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@debugPreprocessor()>
+<actions.(actionScope).header>
+
+<@imports>
+using System;
+using Antlr.Runtime;
+<if(TREE_PARSER)>
+using Antlr.Runtime.Tree;
+<endif>
+using IList 		= System.Collections.IList;
+using ArrayList 	= System.Collections.ArrayList;
+using Stack 		= Antlr.Runtime.Collections.StackList;
+
+<if(backtracking)>
+using IDictionary	= System.Collections.IDictionary;
+using Hashtable 	= System.Collections.Hashtable;
+<endif>
+
+
+<@end>
+
+<if(actions.(actionScope).namespace)>
+namespace <actions.(actionScope).namespace>
+{
+<endif>
+
+<docComment>
+<recognizer>
+<if(actions.(actionScope).namespace)>
+}
+<endif>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="IToken",
+      filterMode, superClass="Lexer") ::= <<
+public class <grammar.recognizerName> : <@superClassName><superClass><@end> {
+    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <actions.lexer.members>
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    public <grammar.recognizerName>() <! needed by subclasses !>
+    {
+		InitializeCyclicDFAs();
+    }
+    public <grammar.recognizerName>(ICharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: this(input, null<grammar.delegators:{g|, <g:delegateName()>}>) {
+    }
+    public <grammar.recognizerName>(ICharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: base(input, state) {
+		InitializeCyclicDFAs(); <! Necessary in C#??? Not removed yet. !>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        state.ruleMemo = new Hashtable[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+<endif>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+        <grammar.delegators:
+         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+    }
+    
+    override public string GrammarFileName
+    {
+    	get { return "<fileName>";} 
+    }
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+   	<synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+	private void InitializeCyclicDFAs(<@debugInitializeCyclicDFAs()>)
+	{
+	    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<@debugAddition()>);}; separator="\n">
+	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
+	}
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+    
+}
+>>
+
+/** A override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+override public IToken NextToken() 
+{
+    while (true) 
+	{
+        if ( input.LA(1) == (int)CharStreamConstants.EOF ) 
+		{
+            return Token.EOF_TOKEN;
+        }
+
+	    state.token = null;
+		state.channel = Token.DEFAULT_CHANNEL;
+        state.tokenStartCharIndex = input.Index();
+        state.tokenStartCharPositionInLine = input.CharPositionInLine;
+        state.tokenStartLine = input.Line;
+	    state.text = null;
+        try 
+		{
+            int m = input.Mark();
+            state.backtracking = 1; <! means we won't throw slow exception !>
+            state.failed = false;
+            mTokens();
+            state.backtracking = 0;
+<!
+			mTokens backtracks with synpred at backtracking==2
+            and we set the synpredgate to allow actions at level 1. 
+!>
+            if ( state.failed ) 
+			{
+	            input.Rewind(m);
+                input.Consume(); <! // advance one char and try again !>
+            }
+            else 
+			{
+				Emit();
+                return state.token;
+            }
+        }
+        catch (RecognitionException re) 
+		{
+            // shouldn't happen in backtracking mode, but...
+            ReportError(re);
+            Recover(re);
+        }
+    }
+}
+
+override public void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
+{
+	if ( state.backtracking > 1 ) 
+		base.Memoize(input, ruleIndex, ruleStartIndex);
+}
+
+override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+	if ( state.backtracking>1 ) 
+		return base.AlreadyParsedRule(input, ruleIndex);
+	return false;
+}
+>>
+
+actionGate() ::= "(state.backtracking==0)"
+
+filteringActionGate() ::= "(state.backtracking == 1)"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="object", labelType, members, rewriteElementType) ::= <<
+public class <grammar.recognizerName> : <@superClassName><superClass><@end> 
+{
+<if(grammar.grammarIsRoot)>
+    public static readonly string[] tokenNames = new string[] 
+	{
+        "\<invalid>", 
+		"\<EOR>", 
+		"\<DOWN>", 
+		"\<UP>", 
+		<tokenNames; separator=", \n">
+    };<\n>
+<endif>
+
+    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <@members>
+    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+
+    public <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>) {
+    }
+
+    public <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: base(input, state) {
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">         
+        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+   }
+    <@end>
+
+    override public string[] TokenNames {
+		get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
+    }
+
+    override public string GrammarFileName {
+		get { return "<fileName>"; }
+    }
+
+    <members>
+
+    <rules; separator="\n\n">
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor|
+    public <returnType()> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException 
+    \{
+    	<if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); 
+    \}}; separator="\n">
+
+   	<synpreds:{p | <synpred(p)>}>
+
+   	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+	private void InitializeCyclicDFAs(<@debugInitializeCyclicDFAs()>)
+	{
+    	<cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<@debugAddition()>);}; separator="\n">
+	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
+	}
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+}
+>>
+
+parserCtorBody() ::= <<
+<@initializeCyclicDFAs>InitializeCyclicDFAs();<@end>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new Hashtable[<length(grammar.allImportedRules)>+1];<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="IToken", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start "<ruleName>"
+public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
+    <ruleLabelDefs()>
+<if(trace)>
+    TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try
+    {
+        <block>
+    }
+    finally
+    {
+        TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+}
+// $ANTLR end "<ruleName>"
+>>
+
+synpredDecls(name) ::= <<
+SynPredPointer <name>;<\n>
+>>
+
+synpred(name) ::= <<
+public bool <name>() 
+{
+    state.backtracking++;
+    <@start()>
+    int start = input.Mark();
+    try 
+    {
+        <name>_fragment(); // can never throw exception
+    }
+    catch (RecognitionException re) 
+    {
+        Console.Error.WriteLine("impossible: "+re);
+    }
+    bool success = !state.failed;
+    input.Rewind(start);
+    <@stop()>
+    state.backtracking--;
+    state.failed = false;
+    return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( (state.backtracking > 0) && AlreadyParsedRule(input, <ruleDescriptor.index>) ) 
+{
+	return <ruleReturnValue()>; 
+}
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if ( state.backtracking > 0 ) {state.failed = true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [1]
+{   
+    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try 
+	{
+	    <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (RecognitionException re) 
+	{
+        ReportError(re);
+        Recover(input,re);
+	<@setErrorReturnValue()>
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally 
+	{
+        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+// $ANTLR end "<ruleName>"
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) 
+{
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = new <returnType()>();
+retval.Start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.Index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Push(new <it.name>_scope());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{<labelType> <it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{IList list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|<ll:ruleLabelDef(label=it)> <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{IList list_<it.label.text> = null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.Stop = input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( state.backtracking > 0 ) 
+{
+	Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); 
+}
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+public void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [2]
+{
+ 	<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+	<if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+		try
+		{
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        int _type = <ruleName>;
+	int _channel = DEFAULT_TOKEN_CHANNEL;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        state.type = _type;
+        state.channel = _channel;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally 
+	{
+        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+override public void mTokens() // throws RecognitionException 
+{
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) 
+{
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) 
+{
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber> = 0;
+<decls>
+<@preloop()>
+do 
+{
+    int alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+	{
+		<alts:altSwitchCase()>
+		default:
+		    if ( cnt<decisionNumber> >= 1 ) goto loop<decisionNumber>;
+		    <ruleBacktrackFailure()>
+	            EarlyExitException eee<decisionNumber> =
+	                new EarlyExitException(<decisionNumber>, input);
+	            <@earlyExitException()>
+	            throw eee<decisionNumber>;
+    }
+    cnt<decisionNumber>++;
+} while (true);
+
+loop<decisionNumber>:
+	;	// Stops C# compiler whining that label 'loop<decisionNumber>' has no statements
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+do 
+{
+    int alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+	{
+		<alts:altSwitchCase()>
+		default:
+		    goto loop<decisionNumber>;
+    }
+} while (true);
+
+loop<decisionNumber>:
+	;	// Stops C# compiler whining that label 'loop<decisionNumber>' has no statements
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+	<@declarations()>
+	<elements:element()>
+	<rew>
+	<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=(<labelType>)<endif>Match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> == null) list_<label> = new ArrayList();
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) 
+{
+    input.Consume();
+    <postmatchCode>
+<if(!LEXER)>
+    state.errorRecovery = false;
+<endif>
+    <if(backtracking)>state.failed = false;<endif>
+}
+else 
+{
+    <ruleBacktrackFailure()>
+    MismatchedSetException mse = new MismatchedSetException(null,input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    Recover(mse);
+    throw mse;
+<else>
+    throw mse;
+    <! use following code to make it recover inline; remove throw mse;
+    RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
+<endif>
+}<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int <label>Start = CharIndex;
+Match(<string>); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, CharIndex-1);
+<else>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)>
+<label> = <if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+<else>
+<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+<endif>
+state.followingStackPointer--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+Match(EOF); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1) == Token.DOWN )
+{
+    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) 
+{
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else 
+{
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
+        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <@noViableAltException()>
+    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else 
+{
+    alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>)
+{
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+<edges; separator="\n">
+	default:
+<if(eotPredictsAlt)>
+    	alt<decisionNumber> = <eotPredictsAlt>;
+    	break;
+<else>
+	    <ruleBacktrackFailure()>
+	    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
+	        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+	    <@noViableAltException()>
+	    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+	default:
+    	alt<decisionNumber> = <eotPredictsAlt>;
+    	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+	{
+    <targetState>
+    }
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
+>>
+
+/* Dump DFA tables.
+ */
+cyclicDFA(dfa) ::= <<
+const string DFA<dfa.decisionNumber>_eotS =
+    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_eofS =
+    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_minS =
+    "<dfa.javaCompressedMin; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_maxS =
+    "<dfa.javaCompressedMax; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_acceptS =
+    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_specialS =
+    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>";
+static readonly string[] DFA<dfa.decisionNumber>_transitionS = {
+        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+};
+
+static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+static readonly short[][] DFA<dfa.decisionNumber>_transition = DFA.UnpackEncodedStringArray(DFA<dfa.decisionNumber>_transitionS);
+
+protected class DFA<dfa.decisionNumber> : DFA
+{
+    <@debugMember()>
+    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer)
+    {
+        this.recognizer = recognizer;
+        this.decisionNumber = <dfa.decisionNumber>;
+        this.eot = DFA<dfa.decisionNumber>_eot;
+        this.eof = DFA<dfa.decisionNumber>_eof;
+        this.min = DFA<dfa.decisionNumber>_min;
+        this.max = DFA<dfa.decisionNumber>_max;
+        this.accept = DFA<dfa.decisionNumber>_accept;
+        this.special = DFA<dfa.decisionNumber>_special;
+        this.transition = DFA<dfa.decisionNumber>_transition;
+
+    }
+    <@dbgCtor()>
+
+    override public string Description
+    {
+        get { return "<dfa.description>"; }
+    }
+
+    <@errorMethod()>
+}<\n>
+<if(dfa.specialStateSTs)>
+
+protected internal int DFA<dfa.decisionNumber>_SpecialStateTransition(DFA dfa, int s, IIntStream _input) //throws NoViableAltException
+{
+        <if(LEXER)>
+        IIntStream input = _input;
+        <endif>
+        <if(PARSER)>
+        ITokenStream input = (ITokenStream)_input;
+        <endif>
+        <if(TREE_PARSER)>
+        ITreeNodeStream input = (ITreeNodeStream)_input;
+        <endif>
+	int _s = s;
+    switch ( s )
+    {
+    <dfa.specialStateSTs:{state |
+       	case <i0> : <! compressed special state numbers 0..n-1 !>
+           	<state>}; separator="\n">
+    }
+<if(backtracking)>
+    if (state.backtracking > 0) {state.failed = true; return -1;}<\n>
+<endif>
+    NoViableAltException nvae<dfa.decisionNumber> =
+        new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
+    dfa.Error(nvae<dfa.decisionNumber>);
+    throw nvae<dfa.decisionNumber>;
+}<\n>
+<endif>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+int index<decisionNumber>_<stateNumber> = input.Index();
+input.Rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.Seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s >= 0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>) { s = <targetStateNumber>; }<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left> && <right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | || <o>}>)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber> \>= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) \>= <lower> && input.LA(<k>) \<= <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope 
+{
+    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope 
+{
+    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<csharpTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public class <ruleDescriptor:returnStructName()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope
+{
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+};
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack[<index>]).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name> = <expr>;
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack[<index>]).<attr.name> = <expr>;
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name> = <expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+((<scope> != null) ? <scope>.<attr.name> : <initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> = <expr>;
+<else>
+<attr.name> = <expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "((<scope> != null) ? <scope>.Text : null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "((<scope> != null) ? <scope>.Type : 0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "((<scope> != null) ? <scope>.Line : 0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "((<scope> != null) ? <scope>.CharPositionInLine : 0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "((<scope> != null) ? <scope>.Channel : 0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "((<scope> != null) ? <scope>.TokenIndex : 0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "((<scope> != null) ? ((<labelType>)<scope>.Start) : null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "((<scope> != null) ? ((<labelType>)<scope>.Stop) : null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "((<scope> != null) ? ((<ASTLabelType>)<scope>.Tree) : null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+((<scope> != null) ? input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
+  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start)) : null)
+<else>
+((<scope> != null) ? input.ToString((IToken)(<scope>.Start),(IToken)(<scope>.Stop)) : null)
+<endif>
+>>
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> != null) ? <scope>.ST : null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "((<scope> != null) ? <scope>.Type : 0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "((<scope> != null) ? <scope>.Line : 0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "((<scope> != null) ? <scope>.CharPositionInLine : -1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "((<scope> != null) ? <scope>.Channel : 0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "((<scope> != null) ? <scope>.TokenIndex : 0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "((<scope> != null) ? <scope>.Text : null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.Start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.Stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.Tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start) )
+<else>
+input.ToString((IToken)retval.Start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.ST"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.Tree = <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.ST = <expr>;"
+
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+  <action>
+}
+<else>
+<action>
+<endif>
+>>
+
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{<it>UL};separator=",">});<\n>
+>>
+
+codeFileExtension() ::= ".cs"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/src/org/antlr/codegen/templates/CSharp/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/Dbg.stg
similarity index 50%
copy from src/org/antlr/codegen/templates/CSharp/Dbg.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/CSharp/Dbg.stg
index f000d01..2ffc078 100644
--- a/src/org/antlr/codegen/templates/CSharp/Dbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/Dbg.stg
@@ -1,192 +1,300 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template overrides to add debugging to normal Java output;
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-group Dbg;
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-using Antlr.Runtime.Debug;
->>
-
-@genericParser.members() ::= <<
-public static readonly string[] ruleNames = new string[] {
-    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
-};<\n>
-public int ruleLevel = 0;
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-public <name>(<inputStreamType> input) : <if(profile)>this(input, new Profiler(null))<else>base(input)<endif>
-{
-<if(profile)>
-        Profiler p = (Profiler)dbg;
-        p.Parser = this;
-<endif><\n>
-<if(memoize)>
-    ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-}
-<if(profile)>
-override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
-{
-    ((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
-    return base.AlreadyParsedRule(input, ruleIndex);
-}<\n>
-override public void Memoize(IIntStream input,
-                    int ruleIndex,
-                    int ruleStartIndex)
-{
-    ((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
-    base.Memoize(input, ruleIndex, ruleStartIndex);
-}<\n>
-<endif>
-public <name>(<inputStreamType> input, IDebugEventListener dbg)
-	: base(input, dbg)
-{
-}<\n>
-protected bool EvalPredicate(bool result, string predicate) 
-{
-    dbg.SemanticPredicate(result, predicate);
-    return result;
-}<\n>
->>
-
-@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
-
-@rule.preamble() ::= <<
-try 
-{
-	dbg.EnterRule("<ruleName>");
-	if ( ruleLevel==0 ) {dbg.Commence();}
-	ruleLevel++;
-	dbg.Location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
->>
-
-@rule.postamble() ::= <<
-dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
-}
-finally
-{
-    dbg.ExitRule("<ruleName>");
-    ruleLevel--;
-    if ( ruleLevel==0 ) {dbg.Terminate();}
-}<\n>
->>
-
-@synpred.start() ::= "dbg.BeginBacktrack(backtracking);"
-
-@synpred.stop() ::= "dbg.EndBacktrack(backtracking, success);"
-
-// Common debug event triggers used by region overrides below
-
-enterSubRule() ::=
-    "try { dbg.EnterSubRule(<decisionNumber>);<\n>"
-
-exitSubRule() ::=
-    "} finally { dbg.ExitSubRule(<decisionNumber>); }<\n>"
-
-enterDecision() ::=
-    "try { dbg.EnterDecision(<decisionNumber>);<\n>"
-
-exitDecision() ::=
-    "} finally { dbg.ExitDecision(<decisionNumber>); }<\n>"
-
-enterAlt(n) ::= "dbg.EnterAlt(<n>);<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
-@block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
-@block.postdecision() ::= "<exitDecision()>"
-
-@block.postbranch() ::= "<exitSubRule()>"
-
-@ruleBlock.predecision() ::= "<enterDecision()>"
-
-@ruleBlock.postdecision() ::= "<exitDecision()>"
-
-@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
-@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
-@positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
-@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
-@positiveClosureBlock.earlyExitException() ::=
-    "dbg.RecognitionException(eee);<\n>"
-
-@closureBlock.preloop() ::= "<enterSubRule()>"
-
-@closureBlock.postloop() ::= "<exitSubRule()>"
-
-@closureBlock.predecision() ::= "<enterDecision()>"
-
-@closureBlock.postdecision() ::= "<exitDecision()>"
-
-@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
-
-@element.prematch() ::=
-    "dbg.Location(<it.line>,<it.pos>);"
-
-@matchSet.mismatchedSetException() ::=
-    "dbg.RecognitionException(mse);"
-
-@dfaState.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
-
-@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
-
-dfaDecision(decisionNumber,description) ::= <<
-try 
-{
-    isCyclicDecision = true;
-    <super.dfaDecision(...)>
-}
-catch (NoViableAltException nvae) 
-{
-    dbg.RecognitionException(nvae);
-    throw nvae;
-}
->>
-
-@cyclicDFA.errorMethod() ::= <<
-public virtual void Error(NoViableAltException nvae) 
-{
-    dbg.RecognitionException(nvae);
-}
->>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-EvalPredicate(<pred>,"<description>")
->>
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+@outputFile.debugPreprocessor() ::= "#define ANTLR_DEBUG"
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.Runtime.Debug;
+using IOException = System.IO.IOException;
+>>
+
+@genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+public static readonly string[] ruleNames = new string[] {
+    "invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n    ", separator=", ">
+};<\n>
+<endif>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+    private int ruleLevel = 0;
+    public int RuleLevel {
+	get { return ruleLevel; }
+    }
+    public void IncRuleLevel() { ruleLevel++; }
+    public void DecRuleLevel() { ruleLevel--; }
+<if(profile)>
+    <ctorForProfilingRootGrammar()>
+<else>
+    <ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else> <! imported grammar !>
+    public int RuleLevel {
+	get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; }
+    }
+    public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
+    public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
+    <ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+    ((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    return base.AlreadyParsedRule(input, ruleIndex);
+}<\n>
+override public void Memoize(IIntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    base.Memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+protected bool EvalPredicate(bool result, string predicate) 
+{
+    dbg.SemanticPredicate(result, predicate);
+    return result;
+}<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+public <name>(<inputStreamType> input)
+    : this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState()) {
+}
+
+public <name>(<inputStreamType> input, int port, RecognizerSharedState state)
+    : base(input, state) {
+    <parserCtorBody()>
+    <createListenerAndHandshake()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+>>
+
+@parserCtorBody.initializeCyclicDFAs() ::= <<
+InitializeCyclicDFAs(dbg);
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+    this(input, new Profiler(null), new RecognizerSharedState());
+}
+
+public <name>(<inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state)
+    : base(input, dbg, state) {
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}
+<\n>
+>>
+
+
+/** Basically we don't want to set any dbg listeners are root will have it. */
+ctorForDelegateGrammar() ::= <<
+public <name>(<inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+    : base(input, dbg, state) {
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>(<inputStreamType> input, IDebugEventListener dbg)
+    : <@superClassRef>base(input, dbg, new RecognizerSharedState())<@end> {
+<if(profile)>
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+<endif>
+    <parserCtorBody()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy(this, port, input.TreeAdaptor);
+<else>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy(this, port, null);
+<endif>
+DebugListener = proxy;
+try
+{
+    proxy.Handshake();
+}
+catch (IOException ioe)
+{
+    ReportError(ioe);
+}
+>>
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+@rule.preamble() ::= <<
+try {
+	dbg.EnterRule(GrammarFileName, "<ruleName>");
+	if ( RuleLevel==0 ) {dbg.Commence();}
+	IncRuleLevel();
+	dbg.Location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
+>>
+
+@lexer.debugInitializeCyclicDFAs() ::= "IDebugEventListener dbg"
+
+@lexer.debugAddition() ::= ", dbg"
+
+@genericParser.debugInitializeCyclicDFAs() ::= "IDebugEventListener dbg"
+
+@genericParser.debugAddition() ::= ", dbg"
+
+@rule.postamble() ::= <<
+dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
+}
+finally {
+    dbg.ExitRule(GrammarFileName, "<ruleName>");
+    DecRuleLevel();
+    if ( RuleLevel==0 ) {dbg.Terminate();}
+}<\n>
+>>
+
+@synpred.start() ::= "dbg.BeginBacktrack(state.backtracking);"
+
+@synpred.stop() ::= "dbg.EndBacktrack(state.backtracking, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { dbg.EnterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally { dbg.ExitSubRule(<decisionNumber>); }<\n>"
+
+enterDecision() ::=
+    "try { dbg.EnterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally { dbg.ExitDecision(<decisionNumber>); }<\n>"
+
+enterAlt(n) ::= "dbg.EnterAlt(<n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+@block.postdecision() ::= "<exitDecision()>"
+
+@block.postbranch() ::= "<exitSubRule()>"
+
+@ruleBlock.predecision() ::= "<enterDecision()>"
+
+@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+@positiveClosureBlock.earlyExitException() ::=
+    "dbg.RecognitionException(eee<decisionNumber>);<\n>"
+
+@closureBlock.preloop() ::= "<enterSubRule()>"
+
+@closureBlock.postloop() ::= "<exitSubRule()>"
+
+@closureBlock.predecision() ::= "<enterDecision()>"
+
+@closureBlock.postdecision() ::= "<exitDecision()>"
+
+@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+@element.prematch() ::=
+    "dbg.Location(<it.line>,<it.pos>);"
+
+@matchSet.mismatchedSetException() ::=
+    "dbg.RecognitionException(mse);"
+
+@dfaState.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
+
+@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
+
+dfaDecision(decisionNumber,description) ::= <<
+try 
+{
+    isCyclicDecision = true;
+    <super.dfaDecision(...)>
+}
+catch (NoViableAltException nvae) 
+{
+    dbg.RecognitionException(nvae);
+    throw nvae;
+}
+>>
+
+@cyclicDFA.dbgCtor() ::= <<
+    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer, IDebugEventListener dbg) : this(recognizer)
+    {
+		this.dbg = dbg;
+    }
+>> 
+
+@cyclicDFA.debugMember() ::= <<
+IDebugEventListener dbg;
+
+>>
+
+@cyclicDFA.errorMethod() ::= <<
+public override void Error(NoViableAltException nvae) 
+{
+    dbg.RecognitionException(nvae);
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+EvalPredicate(<pred>,"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ST.stg
similarity index 88%
rename from src/org/antlr/codegen/templates/CSharp/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp/ST.stg
index a37814a..c61b8a1 100644
--- a/src/org/antlr/codegen/templates/CSharp/ST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp/ST.stg
@@ -1,169 +1,173 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template subgroup to add template rewrite output
- *  If debugging, then you'll also get STDbg.stg loaded.
- */
-group ST;
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-using Antlr.StringTemplate;
-using Antlr.StringTemplate.Language;
-using Hashtable = System.Collections.Hashtable;
-
->>
-
-/** Add this to each rule's return value struct */
-@returnScope.ruleReturnMembers() ::= <<
-public StringTemplate st;
-public override object Template 		{ get { return st; } }
-public override string ToString() 		{ return (st == null) ? null : st.ToString(); }
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-protected StringTemplateGroup templateLib =
-  new StringTemplateGroup("<name>Templates", typeof(AngleBracketTemplateLexer));
-
-public StringTemplateGroup TemplateLib
-{
- 	get { return this.templateLib; }
- 	set { this.templateLib = value; }
-}
-
-/// \<summary> Allows convenient multi-value initialization:
-///  "new STAttrMap().Add(...).Add(...)"
-/// \</summary>
-protected class STAttrMap : Hashtable
-{
-  public STAttrMap Add(string attrName, object value) 
-  {
-    base.Add(attrName, value);
-    return this;
-  }
-  public STAttrMap Add(string attrName, int value) 
-  {
-    base.Add(attrName, value);
-    return this;
-  }
-}
->>
-
-/** x+=rule when output=template */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label+".Template",...)>
->>
-
-rewriteTemplate(alts) ::= <<
-
-// TEMPLATE REWRITE
-<if(backtracking)>
-if ( backtracking==0 )
-{
-  <alts:rewriteTemplateAlt(); separator="else ">
-  <if(rewrite)><replaceTextInLine()><endif>
-}
-<else>
-<alts:rewriteTemplateAlt(); separator="else ">
-<if(rewrite)><replaceTextInLine()><endif>
-<endif>
->>
-
-replaceTextInLine() ::= <<
-<if(TREE_PARSER)>
-((TokenRewriteStream)input.TokenStream).Replace(
-  input.TreeAdaptor.GetTokenStartIndex(retval.start),
-  input.TreeAdaptor.GetTokenStopIndex(retval.start),
-  retval.st);
-<else>
-((TokenRewriteStream)input).Replace(
-  ((Token)retval.start).TokenIndex,
-  input.LT(-1).TokenIndex,
-  retval.st);
-<endif>
->>
-
-rewriteTemplateAlt() ::= <<
-// <it.description>
-<if(it.pred)>
-if (<it.pred>) {
-    retval.st = <it.alt>;
-}<\n>
-<else>
-{
-    retval.st = <it.alt>;
-}<\n>
-<endif>
->>
-
-rewriteEmptyTemplate(alts) ::= <<
-null;
->>
-
-/** Invoke a template with a set of attribute name/value pairs.
- *  Set the value of the rule's template *after* having set
- *  the attributes because the rule's template might be used as
- *  an attribute to build a bigger template; you get a self-embedded
- *  template.
- */
-rewriteExternalTemplate(name,args) ::= <<
-templateLib.GetInstanceOf("<name>"<if(args)>,
-  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
-  <endif>)
->>
-
-/** expr is a string expression that says what template to load */
-rewriteIndirectTemplate(expr,args) ::= <<
-templateLib.GetInstanceOf(<expr><if(args)>,
-  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
-  <endif>)
->>
-
-/** Invoke an inline template with a set of attribute name/value pairs */
-rewriteInlineTemplate(args, template) ::= <<
-new StringTemplate(templateLib, "<template>"<if(args)>,
-  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
-  <endif>)
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-<action>
->>
-
-/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
-actionSetAttribute(st,attrName,expr) ::= <<
-(<st>).SetAttribute("<attrName>",<expr>);
->>
-
-/** Translate %{stringExpr} */
-actionStringConstructor(stringExpr) ::= <<
-new StringTemplate(templateLib,<stringExpr>)
->>
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+group ST;
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.StringTemplate;
+using Antlr.StringTemplate.Language;
+<if(!backtracking)>
+using Hashtable = System.Collections.Hashtable;
+<endif>
+
+>>
+
+/** Add this to each rule's return value struct */
+@returnScope.ruleReturnMembers() ::= <<
+private StringTemplate st;
+public StringTemplate ST    { get { return st; } set { st = value; } }
+public override object Template 		{ get { return st; } }
+public override string ToString() 		{ return (st == null) ? null : st.ToString(); }
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+protected StringTemplateGroup templateLib =
+  new StringTemplateGroup("<name>Templates", typeof(AngleBracketTemplateLexer));
+
+public StringTemplateGroup TemplateLib
+{
+ 	get { return this.templateLib; }
+ 	set { this.templateLib = value; }
+}
+
+/// \<summary> Allows convenient multi-value initialization:
+///  "new STAttrMap().Add(...).Add(...)"
+/// \</summary>
+protected class STAttrMap : Hashtable
+{
+  public STAttrMap Add(string attrName, object value) 
+  {
+    base.Add(attrName, value);
+    return this;
+  }
+  public STAttrMap Add(string attrName, int value) 
+  {
+    base.Add(attrName, value);
+    return this;
+  }
+}
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Template",...)>
+>>
+
+rewriteTemplate(alts) ::= <<
+
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+  <alts:rewriteTemplateAlt(); separator="else ">
+  <if(rewriteMode)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewriteMode)><replaceTextInLine()><endif>
+<endif>
+>>
+
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+((TokenRewriteStream)input.TokenStream).Replace(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start),
+  retval.ST);
+<else>
+((TokenRewriteStream)input).Replace(
+  ((IToken)retval.Start).TokenIndex,
+  input.LT(-1).TokenIndex,
+  retval.ST);
+<endif>
+>>
+
+rewriteTemplateAlt() ::= <<
+// <it.description>
+<if(it.pred)>
+if (<it.pred>) {
+    retval.ST = <it.alt>;
+}<\n>
+<else>
+{
+    retval.ST = <it.alt>;
+}<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+null;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <<
+templateLib.GetInstanceOf("<name>"<if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <<
+templateLib.GetInstanceOf(<expr><if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <<
+new StringTemplate(templateLib, "<template>"<if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
+  <endif>)
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>).SetAttribute("<attrName>",<expr>);
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+new StringTemplate(templateLib,<stringExpr>)
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
similarity index 54%
rename from src/org/antlr/codegen/templates/CSharp/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
index 2f698b0..574e8d9 100644
--- a/src/org/antlr/codegen/templates/CSharp/AST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
@@ -1,465 +1,415 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group AST;
-
- at outputFile.imports() ::= <<
-<@super.imports()>
-<if(!TREE_PARSER)><! tree parser would already have imported !>
-using Antlr.Runtime.Tree;<\n>
-<endif>
->>
-
- at genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-protected ITreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
-public ITreeAdaptor TreeAdaptor
-{
-    get { return this.adaptor; }
-    set { this.adaptor = value; }
-}
->>
-
- at returnScope.ruleReturnMembers() ::= <<
-internal <ASTLabelType> tree;
-override public object Tree
-{
-	get { return tree; }
-}
->>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> root_0 = null;<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<ruleDescriptor.tokenLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
-<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor,"token <it>");}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(backtracking)>
-if ( backtracking==0 )
-{
-<endif>
-	retval.tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-	adaptor.SetTokenBoundaries(retval.Tree, retval.start, retval.stop);
-<if(backtracking)>
-}
-<endif>
-<endif>
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  These should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
- at alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-root_0 = (<ASTLabelType>)adaptor.GetNilNode();<\n>
-<endif>
-<endif>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ID but track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>stream_<token>.Add(<label>);<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated
-// rather than just added on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.AddChild(root_0, adaptor.Create(<label>));})>
->>
-
-matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(adaptor.Create(<label>), root_0);})>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.AddChild(root_0, <label>.Tree);
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>stream_<rule>.Add(<label>.Tree);
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label+".Tree",...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefBang(...)>
-<listLabel(elem=label+".Tree",...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(elem=label+".Tree",...)>
->>
-
-// WILDCARD AST
-
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-// TODO: ugh, am i really missing the combinations for Track and ListLabel?
-// there's got to be a better way
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-
-// AST REWRITE
-// elements:          <referencedElementsDeep; separator=", ">
-// token labels:      <referencedTokenLabels; separator=", ">
-// rule labels:       <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels:  <referencedRuleListLabels; separator=", ">
-<if(backtracking)>
-if ( backtracking==0 ) {<\n>
-<endif>
-<prevRuleRootRef()>.tree = root_0;
-<rewriteCodeLabels()>
-root_0 = (<ASTLabelType>)adaptor.GetNilNode();
-<alts:rewriteAlt(); separator="else ">
-<if(backtracking)>
-}
-<endif>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor, "token <it>", <it>);};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{RewriteRuleTokenStream stream_<it> = new RewriteRuleTokenStream(adaptor,"token <it>", list_<it>);};
-    separator="\n"
->
-<referencedRuleLabels
-    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", (<it>!=null ? <it>.Tree : null));};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", list_<it>);};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather shallow like other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements,     // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if ( <referencedElementsDeep:{el | stream_<el>.HasNext()}; separator=" || "> )
-{
-    <alt>
-}
-<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements,     // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
-{
-    <alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements,     // elements in immediately block; no nested blocks
-	description) ::=
-<<
-if ( !(<referencedElements:{el | stream_<el>.HasNext()}; separator=" || ">) ) {
-    throw new RewriteEarlyExitException();
-}
-while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
-{
-    <alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>)
-{
-    <a.alt>
-}<\n>
-<else>
-{
-    <a.alt>
-}<\n>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = null;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
-<root:rewriteElement()>
-<children:rewriteElement()>
-adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-}<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen()>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,args) ::= <<
-adaptor.AddChild(root_<treeLevel>, <if(args)>adaptor.Create(<token>,<args; separator=", ">)<else>stream_<token>.Next()<endif>);<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.Next(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,args) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<if(args)>adaptor.Create(<token>,<args; separator=", ">)<else>stream_<token>.Next()<endif>, root_<treeLevel>);<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, adaptor.Create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>));<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(adaptor.Create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>), root_<treeLevel>);<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  let's us refer to $rule to mean previous value.  I am reusing the
- *  variable 'tree' sitting in retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assign will be to retval.Tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<rule>.Next());<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-adaptor.AddChild(root_<treeLevel>, <action>);<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.Next());<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, ((<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)stream_<label>.Next()).Tree);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+using Antlr.Runtime.Tree;<\n>
+<endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected ITreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
+public ITreeAdaptor TreeAdaptor
+{
+    get { return this.adaptor; }
+    set {
+	this.adaptor = value;
+	<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+}
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+private <ASTLabelType> tree;
+override public object Tree
+{
+	get { return tree; }
+	set { tree = (<ASTLabelType>) value; }
+}
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{RewriteRule<rewriteElementType>Stream stream_<it> = new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = (<ASTLabelType>)adaptor.GetNilNode();<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.Add(<label>.Tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.Add(<label>.Tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements:          <referencedElementsDeep; separator=", ">
+// token labels:      <referencedTokenLabels; separator=", ">
+// rule labels:       <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels:  <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.Tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.GetNilNode();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+input.ReplaceChildren(adaptor.GetParent(retval.Start),
+                      adaptor.GetChildIndex(retval.Start),
+                      adaptor.GetChildIndex(_last),
+                      retval.Tree);
+<endif>
+<endif>
+<! if parser or rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.Tree = root_0;
+<endif>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.Tree = root_0;
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{RewriteRule<rewriteElementType>Stream stream_<it> = new RewriteRule<rewriteElementType>Stream(adaptor, "token <it>", <it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{RewriteRule<rewriteElementType>Stream stream_<it> = new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "rule <it>", <it>!=null ? <it>.Tree : null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{RewriteRuleSubtreeStream stream_<it> = new RewriteRuleSubtreeStream(adaptor, "token <it>", list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.HasNext()}; separator=" || ">) ) {
+    throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.HasNext()}; separator=" || "> )
+{
+    <alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>)
+{
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.Tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.AddChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.Create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.NextNode()
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
similarity index 56%
rename from src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
index 5dc1610..f0b0869 100644
--- a/src/org/antlr/codegen/templates/CSharp/ASTDbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
@@ -1,44 +1,97 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : C# by code generator.
- */
-group ASTDbg;
-
-parserMembers() ::= <<
-protected ITreeAdaptor adaptor = new DebugTreeAdaptor(dbg, new CommonTreeAdaptor());
-public ITreeAdaptor TreeAdaptor
-{
-	get { return this.adaptor; }
-	set { this.adaptor = new DebugTreeAdaptor(dbg, value); }
-}<\n>
->>
-
- at rewriteElement.pregen() ::= "dbg.Location(<e.line>,<e.pos>);"
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : C# by code generator.
+ */
+group ASTDbg;
+
+parserMembers() ::= <<
+protected DebugTreeAdaptor adaptor;
+public ITreeAdaptor TreeAdaptor
+{
+	get { 
+<if(grammar.grammarIsRoot)>
+		return this.adaptor;
+<else>
+		this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor 
+<endif><\n>
+    		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+	set { this.adaptor = new DebugTreeAdaptor(dbg, value); }
+}<\n>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy dbg = new DebugEventSocketProxy(this, port, adaptor);
+DebugListener = dbg;
+<!
+Original line follows, replaced by the next two ifs:
+set<inputStreamType>(new Debug<inputStreamType>(input,dbg));
+ !>
+<if(PARSER)>
+TokenStream = new DebugTokenStream(input,dbg);<\n>
+<endif>
+<if(TREE_PARSER)>
+TokenStream = new DebugTreeNodeStream(input,dbg);<\n>
+<endif>
+try {
+    dbg.Handshake();
+} catch (IOException ioe) {
+    ReportError(ioe);
+}
+>>
+
+ at ctorForRootGrammar.finally() ::= <<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+dbg.TreeAdaptor = adap;
+>>
+
+ at ctorForProfilingRootGrammar.finally() ::=<<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+dbg.TreeAdaptor = adap;
+>>
+
+ at ctorForPredefinedListener.superClassRef() ::= "base(input, dbg)"
+
+ at ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;<\n>
+<endif>
+>>
+
+ at rewriteElement.pregen() ::= "dbg.Location(<e.line>,<e.pos>);"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg
new file mode 100644
index 0000000..ef44412
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg
@@ -0,0 +1,220 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+// Conversion of the second argument necessary, but harmless
+retval.Tree = (<ASTLabelType>)adaptor.ErrorNode(input, (IToken) retval.Start, input.LT(-1), re);
+<! System.Console.WriteLine("<ruleName> returns " + ((CommonTree)retval.Tree).ToStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = <createNodeFromToken(...)>;
+	adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>
+}
+<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = <createNodeFromToken(...)>;
+	root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>
+}
+<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( state.backtracking == 0 ) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( state.backtracking == 0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>adaptor.AddChild(root_0, <label>.Tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+	adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>
+}
+<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+	<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+	root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>
+}
+<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)adaptor.Create(<label>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+<endif>
+	retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(!TREE_PARSER)>
+	adaptor.SetTokenBoundaries(retval.Tree, (IToken) retval.Start, (IToken) retval.Stop);
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg
new file mode 100644
index 0000000..c835580
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg
@@ -0,0 +1,315 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> _first_0 = null;
+<ASTLabelType> _last = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.Tree = (<ASTLabelType>)_first_0;
+if ( adaptor.GetParent(retval.Tree)!=null && adaptor.IsNil( adaptor.GetParent(retval.Tree) ) )
+    retval.Tree = (<ASTLabelType>)adaptor.GetParent(retval.Tree);
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = null;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.GetNilNode();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = (<ASTLabelType>) <root.el.label>.Tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1) == Token.DOWN )
+{
+    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+	<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+	adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>
+}
+<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>
+if ( state.backtracking == 0 )
+{
+<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+	<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+	root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>
+}
+<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<treeLevel>, <label>.Tree);
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = (<ASTLabelType>) <label>.Tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.NextNode())
+<else>
+stream_<token>.NextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+<endif>
+	retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(backtracking)>
+}
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/CSharp/CSharp.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
similarity index 67%
rename from src/org/antlr/codegen/templates/CSharp/CSharp.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
index b13a211..90030c9 100644
--- a/src/org/antlr/codegen/templates/CSharp/CSharp.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
@@ -1,1368 +1,1444 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group CSharp implements ANTLRCore;
-
-csharpTypeInitMap ::= [
-	"int":"0",
-	"uint":"0",
-	"long":"0",
-	"ulong":"0",
-	"float":"0.0",
-	"double":"0.0",
-	"bool":"false",
-	"byte":"0",
-	"sbyte":"0",
-	"short":"0",
-	"ushort":"0",
-	"char":"char.MinValue",
-	default:"null" // anything other than an atomic type
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs, 
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
-	   backtracking, synpreds, memoize, numRules,
-	   fileName, ANTLRVersion, generatedTimestamp, trace,
-	   scopes, superClass, literals) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<if(actions.(actionScope).namespace)>
-namespace <actions.(actionScope).namespace>
-{
-<endif>
-
-<actions.(actionScope).header>
-
-<@imports>
-using System;
-using Antlr.Runtime;
-<if(TREE_PARSER)>
-using Antlr.Runtime.Tree;
-<endif>
-using IList 		= System.Collections.IList;
-using ArrayList 	= System.Collections.ArrayList;
-using Stack 		= Antlr.Runtime.Collections.StackList;
-
-<if(backtracking)>
-using IDictionary	= System.Collections.IDictionary;
-using Hashtable 	= System.Collections.Hashtable;
-<endif>
-
-
-<@end>
-
-<docComment>
-<recognizer>
-<if(actions.(actionScope).namespace)>
-}
-<endif>
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-public class <name> : Lexer 
-{
-    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <actions.lexer.members>
-
-    public <name>() <! needed by subclasses !>
-    {
-		InitializeCyclicDFAs();
-    }
-    public <name>(ICharStream input) 
-		: base(input)
-	{
-		InitializeCyclicDFAs();
-<if(backtracking)>
-        ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-    }
-    
-    override public string GrammarFileName
-    {
-    	get { return "<fileName>";} 
-    }
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator="\n\n">
-
-   	<synpreds:{p | <lexerSynpred(p)>}>
-
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-	private void InitializeCyclicDFAs()
-	{
-	    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
-	}
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-    
-}
->>
-
-/** A override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon error; just rewind, consume
- *  a token and then try again.  backtracking needs to be set as well.
- *
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-override public IToken NextToken() 
-{
-    while (true) 
-	{
-        if ( input.LA(1) == (int)CharStreamConstants.EOF ) 
-		{
-            return Token.EOF_TOKEN;
-        }
-
-	    token = null;
-		channel = Token.DEFAULT_CHANNEL;
-        tokenStartCharIndex = input.Index();
-        tokenStartCharPositionInLine = input.CharPositionInLine;
-        tokenStartLine = input.Line;
-	    text = null;
-        try 
-		{
-            int m = input.Mark();
-            backtracking = 1; <! means we won't throw slow exception !>
-            failed = false;
-            mTokens();
-            backtracking = 0;
-<!
-			mTokens backtracks with synpred at backtracking==2
-            and we set the synpredgate to allow actions at level 1. 
-!>
-            if ( failed ) 
-			{
-	            input.Rewind(m);
-                input.Consume(); <! // advance one char and try again !>
-            }
-            else 
-			{
-				Emit();
-                return token;
-            }
-        }
-        catch (RecognitionException re) 
-		{
-            // shouldn't happen in backtracking mode, but...
-            ReportError(re);
-            Recover(re);
-        }
-    }
-}
-
-override public void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
-{
-	if ( backtracking > 1 ) 
-		base.Memoize(input, ruleIndex, ruleStartIndex);
-}
-
-override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
-{
-	if ( backtracking>1 ) 
-		return base.AlreadyParsedRule(input, ruleIndex);
-	return false;
-}
->>
-
-filteringActionGate() ::= "(backtracking == 1)"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              ASTLabelType="object", labelType, members) ::= <<
-public class <name> : <@superClassName><superClass><@end> 
-{
-    public static readonly string[] tokenNames = new string[] 
-	{
-        "\<invalid>", 
-		"\<EOR>", 
-		"\<DOWN>", 
-		"\<UP>", 
-		<tokenNames; separator=", \n">
-    };
-
-    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <@members>
-   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-
-    public <name>(<inputStreamType> input) 
-		: base(input)
-	{
-		InitializeCyclicDFAs();
-<if(backtracking)>
-        ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-    }
-    <@end>
-
-    override public string[] TokenNames
-	{
-		get { return tokenNames; }
-	}
-
-    override public string GrammarFileName
-	{
-		get { return "<fileName>"; }
-	}
-
-    <members>
-
-    <rules; separator="\n\n">
-
-   	<synpreds:{p | <synpred(p)>}>
-
-   	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-	private void InitializeCyclicDFAs()
-	{
-    	<cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
-	}
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>
-}
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="IToken", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="ITokenStream", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="ITreeNodeStream", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc..., just give simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) //throws RecognitionException
-{   
-<if(trace)>
-    TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    try
-    {
-        <block>
-    }
-    finally
-    {
-        TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    }
-<else>
-    <block>
-<endif>
-}
-// $ANTLR end <ruleName>
->>
-
-synpredDecls(name) ::= <<
-SynPredPointer <name>;<\n>
->>
-
-synpred(name) ::= <<
-public bool <name>() 
-{
-    backtracking++;
-    <@start()>
-    int start = input.Mark();
-    try 
-    {
-        <name>_fragment(); // can never throw exception
-    }
-    catch (RecognitionException re) 
-    {
-        Console.Error.WriteLine("impossible: "+re);
-    }
-    bool success = !failed;
-    input.Rewind(start);
-    <@stop()>
-    backtracking--;
-    failed = false;
-    return success;
-}<\n>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( (backtracking > 0) && AlreadyParsedRule(input, <ruleDescriptor.index>) ) 
-{
-	return <ruleReturnValue()>; 
-}
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if ( backtracking > 0 ) {failed = true; return <ruleReturnValue()>;}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// $ANTLR start <ruleName>
-// <fileName>:<description>
-public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [1]
-{   
-    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try 
-	{
-	    <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    catch (RecognitionException re) 
-	{
-        ReportError(re);
-        Recover(input,re);
-    }<\n>
-<endif>
-<endif>
-<endif>
-    finally 
-	{
-        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-// $ANTLR end <ruleName>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) 
-{
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = new <returnType()>();
-retval.start = input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.Index();
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.Push(new <it>_scope());}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.Push(new <it.name>_scope());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.Pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.Pop();}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{<labelType> <it.label.text> = null;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{IList list_<it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{<labelType> <it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{IList list_<it.label.text> = null;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = input.LT(-1);<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( backtracking > 0 ) 
-{
-	Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); 
-}
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start <ruleName> 
-public void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [2]
-{
-    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleDeclarations()>
-    try 
-	{
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        int _type = <ruleName>;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        this.type = _type;
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    finally 
-	{
-        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-    }
-}
-// $ANTLR end <ruleName>
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-override public void mTokens() // throws RecognitionException 
-{
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber> = <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) 
-{
-    <alts:altSwitchCase()>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber> = <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) 
-{
-    <alts:altSwitchCase()>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int cnt<decisionNumber> = 0;
-<decls>
-<@preloop()>
-do 
-{
-    int alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) 
-	{
-		<alts:altSwitchCase()>
-		default:
-		    if ( cnt<decisionNumber> >= 1 ) goto loop<decisionNumber>;
-		    <ruleBacktrackFailure()>
-	            EarlyExitException eee =
-	                new EarlyExitException(<decisionNumber>, input);
-	            <@earlyExitException()>
-	            throw eee;
-    }
-    cnt<decisionNumber>++;
-} while (true);
-
-loop<decisionNumber>:
-	;	// Stops C# compiler whinging that label 'loop<decisionNumber>' has no statements
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-do 
-{
-    int alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) 
-	{
-		<alts:altSwitchCase()>
-		default:
-		    goto loop<decisionNumber>;
-    }
-} while (true);
-
-loop<decisionNumber>:
-	;	// Stops C# compiler whinging that label 'loop<decisionNumber>' has no statements
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) by before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-case <i> :
-    <@prealt()>
-    <it>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
-// <fileName>:<description>
-{
-	<@declarations()>
-	<elements:element()>
-	<@cleanup()>
-}
->>
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-<@prematch()>
-<it.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label> = (<labelType>)input.LT(1);<\n>
-<endif>
-Match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label> == null) list_<label> = new ArrayList();
-list_<label>.Add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-Match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= input.LA(1);<\n>
-<else>
-<label> = (<labelType>)input.LT(1);<\n>
-<endif>
-<endif>
-if ( <s> ) 
-{
-    input.Consume();
-    <postmatchCode>
-<if(!LEXER)>
-    errorRecovery = false;
-<endif>
-    <if(backtracking)>failed = false;<endif>
-}
-else 
-{
-    <ruleBacktrackFailure()>
-    MismatchedSetException mse =
-        new MismatchedSetException(null,input);
-    <@mismatchedSetException()>
-<if(LEXER)>
-    Recover(mse);
-<else>
-    RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
-<endif>
-    throw mse;
-}<\n>
->>
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label) ::= <<
-<if(label)>
-int <label>Start = CharIndex;
-Match(<string>); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, CharIndex-1);
-<else>
-Match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label> = (<labelType>)input.LT(1);<\n>
-<endif>
-MatchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
- */
-ruleRef(rule,label,elementIndex,args) ::= <<
-PushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
-<if(label)>
-<label> = <rule>(<args; separator=", ">);<\n>
-<else>
-<rule>(<args; separator=", ">);<\n>
-<endif>
-followingStackPointer_--;
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
-<else>
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;
-Match(EOF); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
-<else>
-Match(EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1) == Token.DOWN )
-{
-    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) 
-{
-    <ruleBacktrackFailure()>
-    throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else 
-{
-<if(eotPredictsAlt)>
-    alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else 
-{
-    alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter to the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>)
-{
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) 
-{
-<edges; separator="\n">
-	default:
-<if(eotPredictsAlt)>
-    	alt<decisionNumber> = <eotPredictsAlt>;
-    	break;
-<else>
-	    <ruleBacktrackFailure()>
-	    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
-	        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-	    <@noViableAltException()>
-	    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) 
-{
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) 
-{
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-	default:
-    	alt<decisionNumber> = <eotPredictsAlt>;
-    	break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{case <it>:}; separator="\n">
-	{
-    <targetState>
-    }
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
->>
-
-/* Dump DFA tables.
- */
-cyclicDFA(dfa) ::= <<
-static readonly short[] DFA<dfa.decisionNumber>_eot = {
-    <dfa.eot:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-static readonly short[] DFA<dfa.decisionNumber>_eof = {
-    <dfa.eof:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-static readonly int[] DFA<dfa.decisionNumber>_min = {
-    <dfa.min:{n|<n>}; null="0", wrap="\n", separator=", ">
-    };
-static readonly int[] DFA<dfa.decisionNumber>_max = {
-    <dfa.max:{n|<n>}; null="0", wrap="\n", separator=", ">
-    };
-static readonly short[] DFA<dfa.decisionNumber>_accept = {
-    <dfa.accept:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-static readonly short[] DFA<dfa.decisionNumber>_special = {
-    <dfa.special:{n|<n>}; null="-1", wrap="\n", separator=", ">
-    };
-
-static readonly short[] dfa<dfa.decisionNumber>_transition_null = null;
-<dfa.edgeTransitionClassMap.keys:{table |
-static readonly short[] dfa<dfa.decisionNumber>_transition<i0> = \{
-	<table; separator=", ", wrap="\n    ", null="-1">
-	\};}>
-
-static readonly short[][] DFA<dfa.decisionNumber>_transition = {
-	<dfa.transitionEdgeTables:{whichTable|dfa<dfa.decisionNumber>_transition<whichTable>}; null="_null", separator=",\n">
-    };
-
-protected class DFA<dfa.decisionNumber> : DFA
-{
-    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) 
-    {
-        this.recognizer = recognizer;
-        this.decisionNumber = <dfa.decisionNumber>;
-        this.eot = DFA<dfa.decisionNumber>_eot;
-        this.eof = DFA<dfa.decisionNumber>_eof;
-        this.min = DFA<dfa.decisionNumber>_min;
-        this.max = DFA<dfa.decisionNumber>_max;
-        this.accept     = DFA<dfa.decisionNumber>_accept;
-        this.special    = DFA<dfa.decisionNumber>_special;
-        this.transition = DFA<dfa.decisionNumber>_transition;
-    }
-
-    override public string Description
-    {
-        get { return "<dfa.description>"; }
-    }
-
-    <@errorMethod()>
-}<\n>
-<if(dfa.specialStateSTs)>
-
-protected internal int DFA<dfa.decisionNumber>_SpecialStateTransition(DFA dfa, int s, IIntStream input) //throws NoViableAltException
-{
-	int _s = s;
-    switch ( s )
-    {
-    <dfa.specialStateSTs:{state |
-       	case <i0> : <! compressed special state numbers 0..n-1 !>
-           	<state>}; separator="\n">
-    }
-<if(backtracking)>
-    if (backtracking > 0) {failed = true; return -1;}<\n>
-<endif>
-    NoViableAltException nvae =
-        new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
-    dfa.Error(nvae);
-    throw nvae;
-}<\n>
-<endif>
->>
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-int index<decisionNumber>_<stateNumber> = input.Index();
-input.Rewind();<\n>
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.Seek(index<decisionNumber>_<stateNumber>);<\n>
-<endif>
-if ( s >= 0 ) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>) { s = <targetStateNumber>; }<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left> && <right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | || <o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "<pred>"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>) == <atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber> \>= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) \>= <lower> && input.LA(<k>) \<= <upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\" || \">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected class <scope.name>_scope 
-{
-    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected class <scope.name>_scope 
-{
-    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.name>_return
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Generate the C# type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Using a type to init value map, try to init a type; if not in table
- *  must be an object, default value is "null".
- */
-initValue(typeName) ::= <<
-<csharpTypeInitMap.(typeName)>
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-public class <returnType()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope 
-{
-    <scope.attributes:{public <it.decl>;}; separator="\n">
-    <@ruleReturnMembers()>
-};
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name>
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack[<index>]).<attr.name>
-<else>
-((<scope>_scope)<scope>_stack.Peek()).<attr.name>
-<endif>
-<endif>
->>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name> = <expr>;
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack[<index>]).<attr.name> = <expr>;
-<else>
-((<scope>_scope)<scope>_stack.Peek()).<attr.name> = <expr>;
-<endif>
-<endif>
->>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> = <expr>;
-<else>
-<attr.name> = <expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.Text"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.Type"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.Line"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.CharPositionInLine"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.Channel"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.TokenIndex"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(<scope>.start),
-  input.TreeAdaptor.GetTokenStopIndex(<scope>.start) )
-<else>
-input.ToString(<scope>.start,<scope>.stop)
-<endif>
->>
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.Type"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.Line"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.CharPositionInLine"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.Channel"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.TokenIndex"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.Text"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(retval.start),
-  input.TreeAdaptor.GetTokenStopIndex(retval.start) )
-<else>
-input.ToString(retval.start,input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "Text"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "channel"
-lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree = <expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st = <expr>;"
-
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if ( <actions.(actionScope).synpredgate> )
-{
-  <action>
-}
-<else>
-if ( backtracking == 0 ) 
-{
-  <action>
-}
-<endif>
-<else>
-<action>
-<endif>
->>
-
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{<it>UL};separator=",">});<\n>
->>
-
-codeFileExtension() ::= ".cs"
-
-true() ::= "true"
-false() ::= "false"
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group CSharp2 implements ANTLRCore;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs, 
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@debugPreprocessor()>
+// The variable 'variable' is assigned but its value is never used.
+#pragma warning disable 168, 219
+// Unreachable code detected.
+#pragma warning disable 162
+
+<actions.(actionScope).header>
+
+<@imports>
+using System;
+using Antlr.Runtime;
+<if(TREE_PARSER)>
+using Antlr.Runtime.Tree;
+<endif>
+using IList 		= System.Collections.IList;
+using ArrayList 	= System.Collections.ArrayList;
+using Stack 		= Antlr.Runtime.Collections.StackList;
+
+<if(backtracking)>
+using IDictionary	= System.Collections.IDictionary;
+using Hashtable 	= System.Collections.Hashtable;
+<endif>
+
+<@end>
+
+<if(actions.(actionScope).namespace)>
+namespace <actions.(actionScope).namespace>
+{
+<endif>
+
+<docComment>
+<recognizer>
+<if(actions.(actionScope).namespace)>
+}
+<endif>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="IToken",
+      filterMode, superClass="Lexer") ::= <<
+public partial class <grammar.recognizerName> : <@superClassName><superClass><@end> {
+    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <actions.lexer.members>
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    public <grammar.recognizerName>() <! needed by subclasses !>
+    {
+		InitializeCyclicDFAs();
+    }
+    public <grammar.recognizerName>(ICharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: this(input, null<grammar.delegators:{g|, <g:delegateName()>}>) {
+    }
+    public <grammar.recognizerName>(ICharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: base(input, state) {
+		InitializeCyclicDFAs(); <! Necessary in C#??? Not removed yet. !>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        state.ruleMemo = new Hashtable[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
+<endif>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+        <grammar.delegators:
+         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+    }
+    
+    override public string GrammarFileName
+    {
+    	get { return "<fileName>";} 
+    }
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+   	<synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+	private void InitializeCyclicDFAs(<@debugInitializeCyclicDFAs()>)
+	{
+	    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<@debugAddition()>);}; separator="\n">
+	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
+	}
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+    
+}
+>>
+
+/** A override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+override public IToken NextToken() 
+{
+    while (true) 
+	{
+        if ( input.LA(1) == (int)CharStreamConstants.EOF ) 
+		{
+            return Token.EOF_TOKEN;
+        }
+
+	    state.token = null;
+		state.channel = Token.DEFAULT_CHANNEL;
+        state.tokenStartCharIndex = input.Index();
+        state.tokenStartCharPositionInLine = input.CharPositionInLine;
+        state.tokenStartLine = input.Line;
+	    state.text = null;
+        try 
+		{
+            int m = input.Mark();
+            state.backtracking = 1; <! means we won't throw slow exception !>
+            state.failed = false;
+            mTokens();
+            state.backtracking = 0;
+<!
+			mTokens backtracks with synpred at backtracking==2
+            and we set the synpredgate to allow actions at level 1. 
+!>
+            if ( state.failed ) 
+			{
+	            input.Rewind(m);
+                input.Consume(); <! // advance one char and try again !>
+            }
+            else 
+			{
+				Emit();
+                return state.token;
+            }
+        }
+        catch (RecognitionException re) 
+		{
+            // shouldn't happen in backtracking mode, but...
+            ReportError(re);
+            Recover(re);
+        }
+    }
+}
+
+override public void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
+{
+	if ( state.backtracking > 1 ) 
+		base.Memoize(input, ruleIndex, ruleStartIndex);
+}
+
+override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+	if ( state.backtracking>1 ) 
+		return base.AlreadyParsedRule(input, ruleIndex);
+	return false;
+}
+>>
+
+actionGate() ::= "(state.backtracking==0)"
+
+filteringActionGate() ::= "(state.backtracking == 1)"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="object", labelType, members, rewriteElementType) ::= <<
+public partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+<if(grammar.grammarIsRoot)>
+    public static readonly string[] tokenNames = new string[] 
+	{
+        "\<invalid>", 
+		"\<EOR>", 
+		"\<DOWN>", 
+		"\<UP>", 
+		<tokenNames; separator=", \n">
+    };<\n>
+<endif>
+
+    <tokens:{public const int <it.name> = <it.type>;}; separator="\n">
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    <@members>
+    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+
+    public <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>) {
+    }
+
+    public <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: base(input, state) {
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">         
+        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+    }
+    <@end>
+
+    override public string[] TokenNames {
+		get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
+    }
+
+    override public string GrammarFileName {
+		get { return "<fileName>"; }
+    }
+
+    <members>
+
+    <rules; separator="\n\n">
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor|
+    public <returnType()> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException 
+    \{
+    	<if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); 
+    \}}; separator="\n">
+
+   	<synpreds:{p | <synpred(p)>}>
+
+   	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+	private void InitializeCyclicDFAs(<@debugInitializeCyclicDFAs()>)
+	{
+    	<cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<@debugAddition()>);}; separator="\n">
+	    <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>this.dfa<dfa.decisionNumber>.specialStateTransitionHandler = new DFA.SpecialStateTransitionHandler(DFA<dfa.decisionNumber>_SpecialStateTransition);<endif>}; separator="\n">
+	}
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+}
+>>
+
+parserCtorBody() ::= <<
+<@initializeCyclicDFAs>InitializeCyclicDFAs();<@end>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new Hashtable[<length(grammar.allImportedRules)>+1];<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="IToken", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start "<ruleName>"
+public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
+    <ruleLabelDefs()>
+<if(trace)>
+    TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try
+    {
+        <block>
+    }
+    finally
+    {
+        TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+}
+// $ANTLR end "<ruleName>"
+>>
+
+synpredDecls(name) ::= <<
+SynPredPointer <name>;<\n>
+>>
+
+synpred(name) ::= <<
+public bool <name>() 
+{
+    state.backtracking++;
+    <@start()>
+    int start = input.Mark();
+    try 
+    {
+        <name>_fragment(); // can never throw exception
+    }
+    catch (RecognitionException re) 
+    {
+        Console.Error.WriteLine("impossible: "+re);
+    }
+    bool success = !state.failed;
+    input.Rewind(start);
+    <@stop()>
+    state.backtracking--;
+    state.failed = false;
+    return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( (state.backtracking > 0) && AlreadyParsedRule(input, <ruleDescriptor.index>) ) 
+{
+	return <ruleReturnValue()>; 
+}
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if ( state.backtracking > 0 ) {state.failed = true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [1]
+{   
+    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try 
+	{
+	    <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (RecognitionException re) 
+	{
+        ReportError(re);
+        Recover(input,re);
+	<@setErrorReturnValue()>
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally 
+	{
+        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+// $ANTLR end "<ruleName>"
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) 
+{
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = new <returnType()>();
+retval.Start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.Index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Push(new <it.name>_scope());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{<labelType> <it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{IList list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|<ll:ruleLabelDef(label=it)> <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{IList list_<it.label.text> = null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.Stop = input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( state.backtracking > 0 ) 
+{
+	Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); 
+}
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+public void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) // throws RecognitionException [2]
+{
+ 	<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+	<if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+		try
+		{
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        int _type = <ruleName>;
+	int _channel = DEFAULT_TOKEN_CHANNEL;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        state.type = _type;
+        state.channel = _channel;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally 
+	{
+        <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+override public void mTokens() // throws RecognitionException 
+{
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) 
+{
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) 
+{
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber> = 0;
+<decls>
+<@preloop()>
+do 
+{
+    int alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+	{
+		<alts:altSwitchCase()>
+		default:
+		    if ( cnt<decisionNumber> >= 1 ) goto loop<decisionNumber>;
+		    <ruleBacktrackFailure()>
+	            EarlyExitException eee<decisionNumber> =
+	                new EarlyExitException(<decisionNumber>, input);
+	            <@earlyExitException()>
+	            throw eee<decisionNumber>;
+    }
+    cnt<decisionNumber>++;
+} while (true);
+
+loop<decisionNumber>:
+	;	// Stops C# compiler whining that label 'loop<decisionNumber>' has no statements
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+do 
+{
+    int alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) 
+	{
+		<alts:altSwitchCase()>
+		default:
+		    goto loop<decisionNumber>;
+    }
+} while (true);
+
+loop<decisionNumber>:
+	;	// Stops C# compiler whining that label 'loop<decisionNumber>' has no statements
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+	<@declarations()>
+	<elements:element()>
+	<rew>
+	<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=(<labelType>)<endif>Match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> == null) list_<label> = new ArrayList();
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) 
+{
+    input.Consume();
+    <postmatchCode>
+<if(!LEXER)>
+    state.errorRecovery = false;
+<endif>
+    <if(backtracking)>state.failed = false;<endif>
+}
+else 
+{
+    <ruleBacktrackFailure()>
+    MismatchedSetException mse = new MismatchedSetException(null,input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    Recover(mse);
+    throw mse;
+<else>
+    throw mse;
+    <! use following code to make it recover inline; remove throw mse;
+    RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
+<endif>
+}<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int <label>Start = CharIndex;
+Match(<string>); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, CharIndex-1);
+<else>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> = (<labelType>)input.LT(1);<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)>
+<label> = <if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+<else>
+<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+<endif>
+state.followingStackPointer--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+Match(EOF); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, CharIndex-1);
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1) == Token.DOWN )
+{
+    Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) 
+{
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else 
+{
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
+        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <@noViableAltException()>
+    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else 
+{
+    alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>)
+{
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+<edges; separator="\n">
+	default:
+<if(eotPredictsAlt)>
+    	alt<decisionNumber> = <eotPredictsAlt>;
+    	break;
+<else>
+	    <ruleBacktrackFailure()>
+	    NoViableAltException nvae_d<decisionNumber>s<stateNumber> =
+	        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+	    <@noViableAltException()>
+	    throw nvae_d<decisionNumber>s<stateNumber>;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) 
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+	default:
+    	alt<decisionNumber> = <eotPredictsAlt>;
+    	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+	{
+    <targetState>
+    }
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
+>>
+
+/* Dump DFA tables.
+ */
+cyclicDFA(dfa) ::= <<
+const string DFA<dfa.decisionNumber>_eotS =
+    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_eofS =
+    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_minS =
+    "<dfa.javaCompressedMin; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_maxS =
+    "<dfa.javaCompressedMax; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_acceptS =
+    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">";
+const string DFA<dfa.decisionNumber>_specialS =
+    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>";
+static readonly string[] DFA<dfa.decisionNumber>_transitionS = {
+        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+};
+
+static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+static readonly short[][] DFA<dfa.decisionNumber>_transition = DFA.UnpackEncodedStringArray(DFA<dfa.decisionNumber>_transitionS);
+
+protected class DFA<dfa.decisionNumber> : DFA
+{
+    <@debugMember()>
+    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer)
+    {
+        this.recognizer = recognizer;
+        this.decisionNumber = <dfa.decisionNumber>;
+        this.eot = DFA<dfa.decisionNumber>_eot;
+        this.eof = DFA<dfa.decisionNumber>_eof;
+        this.min = DFA<dfa.decisionNumber>_min;
+        this.max = DFA<dfa.decisionNumber>_max;
+        this.accept = DFA<dfa.decisionNumber>_accept;
+        this.special = DFA<dfa.decisionNumber>_special;
+        this.transition = DFA<dfa.decisionNumber>_transition;
+
+    }
+    <@dbgCtor()>
+
+    override public string Description
+    {
+        get { return "<dfa.description>"; }
+    }
+
+    <@errorMethod()>
+}<\n>
+<if(dfa.specialStateSTs)>
+
+protected internal int DFA<dfa.decisionNumber>_SpecialStateTransition(DFA dfa, int s, IIntStream _input) //throws NoViableAltException
+{
+        <if(LEXER)>
+        IIntStream input = _input;
+        <endif>
+        <if(PARSER)>
+        ITokenStream input = (ITokenStream)_input;
+        <endif>
+        <if(TREE_PARSER)>
+        ITreeNodeStream input = (ITreeNodeStream)_input;
+        <endif>
+	int _s = s;
+    switch ( s )
+    {
+    <dfa.specialStateSTs:{state |
+       	case <i0> : <! compressed special state numbers 0..n-1 !>
+           	<state>}; separator="\n">
+    }
+<if(backtracking)>
+    if (state.backtracking > 0) {state.failed = true; return -1;}<\n>
+<endif>
+    NoViableAltException nvae<dfa.decisionNumber> =
+        new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
+    dfa.Error(nvae<dfa.decisionNumber>);
+    throw nvae<dfa.decisionNumber>;
+}<\n>
+<endif>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+int index<decisionNumber>_<stateNumber> = input.Index();
+input.Rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.Seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s >= 0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>) <endif>) { s = <targetStateNumber>; }<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left> && <right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | || <o>}>)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber> \>= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) \>= <lower> && input.LA(<k>) \<= <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope 
+{
+    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope 
+{
+    <scope.attributes:{protected internal <it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+default(<typeName>)
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public class <ruleDescriptor:returnStructName()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope
+{
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+};
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack[<index>]).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack[<scope>_stack.Count-<negIndex>-1]).<attr.name> = <expr>;
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack[<index>]).<attr.name> = <expr>;
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name> = <expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+((<scope> != null) ? <scope>.<attr.name> : <initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> = <expr>;
+<else>
+<attr.name> = <expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "((<scope> != null) ? <scope>.Text : null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "((<scope> != null) ? <scope>.Type : 0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "((<scope> != null) ? <scope>.Line : 0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "((<scope> != null) ? <scope>.CharPositionInLine : 0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "((<scope> != null) ? <scope>.Channel : 0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "((<scope> != null) ? <scope>.TokenIndex : 0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "((<scope> != null) ? ((<labelType>)<scope>.Start) : null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "((<scope> != null) ? ((<labelType>)<scope>.Stop) : null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "((<scope> != null) ? ((<ASTLabelType>)<scope>.Tree) : null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+((<scope> != null) ? input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
+  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start)) : null)
+<else>
+((<scope> != null) ? input.ToString((IToken)(<scope>.Start),(IToken)(<scope>.Stop)) : null)
+<endif>
+>>
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> != null) ? <scope>.ST : null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "((<scope> != null) ? <scope>.Type : 0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "((<scope> != null) ? <scope>.Line : 0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "((<scope> != null) ? <scope>.CharPositionInLine : -1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "((<scope> != null) ? <scope>.Channel : 0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "((<scope> != null) ? <scope>.TokenIndex : 0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "((<scope> != null) ? <scope>.Text : null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.Start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.Stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.Tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start) )
+<else>
+input.ToString((IToken)retval.Start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.ST"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.Tree = <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.ST = <expr>;"
+
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+  <action>
+}
+<else>
+<action>
+<endif>
+>>
+
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{<it>UL};separator=",">});<\n>
+>>
+
+codeFileExtension() ::= ".cs"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/src/org/antlr/codegen/templates/CSharp/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
similarity index 50%
rename from src/org/antlr/codegen/templates/CSharp/Dbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
index f000d01..c65995c 100644
--- a/src/org/antlr/codegen/templates/CSharp/Dbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
@@ -1,192 +1,300 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template overrides to add debugging to normal Java output;
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-group Dbg;
-
- at outputFile.imports() ::= <<
-<@super.imports()>
-using Antlr.Runtime.Debug;
->>
-
- at genericParser.members() ::= <<
-public static readonly string[] ruleNames = new string[] {
-    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
-};<\n>
-public int ruleLevel = 0;
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-public <name>(<inputStreamType> input) : <if(profile)>this(input, new Profiler(null))<else>base(input)<endif>
-{
-<if(profile)>
-        Profiler p = (Profiler)dbg;
-        p.Parser = this;
-<endif><\n>
-<if(memoize)>
-    ruleMemo = new IDictionary[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-}
-<if(profile)>
-override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
-{
-    ((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
-    return base.AlreadyParsedRule(input, ruleIndex);
-}<\n>
-override public void Memoize(IIntStream input,
-                    int ruleIndex,
-                    int ruleStartIndex)
-{
-    ((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
-    base.Memoize(input, ruleIndex, ruleStartIndex);
-}<\n>
-<endif>
-public <name>(<inputStreamType> input, IDebugEventListener dbg)
-	: base(input, dbg)
-{
-}<\n>
-protected bool EvalPredicate(bool result, string predicate) 
-{
-    dbg.SemanticPredicate(result, predicate);
-    return result;
-}<\n>
->>
-
- at genericParser.superClassName() ::= "Debug<@super.superClassName()>"
-
- at rule.preamble() ::= <<
-try 
-{
-	dbg.EnterRule("<ruleName>");
-	if ( ruleLevel==0 ) {dbg.Commence();}
-	ruleLevel++;
-	dbg.Location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
->>
-
- at rule.postamble() ::= <<
-dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
-}
-finally
-{
-    dbg.ExitRule("<ruleName>");
-    ruleLevel--;
-    if ( ruleLevel==0 ) {dbg.Terminate();}
-}<\n>
->>
-
- at synpred.start() ::= "dbg.BeginBacktrack(backtracking);"
-
- at synpred.stop() ::= "dbg.EndBacktrack(backtracking, success);"
-
-// Common debug event triggers used by region overrides below
-
-enterSubRule() ::=
-    "try { dbg.EnterSubRule(<decisionNumber>);<\n>"
-
-exitSubRule() ::=
-    "} finally { dbg.ExitSubRule(<decisionNumber>); }<\n>"
-
-enterDecision() ::=
-    "try { dbg.EnterDecision(<decisionNumber>);<\n>"
-
-exitDecision() ::=
-    "} finally { dbg.ExitDecision(<decisionNumber>); }<\n>"
-
-enterAlt(n) ::= "dbg.EnterAlt(<n>);<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
- at block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
- at block.postdecision() ::= "<exitDecision()>"
-
- at block.postbranch() ::= "<exitSubRule()>"
-
- at ruleBlock.predecision() ::= "<enterDecision()>"
-
- at ruleBlock.postdecision() ::= "<exitDecision()>"
-
- at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
- at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
- at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
- at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
- at positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
- at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
- at positiveClosureBlock.earlyExitException() ::=
-    "dbg.RecognitionException(eee);<\n>"
-
- at closureBlock.preloop() ::= "<enterSubRule()>"
-
- at closureBlock.postloop() ::= "<exitSubRule()>"
-
- at closureBlock.predecision() ::= "<enterDecision()>"
-
- at closureBlock.postdecision() ::= "<exitDecision()>"
-
- at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
-
- at element.prematch() ::=
-    "dbg.Location(<it.line>,<it.pos>);"
-
- at matchSet.mismatchedSetException() ::=
-    "dbg.RecognitionException(mse);"
-
- at dfaState.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
-
- at dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
-
-dfaDecision(decisionNumber,description) ::= <<
-try 
-{
-    isCyclicDecision = true;
-    <super.dfaDecision(...)>
-}
-catch (NoViableAltException nvae) 
-{
-    dbg.RecognitionException(nvae);
-    throw nvae;
-}
->>
-
- at cyclicDFA.errorMethod() ::= <<
-public virtual void Error(NoViableAltException nvae) 
-{
-    dbg.RecognitionException(nvae);
-}
->>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-EvalPredicate(<pred>,"<description>")
->>
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+ at outputFile.debugPreprocessor() ::= "#define ANTLR_DEBUG"
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.Runtime.Debug;
+using IOException = System.IO.IOException;
+>>
+
+ at genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+public static readonly string[] ruleNames = new string[] {
+    "invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n    ", separator=", ">
+};<\n>
+<endif>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+    private int ruleLevel = 0;
+    public int RuleLevel {
+	get { return ruleLevel; }
+    }
+    public void IncRuleLevel() { ruleLevel++; }
+    public void DecRuleLevel() { ruleLevel--; }
+<if(profile)>
+    <ctorForProfilingRootGrammar()>
+<else>
+    <ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else> <! imported grammar !>
+    public int RuleLevel {
+	get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; }
+    }
+    public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
+    public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
+    <ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+override public bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+    ((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    return base.AlreadyParsedRule(input, ruleIndex);
+}<\n>
+override public void Memoize(IIntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    base.Memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+protected bool EvalPredicate(bool result, string predicate) 
+{
+    dbg.SemanticPredicate(result, predicate);
+    return result;
+}<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+public <name>(<inputStreamType> input)
+    : this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState()) {
+}
+
+public <name>(<inputStreamType> input, int port, RecognizerSharedState state)
+    : base(input, state) {
+    <createListenerAndHandshake()>
+    <parserCtorBody()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+>>
+
+ at parserCtorBody.initializeCyclicDFAs() ::= <<
+InitializeCyclicDFAs(dbg);
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+    this(input, new Profiler(null), new RecognizerSharedState());
+}
+
+public <name>(<inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state)
+    : base(input, dbg, state) {
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}
+<\n>
+>>
+
+
+/** Basically we don't want to set any dbg listeners are root will have it. */
+ctorForDelegateGrammar() ::= <<
+public <name>(<inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+    : base(input, dbg, state) {
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>(<inputStreamType> input, IDebugEventListener dbg)
+    : <@superClassRef>base(input, dbg, new RecognizerSharedState())<@end> {
+<if(profile)>
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+<endif>
+    <parserCtorBody()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy dbg = new DebugEventSocketProxy(this, port, input.TreeAdaptor);
+<else>
+DebugEventSocketProxy dbg = new DebugEventSocketProxy(this, port, null);
+<endif>
+DebugListener = dbg;
+try
+{
+    dbg.Handshake();
+}
+catch (IOException ioe)
+{
+    ReportError(ioe);
+}
+>>
+
+ at genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+ at rule.preamble() ::= <<
+try {
+	dbg.EnterRule(GrammarFileName, "<ruleName>");
+	if ( RuleLevel==0 ) {dbg.Commence();}
+	IncRuleLevel();
+	dbg.Location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
+>>
+
+ at lexer.debugInitializeCyclicDFAs() ::= "IDebugEventListener dbg"
+
+ at lexer.debugAddition() ::= ", dbg"
+
+ at genericParser.debugInitializeCyclicDFAs() ::= "IDebugEventListener dbg"
+
+ at genericParser.debugAddition() ::= ", dbg"
+
+ at rule.postamble() ::= <<
+dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
+}
+finally {
+    dbg.ExitRule(GrammarFileName, "<ruleName>");
+    DecRuleLevel();
+    if ( RuleLevel==0 ) {dbg.Terminate();}
+}<\n>
+>>
+
+ at synpred.start() ::= "dbg.BeginBacktrack(state.backtracking);"
+
+ at synpred.stop() ::= "dbg.EndBacktrack(state.backtracking, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { dbg.EnterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally { dbg.ExitSubRule(<decisionNumber>); }<\n>"
+
+enterDecision() ::=
+    "try { dbg.EnterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally { dbg.ExitDecision(<decisionNumber>); }<\n>"
+
+enterAlt(n) ::= "dbg.EnterAlt(<n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+ at block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+ at block.postdecision() ::= "<exitDecision()>"
+
+ at block.postbranch() ::= "<exitSubRule()>"
+
+ at ruleBlock.predecision() ::= "<enterDecision()>"
+
+ at ruleBlock.postdecision() ::= "<exitDecision()>"
+
+ at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+ at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+ at positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+ at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+ at positiveClosureBlock.earlyExitException() ::=
+    "dbg.RecognitionException(eee<decisionNumber>);<\n>"
+
+ at closureBlock.preloop() ::= "<enterSubRule()>"
+
+ at closureBlock.postloop() ::= "<exitSubRule()>"
+
+ at closureBlock.predecision() ::= "<enterDecision()>"
+
+ at closureBlock.postdecision() ::= "<exitDecision()>"
+
+ at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+ at element.prematch() ::=
+    "dbg.Location(<it.line>,<it.pos>);"
+
+ at matchSet.mismatchedSetException() ::=
+    "dbg.RecognitionException(mse);"
+
+ at dfaState.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
+
+ at dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException(nvae_d<decisionNumber>s<stateNumber>);"
+
+dfaDecision(decisionNumber,description) ::= <<
+try 
+{
+    isCyclicDecision = true;
+    <super.dfaDecision(...)>
+}
+catch (NoViableAltException nvae) 
+{
+    dbg.RecognitionException(nvae);
+    throw nvae;
+}
+>>
+
+ at cyclicDFA.dbgCtor() ::= <<
+    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer, IDebugEventListener dbg) : this(recognizer)
+    {
+		this.dbg = dbg;
+    }
+>> 
+
+ at cyclicDFA.debugMember() ::= <<
+IDebugEventListener dbg;
+
+>>
+
+ at cyclicDFA.errorMethod() ::= <<
+public override void Error(NoViableAltException nvae) 
+{
+    dbg.RecognitionException(nvae);
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+EvalPredicate(<pred>,"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/C/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg
similarity index 62%
rename from src/org/antlr/codegen/templates/C/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg
index b0e5c41..c61b8a1 100644
--- a/src/org/antlr/codegen/templates/C/ST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg
@@ -1,6 +1,8 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -32,75 +34,83 @@ group ST;
 
 @outputFile.imports() ::= <<
 <@super.imports()>
-import org.antlr.stringtemplate.*;
-import org.antlr.stringtemplate.language.*;
-import java.util.HashMap;
+using Antlr.StringTemplate;
+using Antlr.StringTemplate.Language;
+<if(!backtracking)>
+using Hashtable = System.Collections.Hashtable;
+<endif>
+
 >>
 
 /** Add this to each rule's return value struct */
 @returnScope.ruleReturnMembers() ::= <<
-public StringTemplate st;
-public StringTemplate getTemplate() { return st; }
-public String toString() { return st==null?null:st.toString(); }
+private StringTemplate st;
+public StringTemplate ST    { get { return st; } set { st = value; } }
+public override object Template 		{ get { return st; } }
+public override string ToString() 		{ return (st == null) ? null : st.ToString(); }
 >>
 
 @genericParser.members() ::= <<
 <@super.members()>
 protected StringTemplateGroup templateLib =
-  new StringTemplateGroup("<name>Templates", AngleBracketTemplateLexer.class);
+  new StringTemplateGroup("<name>Templates", typeof(AngleBracketTemplateLexer));
 
-public void setTemplateLib(StringTemplateGroup templateLib) {
-  this.templateLib = templateLib;
-}
-public StringTemplateGroup getTemplateLib() {
-  return templateLib;
+public StringTemplateGroup TemplateLib
+{
+ 	get { return this.templateLib; }
+ 	set { this.templateLib = value; }
 }
-/** allows convenient multi-value initialization:
- *  "new STAttrMap().put(...).put(...)"
- */
-public static class STAttrMap extends HashMap {
-  public STAttrMap put(String attrName, Object value) {
-    super.put(attrName, value);
+
+/// \<summary> Allows convenient multi-value initialization:
+///  "new STAttrMap().Add(...).Add(...)"
+/// \</summary>
+protected class STAttrMap : Hashtable
+{
+  public STAttrMap Add(string attrName, object value) 
+  {
+    base.Add(attrName, value);
     return this;
   }
-  public STAttrMap put(String attrName, int value) {
-    super.put(attrName, new Integer(value));
+  public STAttrMap Add(string attrName, int value) 
+  {
+    base.Add(attrName, value);
     return this;
   }
 }
 >>
 
 /** x+=rule when output=template */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
-<listLabel(elem=label+".getTemplate()",...)>
+<listLabel(elem=label+".Template",...)>
 >>
 
 rewriteTemplate(alts) ::= <<
 
 // TEMPLATE REWRITE
 <if(backtracking)>
-if ( backtracking==0 ) {
+if ( <actions.(actionScope).synpredgate> )
+{
   <alts:rewriteTemplateAlt(); separator="else ">
-  <if(rewrite)><replaceTextInLine()><endif>
+  <if(rewriteMode)><replaceTextInLine()><endif>
 }
 <else>
 <alts:rewriteTemplateAlt(); separator="else ">
-<if(rewrite)><replaceTextInLine()><endif>
+<if(rewriteMode)><replaceTextInLine()><endif>
 <endif>
 >>
 
 replaceTextInLine() ::= <<
 <if(TREE_PARSER)>
-((TokenRewriteStream)input.getTokenStream()).replace(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start),
-  retval.st);
+((TokenRewriteStream)input.TokenStream).Replace(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start),
+  retval.ST);
 <else>
-((TokenRewriteStream)input).replace(
-  ((Token)retval.start).getTokenIndex(),
-  input.LT(-1).getTokenIndex(),
-  retval.st);
+((TokenRewriteStream)input).Replace(
+  ((IToken)retval.Start).TokenIndex,
+  input.LT(-1).TokenIndex,
+  retval.ST);
 <endif>
 >>
 
@@ -108,11 +118,11 @@ rewriteTemplateAlt() ::= <<
 // <it.description>
 <if(it.pred)>
 if (<it.pred>) {
-    retval.st = <it.alt>;
+    retval.ST = <it.alt>;
 }<\n>
 <else>
 {
-    retval.st = <it.alt>;
+    retval.ST = <it.alt>;
 }<\n>
 <endif>
 >>
@@ -128,22 +138,22 @@ null;
  *  template.
  */
 rewriteExternalTemplate(name,args) ::= <<
-templateLib.getInstanceOf("<name>"<if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+templateLib.GetInstanceOf("<name>"<if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
   <endif>)
 >>
 
 /** expr is a string expression that says what template to load */
 rewriteIndirectTemplate(expr,args) ::= <<
-templateLib.getInstanceOf(<expr><if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+templateLib.GetInstanceOf(<expr><if(args)>,
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
   <endif>)
 >>
 
 /** Invoke an inline template with a set of attribute name/value pairs */
 rewriteInlineTemplate(args, template) ::= <<
 new StringTemplate(templateLib, "<template>"<if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  new STAttrMap()<args:{a | .Add("<a.name>", <a.value>)}>
   <endif>)
 >>
 
@@ -154,7 +164,7 @@ rewriteAction(action) ::= <<
 
 /** An action has %st.attrName=expr; or %{st}.attrName=expr; */
 actionSetAttribute(st,attrName,expr) ::= <<
-(<st>).setAttribute("<attrName>",<expr>);
+(<st>).SetAttribute("<attrName>",<expr>);
 >>
 
 /** Translate %{stringExpr} */
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg
new file mode 100644
index 0000000..a3678f1
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg
@@ -0,0 +1,433 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+using Antlr.Runtime.Tree;
+using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;<\n>
+<endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+InitializeTreeAdaptor();
+if ( TreeAdaptor == null )
+	TreeAdaptor = new CommonTreeAdaptor();
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+// Implement this function in your helper file to use a custom tree adaptor
+partial void InitializeTreeAdaptor();
+ITreeAdaptor adaptor;
+
+public ITreeAdaptor TreeAdaptor
+{
+	get
+	{
+		return adaptor;
+	}
+	set
+	{
+		this.adaptor = value;
+		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+}
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+internal <ASTLabelType> tree;
+public override object Tree { get { return tree; } }
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+	:{RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+	:{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = (<ASTLabelType>)adaptor.Nil();<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.Add(<label>.Tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.Add(<label>.Tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+{
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.Nil();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+input.ReplaceChildren(adaptor.GetParent(retval.start),
+                      adaptor.GetChildIndex(retval.start),
+                      adaptor.GetChildIndex(_last),
+                      retval.tree);
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;<\n>
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;
+<endif>
+<endif>
+<if(backtracking)>
+}<\n>
+<endif>
+}
+
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+	:{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+	separator="\n"
+>
+<referencedWildcardListLabels
+	:{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+	separator="\n"
+>
+<referencedRuleLabels
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.tree:null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.HasNext}; separator="||">) )
+{
+	throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>)
+{
+	<a.alt>
+}<\n>
+<else>
+{
+	<a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.AddChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.Create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.NextNode()
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg
new file mode 100644
index 0000000..06f5875
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg
@@ -0,0 +1,100 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ */
+group ASTDbg;
+
+parserMembers() ::= <<
+// Implement this function in your helper file to use a custom tree adaptor
+partial void InitializeTreeAdaptor();
+protected DebugTreeAdaptor adaptor;
+
+public ITreeAdaptor TreeAdaptor
+{
+	get
+	{
+		return adaptor;
+	}
+	set
+	{
+<if(grammar.grammarIsRoot)>
+		this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+<else>
+		this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
+<endif><\n>
+		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+}<\n>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, <if(TREE_PARSER)>input.TreeAdaptor<else>adaptor<endif> );
+DebugListener = proxy;
+<inputStreamType> = new Debug<inputStreamType>( input, proxy );
+try
+{
+	proxy.Handshake();
+}
+catch ( IOException ioe )
+{
+	ReportError( ioe );
+}
+>>
+
+ at ctorForRootGrammar.finally() ::= <<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+proxy.TreeAdaptor = adap;
+>>
+
+ at ctorForProfilingRootGrammar.finally() ::=<<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+proxy.TreeAdaptor = adap;
+>>
+
+ at ctorForPredefinedListener.superClassRef() ::= ": base( input, dbg )"
+
+ at ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)><! don't create new adaptor for delegates !>
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;<\n>
+<endif>
+>>
+
+ at rewriteElement.pregen() ::= "dbg.Location( <e.line>, <e.pos> );"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg
new file mode 100644
index 0000000..ffd0bd0
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg
@@ -0,0 +1,194 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+retval.tree = (<ASTLabelType>)adaptor.ErrorNode(input, retval.start, input.LT(-1), re);
+<! System.out.WriteLine("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.AddChild(root_0, <label>.Tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)adaptor.Create(<label>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+adaptor.SetTokenBoundaries(retval.tree, retval.start, retval.stop);
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg
new file mode 100644
index 0000000..7487954
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg
@@ -0,0 +1,300 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> _first_0 = null;
+<ASTLabelType> _last = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = (<ASTLabelType>)_first_0;
+if ( adaptor.GetParent(retval.tree)!=null && adaptor.IsNil( adaptor.GetParent(retval.tree) ) )
+    retval.tree = (<ASTLabelType>)adaptor.GetParent(retval.tree);
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = null;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==TokenTypes.Down ) {
+    Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<treeLevel>, <label>.Tree);
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change NextToken to NextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.NextNode())
+<else>
+stream_<token>.NextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg
new file mode 100644
index 0000000..0d83e15
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg
@@ -0,0 +1,1509 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+group CSharp3 implements ANTLRCore;
+
+csharpVisibilityMap ::= [
+	"private":"private",
+	"protected":"protected",
+	"public":"public",
+	"fragment":"private",
+	default:"private"
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(	LEXER,PARSER,TREE_PARSER, actionScope, actions,
+			docComment, recognizer,
+			name, tokens, tokenNames, rules, cyclicDFAs,
+			bitsets, buildTemplate, buildAST, rewriteMode, profile,
+			backtracking, synpreds, memoize, numRules,
+			fileName, ANTLRVersion, generatedTimestamp, trace,
+			scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+// The variable 'variable' is assigned but its value is never used.
+#pragma warning disable 219
+// Unreachable code detected.
+#pragma warning disable 162
+
+<actions.(actionScope).header>
+
+<@imports>
+using System.Collections.Generic;
+using Antlr.Runtime;
+<if(TREE_PARSER)>
+using Antlr.Runtime.Tree;
+using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
+<endif>
+using Stack = System.Collections.Generic.Stack\<object>;
+using List = System.Collections.IList;
+using ArrayList = System.Collections.Generic.List\<object>;
+<if(backtracking)>
+using Map = System.Collections.IDictionary;
+using HashMap = System.Collections.Generic.Dictionary\<object, object>;
+<endif>
+<@end>
+
+<if(actions.(actionScope).namespace)>
+namespace <actions.(actionScope).namespace>
+{
+<endif>
+
+<docComment>
+<recognizer>
+<if(actions.(actionScope).namespace)>
+
+} // namespace <actions.(actionScope).namespace>
+
+<endif>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="CommonToken",
+      filterMode, superClass="Lexer") ::= <<
+[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
+[System.CLSCompliant(false)]
+public partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+	<tokens:{public const int <it.name>=<it.type>;}; separator="\n">
+	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<actions.lexer.members>
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+	public <grammar.recognizerName>() {}<! needed by subclasses !>
+	public <grammar.recognizerName>( ICharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+		: this( input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}> )
+	{
+	}
+	public <grammar.recognizerName>( ICharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+		: base( input, state )
+	{
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+		state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<numRules>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+		<grammar.directDelegates:
+		 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+		<grammar.delegators:
+		 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+	}
+	public override string GrammarFileName { get { return "<fileName>"; } }
+
+<if(filterMode)>
+	<filteringNextToken()>
+<endif>
+	<rules; separator="\n\n">
+
+	<insertLexerSynpreds(synpreds)>
+
+	#region DFA
+	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+
+	protected override void InitDFAs()
+	{
+		base.InitDFAs();
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>( this<if(dfa.specialStateSTs)>, new SpecialStateTransitionHandler( specialStateTransition<dfa.decisionNumber> )<endif> );}; separator="\n">
+	}
+
+	<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+	#endregion
+
+}
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+public override IToken NextToken()
+{
+	for ( ; ;)
+	{
+		if ( input.LA(1)==CharStreamConstants.EndOfFile )
+		{
+			return Tokens.EndOfFile;
+		}
+		state.token = null;
+		state.channel = TokenChannels.Default;
+		state.tokenStartCharIndex = input.Index;
+		state.tokenStartCharPositionInLine = input.CharPositionInLine;
+		state.tokenStartLine = input.Line;
+		state.text = null;
+		try
+		{
+			int m = input.Mark();
+			state.backtracking=1;<! means we won't throw slow exception !>
+			state.failed=false;
+			mTokens();
+			state.backtracking=0;
+			<! mTokens backtracks with synpred at backtracking==2
+			   and we set the synpredgate to allow actions at level 1. !>
+			if ( state.failed )
+			{
+				input.Rewind(m);
+				input.Consume(); <! advance one char and try again !>
+			}
+			else
+			{
+				Emit();
+				return state.token;
+			}
+		}
+		catch ( RecognitionException re )
+		{
+			// shouldn't happen in backtracking mode, but...
+			ReportError(re);
+			Recover(re);
+		}
+	}
+}
+
+public override void Memoize( IIntStream input, int ruleIndex, int ruleStartIndex )
+{
+	if ( state.backtracking > 1 )
+		base.Memoize( input, ruleIndex, ruleStartIndex );
+}
+
+public override bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+	if ( state.backtracking > 1 )
+		return base.AlreadyParsedRule(input, ruleIndex);
+
+	return false;
+}
+>>
+
+actionGate() ::= "state.backtracking == 0"
+
+filteringActionGate() ::= "state.backtracking==1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              ASTLabelType="object", labelType, members, rewriteElementType,
+              filterMode) ::= <<
+[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
+[System.CLSCompliant(false)]
+public partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+<if(grammar.grammarIsRoot)>
+	internal static readonly string[] tokenNames = new string[] {
+		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+	};<\n>
+<endif>
+	<tokens:{public const int <it.name>=<it.type>;}; separator="\n">
+
+	// delegates
+	<grammar.delegates:
+		 {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	// delegators
+	<grammar.delegators:
+		 {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<@members>
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+public <grammar.recognizerName>( <inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+	: this( input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}> )
+{
+}
+public <grammar.recognizerName>( <inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+	: base( input, state )
+{
+	<parserCtorBody()>
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+	<grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+}
+	<@end>
+
+	public override string[] TokenNames { get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; } }
+	public override string GrammarFileName { get { return "<fileName>"; } }
+
+	<members>
+
+	#region Rules
+	<rules; separator="\n\n">
+	#endregion Rules
+
+<if(grammar.delegatedRules)>
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+	#region Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+	public <returnType()> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) <!throws RecognitionException !>\{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}; separator="\n">
+	#endregion Delegated rules<\n>
+<endif>
+
+	<insertSynpreds(synpreds)>
+
+<if(cyclicDFAs)>
+	#region DFA
+	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+
+	protected override void InitDFAs()
+	{
+		base.InitDFAs();
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>( this<if(dfa.specialStateSTs)>, new SpecialStateTransitionHandler( specialStateTransition<dfa.decisionNumber> )<endif> );}; separator="\n">
+	}
+
+	<cyclicDFAs:cyclicDFA()><! dump tables for all DFA !>
+	#endregion DFA<\n>
+<endif>
+
+<if(bitsets)>
+	#region Follow sets
+	private static class Follow
+	{
+		<bitsets:bitset(name={_<it.name>_in_<it.inName><it.tokenIndex>},
+							words64=it.bits)>
+	}
+	#endregion Follow sets<\n>
+<endif>
+}
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<length(grammar.allImportedRules)>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="object", superClass="Parser", labelType="IToken",
+       members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="IToken", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object",
+           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+           members={<actions.treeparser.members>},
+           filterMode) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+public <!final !>void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)<! throws RecognitionException!>
+{
+	<ruleLabelDefs()>
+<if(trace)>
+	traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+	try
+	{
+		<block>
+	}
+	finally
+	{
+		traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+	}
+<else>
+	<block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+insertLexerSynpreds(synpreds) ::= <<
+<insertSynpreds(synpreds)>
+>>
+
+insertSynpreds(synpreds) ::= <<
+<if(synpreds)>
+#region Synpreds
+bool EvaluatePredicate( System.Action fragment )
+{
+	state.backtracking++;
+	<@start()>
+	int start = input.Mark();
+	try
+	{
+		fragment();
+	}
+	catch ( RecognitionException re )
+	{
+		System.Console.Error.WriteLine("impossible: "+re);
+	}
+	bool success = !state.failed;
+	input.Rewind(start);
+	<@stop()>
+	state.backtracking--;
+	state.failed=false;
+	return success;
+}
+#endregion Synpreds<\n>
+<endif>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( state.backtracking>0 && AlreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.backtracking>0) {state.failed=true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+<csharpVisibilityMap.(ruleDescriptor.modifier)> <returnType()> <ruleName>( <ruleDescriptor.parameterScope:parameterScope(scope=it)> )<! throws RecognitionException!>
+{
+	<if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+	<ruleScopeSetUp()>
+	<ruleDeclarations()>
+	<ruleLabelDefs()>
+	<ruleDescriptor.actions.init>
+	<@preamble()>
+	try
+	{
+		<ruleMemoization(name=ruleName)>
+		<block>
+		<ruleCleanUp()>
+		<(ruleDescriptor.actions.after):execAction()>
+	}
+<if(exceptions)>
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+	<actions.(actionScope).rulecatch>
+<else>
+	catch ( RecognitionException re )
+	{
+		ReportError(re);
+		Recover(input,re);
+	<@setErrorReturnValue()>
+	}<\n>
+<endif>
+<endif>
+<endif>
+	finally
+	{
+		<if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+		<memoize()>
+		<ruleScopeCleanUp()>
+		<finally>
+	}
+	<@postamble()>
+	return <ruleReturnValue()>;
+}
+// $ANTLR end "<ruleName>"
+>>
+
+catch(decl,action) ::= <<
+catch ( <e.decl> )
+{
+	<e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = new <returnType()>();
+retval.start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.Index;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.Push(new <it>_scope());<it>_scopeInit(<it>_stack.Peek());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.Push(new <it.name>_scope());<it.name>_scopeInit(<it.name>_stack.Peek());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_scopeAfter(<it>_stack.Peek());<it>_stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_scopeAfter(<it.name>_stack.Peek());<it.name>_stack.Pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{<labelType> <it.label.text>=null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:ruleLabelDef(label=it); separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( state.backtracking>0 ) { Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+<csharpVisibilityMap.(ruleDescriptor.modifier)> void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)
+{
+	<if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+	<ruleScopeSetUp()>
+	<ruleDeclarations()>
+	try
+	{
+<if(nakedBlock)>
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block><\n>
+<else>
+		int _type = <ruleName>;
+		int _channel = DefaultTokenChannel;
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+		<ruleCleanUp()>
+		state.type = _type;
+		state.channel = _channel;
+		<(ruleDescriptor.actions.after):execAction()>
+<endif>
+	}
+	finally
+	{
+		<if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+		<ruleScopeCleanUp()>
+		<memoize()>
+	}
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+public override void mTokens()<! throws RecognitionException!>
+{
+	<block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch ( alt<decisionNumber> )
+{
+<alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch ( alt<decisionNumber> )
+{
+<alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+for ( ; ; )
+{
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch ( alt<decisionNumber> )
+	{
+	<alts:altSwitchCase()>
+	default:
+		if ( cnt<decisionNumber> >= 1 )
+			goto loop<decisionNumber>;
+
+		<ruleBacktrackFailure()>
+		EarlyExitException eee<decisionNumber> = new EarlyExitException( <decisionNumber>, input );
+		<@earlyExitException()>
+		throw eee<decisionNumber>;
+	}
+	cnt<decisionNumber>++;
+}
+loop<decisionNumber>:
+	;
+
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+for ( ; ; )
+{
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch ( alt<decisionNumber> )
+	{
+	<alts:altSwitchCase()>
+	default:
+		goto loop<decisionNumber>;
+	}
+}
+
+loop<decisionNumber>:
+	;
+
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i>:
+	<@prealt()>
+	<it>
+	break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=(<labelType>)<endif>Match(input,<token>,Follow._<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label>==null) list_<label>=new ArrayList();
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> )
+{
+	input.Consume();
+	<postmatchCode>
+<if(!LEXER)>
+	state.errorRecovery=false;
+<endif>
+	<if(backtracking)>state.failed=false;<endif>
+}
+else
+{
+	<ruleBacktrackFailure()>
+	MismatchedSetException mse = new MismatchedSetException(null,input);
+	<@mismatchedSetException()>
+<if(LEXER)>
+	Recover(mse);
+	throw mse;
+<else>
+	throw mse;
+	<! use following code to make it recover inline; remove throw mse;
+	recoverFromMismatchedSet(input,mse,Follow._set_in_<ruleName><elementIndex>);
+	!>
+<endif>
+}<\n>
+>>
+
+matchSetUnchecked(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+input.Consume();
+<postmatchCode>
+<if(!LEXER)>
+	state.errorRecovery=false;
+<endif>
+	<if(backtracking)>state.failed=false;<endif>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+int <label>Start = CharIndex;
+Match(<string>); <checkRuleBacktrackFailure()>
+<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start, CharIndex-1);
+<else>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(Follow._<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+state._fsp--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;
+Match(EndOfFile); <checkRuleBacktrackFailure()>
+<labelType> <label> = new <labelType>(input, EndOfFile, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);
+<else>
+Match(EndOfFile); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==TokenTypes.Down )
+{
+	Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+	<children:element()>
+	Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) )
+{
+	<ruleBacktrackFailure()>
+	throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else
+{
+<if(eotPredictsAlt)>
+	alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+	<ruleBacktrackFailure()>
+	NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+	<@noViableAltException()>
+	throw nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else
+{
+	alt<decisionNumber>=<eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
+{
+	<targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) )
+{
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;<\n>
+<else>
+	{
+		<ruleBacktrackFailure()>
+		NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+		<@noViableAltException()>
+		throw nvae;
+	}<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) )
+{
+<edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) )
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+	{
+	<targetState>
+	}
+	break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+class DFA<dfa.decisionNumber> : DFA
+{
+
+	const string DFA<dfa.decisionNumber>_eotS =
+		"<dfa.javaCompressedEOT; wrap="\"+\n\t\t\"">";
+	const string DFA<dfa.decisionNumber>_eofS =
+		"<dfa.javaCompressedEOF; wrap="\"+\n\t\t\"">";
+	const string DFA<dfa.decisionNumber>_minS =
+		"<dfa.javaCompressedMin; wrap="\"+\n\t\t\"">";
+	const string DFA<dfa.decisionNumber>_maxS =
+		"<dfa.javaCompressedMax; wrap="\"+\n\t\t\"">";
+	const string DFA<dfa.decisionNumber>_acceptS =
+		"<dfa.javaCompressedAccept; wrap="\"+\n\t\t\"">";
+	const string DFA<dfa.decisionNumber>_specialS =
+		"<dfa.javaCompressedSpecial; wrap="\"+\n\t\t\"">}>";
+	static readonly string[] DFA<dfa.decisionNumber>_transitionS =
+		{
+			<dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+		};
+
+	static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+	static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+	static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+	static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+	static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+	static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+	static readonly short[][] DFA<dfa.decisionNumber>_transition;
+
+	static DFA<dfa.decisionNumber>()
+	{
+		int numStates = DFA<dfa.decisionNumber>_transitionS.Length;
+		DFA<dfa.decisionNumber>_transition = new short[numStates][];
+		for ( int i=0; i \< numStates; i++ )
+		{
+			DFA<dfa.decisionNumber>_transition[i] = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
+		}
+	}
+
+	public DFA<dfa.decisionNumber>( BaseRecognizer recognizer<if(dfa.specialStateSTs)>, SpecialStateTransitionHandler specialStateTransition<endif> )
+<if(dfa.specialStateSTs)>
+		: base( specialStateTransition )
+<endif>
+	{
+		this.recognizer = recognizer;
+		this.decisionNumber = <dfa.decisionNumber>;
+		this.eot = DFA<dfa.decisionNumber>_eot;
+		this.eof = DFA<dfa.decisionNumber>_eof;
+		this.min = DFA<dfa.decisionNumber>_min;
+		this.max = DFA<dfa.decisionNumber>_max;
+		this.accept = DFA<dfa.decisionNumber>_accept;
+		this.special = DFA<dfa.decisionNumber>_special;
+		this.transition = DFA<dfa.decisionNumber>_transition;
+	}
+	public override string GetDescription()
+	{
+		return "<dfa.description>";
+	}
+	<@errorMethod()>
+}<\n>
+<if(dfa.specialStateSTs)>
+int specialStateTransition<dfa.decisionNumber>( DFA dfa, int s, IIntStream _input )<! throws NoViableAltException!>
+{
+	<if(LEXER)>
+	IIntStream input = _input;
+	<endif>
+	<if(PARSER)>
+	ITokenStream input = (ITokenStream)_input;
+	<endif>
+	<if(TREE_PARSER)>
+	ITreeNodeStream input = (ITreeNodeStream)_input;
+	<endif>
+	int _s = s;
+	switch ( s )
+	{
+	<dfa.specialStateSTs:{state |
+	case <i0>:<! compressed special state numbers 0..n-1 !>
+		<state>}; separator="\n">
+	}
+<if(backtracking)>
+	if (state.backtracking>0) {state.failed=true; return -1;}<\n>
+<endif>
+	NoViableAltException nvae = new NoViableAltException(dfa.GetDescription(), <dfa.decisionNumber>, _s, input);
+	dfa.Error(nvae);
+	throw nvae;
+}<\n>
+<endif>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)><! get next lookahead symbol to test edges, then rewind !>
+int index<decisionNumber>_<stateNumber> = input.Index;
+input.Rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)><! return input cursor to state before we rewound !>
+input.Seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "EvaluatePredicate(<pred>_fragment)"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+attributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected class <scope.name>_scope
+{
+	<scope.attributes:{public <it.decl>;}; separator="\n">
+}
+<if(scope.actions.scopeinit)>
+protected void <scope.name>_scopeInit( <scope.name>_scope scope )
+{
+	<scope.actions.scopeinit>
+}<\n>
+<else>
+partial void <scope.name>_scopeInit( <scope.name>_scope scope );
+<endif>
+<if(scope.actions.scopeafter)>
+protected void <scope.name>_scopeAfter( <scope.name>_scope scope )
+{
+	<scope.actions.scopeafter>
+}<\n>
+<else>
+partial void <scope.name>_scopeAfter( <scope.name>_scope scope );
+<endif>
+protected Stack\<<scope.name>_scope\> <scope.name>_stack = new Stack\<<scope.name>_scope\>();<\n>
+<endif>
+>>
+
+globalAttributeScope(scope) ::= <<
+<attributeScope(...)>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<attributeScope(...)>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+default(<typeName>)
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public <!static !>class <ruleDescriptor:returnStructName()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope
+{
+	<scope.attributes:{public <it.decl>;}; separator="\n">
+	<@ruleReturnMembers()>
+}
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name> =<expr>;
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name> =<expr>;
+<else>
+((<scope>_scope)<scope>_stack.Peek()).<attr.name> =<expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!=null?<scope>.<attr.name>:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>;
+<else>
+<attr.name> =<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.Text:null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.Type:0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.Line:0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.CharPositionInLine:0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.Channel:0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.TokenIndex:0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?Integer.valueOf(<scope>.Text):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Start):null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Stop):null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.Tree):null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(<scope>!=null?(input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
+  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start))):null)
+<else>
+(<scope>!=null?input.ToString(<scope>.start,<scope>.stop):null)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.st:null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(<scope>!=null?<scope>.Type:0)"
+
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(<scope>!=null?<scope>.Line:0)"
+
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= 
+    "(<scope>!=null?<scope>.CharPositionInLine:-1)"
+
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(<scope>!=null?<scope>.Channel:0)"
+
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(<scope>!=null?<scope>.TokenIndex:0)"
+
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(<scope>!=null?<scope>.Text:null)"
+
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(<scope>!=null?Integer.valueOf(<scope>.Text):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.Start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.Stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.Tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start))
+<else>
+input.ToString(retval.Start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+	<action>
+}
+<else>
+<action>
+<endif>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{<it>UL};separator=",">});<\n>
+>>
+
+codeFileExtension() ::= ".cs"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg
new file mode 100644
index 0000000..00249f4
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg
@@ -0,0 +1,299 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.Runtime.Debug;
+using IOException = System.IO.IOException;
+>>
+
+ at genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+public static readonly string[] ruleNames =
+	new string[]
+	{
+		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n	", separator=", ">
+	};<\n>
+<endif>
+<if(grammar.grammarIsRoot)><! grammar imports other grammar(s) !>
+	int ruleLevel = 0;
+	public virtual int RuleLevel { get { return ruleLevel; } }
+	public virtual void IncRuleLevel() { ruleLevel++; }
+	public virtual void DecRuleLevel() { ruleLevel--; }
+<if(profile)>
+	<ctorForProfilingRootGrammar()>
+<else>
+	<ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else><! imported grammar !>
+	public int RuleLevel { get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; } }
+	public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
+	public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
+	<ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+public virtual bool AlreadyParsedRule( IIntStream input, int ruleIndex )
+{
+	((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+	return super.AlreadyParsedRule(input, ruleIndex);
+}<\n>
+public virtual void Memoize( IIntStream input, int ruleIndex, int ruleStartIndex )
+{
+	((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+	super.Memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+protected virtual bool EvalPredicate( bool result, string predicate )
+{
+	dbg.SemanticPredicate( result, predicate );
+	return result;
+}<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+public <name>( <inputStreamType> input )
+	: this( input, DebugEventSocketProxy.DefaultDebuggerPort, new RecognizerSharedState() )
+{
+}
+public <name>( <inputStreamType> input, int port, RecognizerSharedState state )
+	: base( input, state )
+{
+	<parserCtorBody()>
+	<createListenerAndHandshake()>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<@finally()>
+}<\n>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>( <inputStreamType> input )
+	: this( input, new Profiler(null), new RecognizerSharedState() )
+{
+}
+public <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state )
+	: base( input, dbg, state )
+{
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+	<parserCtorBody()>
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners are root will have it. */
+ctorForDelegateGrammar() ::= <<
+public <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+	: base( input, dbg, state )
+{
+	<parserCtorBody()>
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>( input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>( <inputStreamType> input, IDebugEventListener dbg )
+	<@superClassRef>: base( input, dbg, new RecognizerSharedState() )<@end>
+{
+<if(profile)>
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+<endif>
+	<parserCtorBody()>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<@finally()>
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, input.TreeAdaptor );<\n>
+<else>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, null );<\n>
+<endif>
+DebugListener = proxy;
+try
+{
+	proxy.Handshake();
+}
+catch ( IOException ioe )
+{
+	ReportError( ioe );
+}
+>>
+
+ at genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+ at rule.preamble() ::= <<
+try
+{
+	dbg.EnterRule( GrammarFileName, "<ruleName>" );
+	if ( RuleLevel == 0 )
+	{
+		dbg.Commence();
+	}
+	IncRuleLevel();
+	dbg.Location( <ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine> );<\n>
+>>
+
+ at rule.postamble() ::= <<
+dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
+}
+finally
+{
+	dbg.ExitRule( GrammarFileName, "<ruleName>" );
+	DecRuleLevel();
+	if ( RuleLevel == 0 )
+	{
+		dbg.Terminate();
+	}
+}<\n>
+>>
+
+ at insertSynpreds.start() ::= "dbg.BeginBacktrack( state.backtracking );"
+
+ at insertSynpreds.stop() ::= "dbg.EndBacktrack( state.backtracking, success );"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::= <<
+try
+{
+	dbg.EnterSubRule( <decisionNumber> );<\n>
+>>
+
+exitSubRule() ::= <<
+}
+finally
+{
+	dbg.ExitSubRule( <decisionNumber> );
+}<\n>
+>>
+
+enterDecision() ::= <<
+try
+{
+	dbg.EnterDecision( <decisionNumber> );<\n>
+>>
+
+exitDecision() ::= <<
+}
+finally
+{
+	dbg.ExitDecision( <decisionNumber> );
+}<\n>
+>>
+
+enterAlt(n) ::= "dbg.EnterAlt( <n> );<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+ at block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+ at block.postdecision() ::= "<exitDecision()>"
+
+ at block.postbranch() ::= "<exitSubRule()>"
+
+ at ruleBlock.predecision() ::= "<enterDecision()>"
+
+ at ruleBlock.postdecision() ::= "<exitDecision()>"
+
+ at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+ at positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+ at positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+ at positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+ at positiveClosureBlock.earlyExitException() ::=
+	"dbg.RecognitionException( eee<decisionNumber> );<\n>"
+
+ at closureBlock.preloop() ::= "<enterSubRule()>"
+
+ at closureBlock.postloop() ::= "<exitSubRule()>"
+
+ at closureBlock.predecision() ::= "<enterDecision()>"
+
+ at closureBlock.postdecision() ::= "<exitDecision()>"
+
+ at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+ at element.prematch() ::=
+	"dbg.Location( <it.line>, <it.pos> );"
+
+ at matchSet.mismatchedSetException() ::=
+	"dbg.RecognitionException( mse );"
+
+ at dfaState.noViableAltException() ::= "dbg.RecognitionException( nvae );"
+
+ at dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException( nvae );"
+
+dfaDecision(decisionNumber,description) ::= <<
+try
+{
+	isCyclicDecision = true;
+	<super.dfaDecision(...)>
+}
+catch ( NoViableAltException nvae )
+{
+	dbg.RecognitionException( nvae );
+	throw nvae;
+}
+>>
+
+ at cyclicDFA.errorMethod() ::= <<
+public override void Error( NoViableAltException nvae )
+{
+	((DebugParser)recognizer).dbg.RecognitionException( nvae );
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+EvalPredicate( <pred>, "<description>" )
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg
new file mode 100644
index 0000000..1453d76
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg
@@ -0,0 +1,167 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+group ST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr3.ST;
+using Antlr3.ST.Language;
+>>
+
+/** Add this to each rule's return value struct */
+ at returnScope.ruleReturnMembers() ::= <<
+public StringTemplate st;
+public object getTemplate() { return st; }
+public override string ToString() { return (st==null) ? null : st.ToString(); }
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+protected StringTemplateGroup templateLib = new StringTemplateGroup("<name>Templates", typeof(AngleBracketTemplateLexer) );
+
+public StringTemplateGroup TemplateLib
+{
+	get { return templateLib; }
+	set { templateLib = value; }
+}
+
+///** allows convenient multi-value initialization:
+// *  "new STAttrMap().put(...).put(...)"
+// */
+//public static class STAttrMap extends HashMap {
+//  public STAttrMap put(String attrName, object value) {
+//    super.put(attrName, value);
+//    return this;
+//  }
+//  public STAttrMap put(String attrName, int value) {
+//    super.put(attrName, new Integer(value));
+//    return this;
+//  }
+//}
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTemplate()",...)>
+>>
+
+rewriteTemplate(alts) ::= <<
+
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> )
+{
+	<alts:rewriteTemplateAlt(); separator="else ">
+	<if(rewriteMode)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewriteMode)><replaceTextInLine()><endif>
+<endif>
+>>
+
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+((TokenRewriteStream)input.TokenStream).replace(
+	input.TreeAdaptor.GetTokenStartIndex(retval.start),
+	input.TreeAdaptor.GetTokenStopIndex(retval.start),
+	retval.st );
+<else>
+((TokenRewriteStream)input).replace(
+	((IToken)retval.start).TokenIndex,
+	input.LT(-1).TokenIndex,
+	retval.st );
+<endif>
+>>
+
+rewriteTemplateAlt() ::= <<
+// <it.description>
+<if(it.pred)>
+if (<it.pred>)
+{
+	retval.st = <it.alt>;
+}<\n>
+<else>
+{
+	retval.st = <it.alt>;
+}<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+null;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <<
+templateLib.getInstanceOf("<name>"<if(args)>,
+	new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+	<endif>)
+>>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <<
+templateLib.getInstanceOf(<expr><if(args)>,
+	new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+	<endif>)
+>>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <<
+new StringTemplate(templateLib, "<template>"<if(args)>,
+	new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+	<endif>)
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>).setAttribute("<attrName>",<expr>);
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+new StringTemplate(templateLib,<stringExpr>)
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg
new file mode 100644
index 0000000..1baef3e
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg
@@ -0,0 +1,445 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2008 Erik van Bilsen
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()><if(!TREE_PARSER)><! tree parser would already have imported !>  Antlr.Runtime.Tree,<\n><endif>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+ at genericParser.membersConstructor() ::= <<
+<@super.membersConstructor()>
+<parserMembersConstructor()>
+>>
+
+ at genericParser.membersImplementation() ::= <<
+<@super.membersImplementation()>
+<parserMembersImplementation()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+  strict protected
+    FAdaptor: ITreeAdaptor;
+    procedure SetAdaptor(const Value: ITreeAdaptor);
+    property Adaptor: ITreeAdaptor read FAdaptor;
+  public
+    property TreeAdaptor: ITreeAdaptor read FAdaptor write SetAdaptor;
+
+>>
+
+parserMembersConstructor() ::= <<
+FAdaptor := TCommonTreeAdaptor.Create;
+>>
+
+parserMembersImplementation() ::= <<
+procedure T<grammar.recognizerName>.SetAdaptor(const Value: ITreeAdaptor);
+begin
+  FAdaptor := Value;
+  <grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor := FAdaptor;}>
+end;
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+function T<grammar.recognizerName>.T<ruleDescriptor:returnStructName()>.GetTree: IANTLRInterface;
+begin
+  Result := FTree;
+end;
+
+procedure T<grammar.recognizerName>.T<ruleDescriptor:returnStructName()>.SetTree(const Value: IANTLRInterface);
+begin
+  FTree := Value as I<ASTLabelType>;
+end;
+>>
+
+ at returnScopeDeclaration.ruleReturnMembers() ::= <<
+strict private
+  FTree: I<ASTLabelType>;
+protected
+  { IRuleReturnScope }
+  function GetTree: IANTLRInterface; override;
+  procedure SetTree(const Value: IANTLRInterface); override;
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+Root[0] := nil;<\n>
+>>
+
+ruleDeclarationVars() ::= <<
+<super.ruleDeclarationVars()>
+Root: array [0..63] of I<ASTLabelType>;
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{<it.label.text>_tree := nil;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<it.label.text>_tree := nil;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites:{Locals['Stream_<it>'] := TRewriteRule<rewriteElementType>Stream.Create(Adaptor,'token <it>');}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites:{Locals['Stream_<it>'] := TRewriteRuleSubtreeStream.Create(Adaptor,'rule <it>');}; separator="\n">
+>>
+
+ruleLabelDefVars() ::= <<
+<super.ruleLabelDefVars()>
+<ruleDescriptor.tokenLabels:{<it.label.text>_tree: I<ASTLabelType>;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<it.label.text>_tree: I<ASTLabelType>;}; separator="\n">
+>>
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+Root[0] := Adaptor.GetNilNode as I<ASTLabelType>;
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>(Locals['Stream_<token>'] as IRewriteRuleElementStream).Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>(Locals['Stream_<token>'] as IRewriteRuleElementStream).Add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+wildcardTrack(label,elementIndex) ::= <<
+<super.wildcard(...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>(Locals['Stream_<rule.name>'] as IRewriteRuleElementStream).Add(<label>.Tree);<\n>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>(Locals['Stream_<rule>'] as IRewriteRuleElementStream).Add(<label>.Tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+    referencedWildcardLabels,
+    referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements:          <referencedElementsDeep; separator=", ">
+// token labels:      <referencedTokenLabels; separator=", ">
+// rule labels:       <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels:  <referencedRuleListLabels; separator=", ">
+<if(backtracking)>
+if (State.Backtracking = 0) then 
+begin<\n>
+<endif>
+<prevRuleRootRef()>.Tree := Root[0];
+<rewriteCodeLabels()>
+Root[0] := Adaptor.GetNilNode as I<ASTLabelType>;
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.rulePostProcessing(root[0]);
+input.ReplaceChildren(adaptor.GetParent(retval.Start),
+                      adaptor.GetChildIndex(retval.Start),
+                      adaptor.GetChildIndex(_last),
+                      retval.Tree);
+<endif>
+<endif>
+<! if parser or rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.Tree := Root[0];<\n>
+<endif>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.Tree := Root[0];<\n>
+<endif>
+<if(backtracking)>
+end;
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{Locals['Stream_<it>'] := TRewriteRule<rewriteElementType>Stream.Create(Adaptor, 'token <it>', <it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{Locals['Stream_<it>'] := TRewriteRule<rewriteElementType>Stream.Create(Adaptor,'token <it>', list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels:{
+if Assigned(<it>) then
+  Locals['Stream_<it>'] := TRewriteRuleSubtreeStream.Create(Adaptor, 'token <it>', <it>.Tree)
+else
+  Locals['Stream_<it>'] := TRewriteRuleSubtreeStream.Create(Adaptor, 'token <it>', nil);}; separator="\n">
+<referencedRuleListLabels
+    :{Locals['Stream_<it>'] := TRewriteRuleSubtreeStream.Create(Adaptor, 'token <it>', list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+(* <fileName>:<description> *)
+if (<referencedElementsDeep:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).HasNext}; separator=" or ">) then
+begin
+  <alt>
+end;
+<referencedElementsDeep:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).Reset;<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+(* <fileName>:<description> *)
+while (<referencedElements:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).HasNext}; separator=" or ">) do
+begin
+  <alt>
+end;
+<referencedElements:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements,     // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if (not (<referencedElements:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).HasNext}; separator=" or ">)) then
+  raise ERewriteEarlyExitException.Create('');
+
+while (<referencedElements:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).HasNext}; separator=" or ">) do
+begin
+  <alt>
+end;
+<referencedElements:{el | (Locals['Stream_<el>'] as IRewriteRuleElementStream).Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+(* <a.description> *)
+<if(a.pred)>
+if (<a.pred>) then
+begin
+  <a.alt>
+end<\n>
+<else>
+begin
+  <a.alt>
+end;<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "Root[0] = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+(* <fileName>:<description> *)
+begin
+  Root[<treeLevel>] := Adaptor.GetNilNode as I<ASTLabelType>;
+  <root:rewriteElement()>
+  <children:rewriteElement()>
+  Adaptor.AddChild(Root[<enclosingTreeLevel>], Root[<treeLevel>]);
+end;<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], (Locals['Stream_<label>'] as IRewriteRuleElementStream).NextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], (Locals['Stream_<label>'] as IRewriteRuleElementStream).NextNode);<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot((Locals['Stream_<label>'] as IRewriteRuleElementStream).NextNode(), Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+Root[0] = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of Root[0] right
+ *  before I set it during rewrites.  The assign will be to retval.Tree.
+ */
+prevRuleRootRef() ::= "RetVal"
+
+rewriteRuleRef(rule) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], (Locals['Stream_<rule>'] as IRewriteRuleElementStream).NextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot((Locals['Stream_<rule>'] as IRewriteRuleElementStream).NextNode, Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot(<action>, Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], (Locals['Stream_<label>'] as IRewriteRuleElementStream).NextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+Adaptor.AddChild(Root[<treeLevel>], (Locals['Stream_<label>'] as IRewriteRuleElementStream).NextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot((Locals['Stream_<label>'] as IRewriteRuleElementStream).NextNode, Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+Root[<treeLevel>] := Adaptor.BecomeRoot((Locals['Stream_<label>'] as IRewriteRuleElementStream).NextNode, Root[<treeLevel>]) as I<ASTLabelType>;<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+T<hetero>.Create(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+Adaptor.CreateNode(<tokenType>, <args; separator=", "><if(!args)>'<tokenType>'<endif>) as I<ASTLabelType>
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+T<hetero>.Create((Locals['Stream_<token>'] as IRewriteRuleElementStream).NextToken<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+Adaptor.Create(<token>, <args; separator=", ">)
+<else>
+(Locals['Stream_<token>'] as IRewriteRuleElementStream).NextNode
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg
new file mode 100644
index 0000000..2957fe8
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg
@@ -0,0 +1,220 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2008 Erik van Bilsen
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+RetVal.Tree := Adaptor.ErrorNode(Input, RetVal.Start as IToken, 
+  Input.LT(-1), RE) as I<ASTLabelType>;
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>
+if (State.Backtracking = 0) then
+begin<\n>
+<endif>
+<label>_tree := <createNodeFromToken(...)>;
+Adaptor.AddChild(Root[0], <label>_tree);
+<if(backtracking)>
+end;
+<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>
+if (State.Backtracking = 0) then 
+begin 
+<endif>
+<label>_tree := <createNodeFromToken(...)>;
+Root[0] := Adaptor.BecomeRoot(<label>_tree, Root[0]) as I<ASTLabelType>;
+<if(backtracking)>
+end;
+<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if (State.Backtracking = 0) then <endif>Adaptor.AddChild(Root[0], <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label> := Input.LT(1) as I<labelType>;<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if (State.Backtracking = 0) then <endif>Root[0] := Adaptor.BecomeRoot(<createNodeFromToken(...)>, Root[0]) as I<ASTLabelType>;})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>Adaptor.AddChild(Root[0], <label>.Tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>Root[0] := Adaptor.BecomeRoot(<label>.Tree, Root[0]) as I<ASTLabelType>;
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>
+if (State.Backtracking = 0) then 
+begin 
+<endif>
+	<label>_tree := Adaptor.CreateNode(<label>) as I<ASTLabelType>;
+	Adaptor.AddChild(Root[0], <label>_tree);
+<if(backtracking)>
+end;
+<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>
+if (State.Backtracking = 0) then 
+begin 
+<endif>
+	<label>_tree := Adaptor.CreateNode(<label>) as I<ASTLabelType>;
+	Root[0] := Adaptor.BecomeRoot(<label>_tree, Root[0]) as I<ASTLabelType>;
+<if(backtracking)>
+end;
+<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+T<hetero>.Create(<label>) <! new MethodNode(IDLabel) !>
+<else>
+Adaptor.CreateNode(<label>) as I<ASTLabelType>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>
+if (State.Backtracking = 0) then
+begin<\n>
+<endif>
+RetVal.Tree := Adaptor.RulePostProcessing(Root[0]) as I<ASTLabelType>;
+<if(!TREE_PARSER)>
+Adaptor.SetTokenBoundaries(RetVal.Tree, RetVal.Start as IToken, RetVal.Stop as IToken);
+<endif>
+<if(backtracking)>
+<\n>end;
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg
new file mode 100644
index 0000000..b77c3f0
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg
@@ -0,0 +1,307 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2008 Erik van Bilsen
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+_First[0] := nil;
+_Last := nil;<\n>
+>>
+
+ruleDeclarationVars() ::= <<
+<super.ruleDeclarationVars()>
+_First, _Save_Last: array [0..63] of I<ASTLabelType>;
+_Last: I<ASTLabelType>;
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if (State.Backtracking = 0) then
+begin<endif>
+<if(rewriteMode)>
+RetVal.Tree := _First[0] as I<ASTLabelType>;
+if (Adaptor.GetParent(RetVal.Tree) \<\> nil) and (Adaptor.IsNil(Adaptor.GetParent(RetVal.Tree))) then 
+  RetVal.Tree := Adaptor.GetParent(RetVal.Tree) as I<ASTLabelType>;
+<endif>
+<if(backtracking)>end;<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+begin
+  _Save_Last[<treeLevel>] := _Last;
+  _First[<treeLevel>] := nil;
+<if(!rewriteMode)>
+  Root[<treeLevel>] := Adaptor.GetNilNode as I<ASTLabelType>;<\n>
+<endif>
+  <root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>
+<if(root.el.rule)>
+  if (_First[<enclosingTreeLevel>] = nil) then _First[<enclosingTreeLevel>] := <root.el.label>.Tree;
+<else>
+  if (_First[<enclosingTreeLevel>] = nil) then _First[<enclosingTreeLevel>] := <root.el.label>;
+<endif>
+<endif>
+  <actionsAfterRoot:element()>
+<if(nullableChildList)>
+  if (Input.LA(1) = TToken.DOWN) then
+  begin
+    Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(Input, TToken.UP, nil); <checkRuleBacktrackFailure()>
+  end;
+<else>
+  Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
+  <children:element()>
+  Match(Input, TToken.UP, nil); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+  Adaptor.AddChild(Root[<enclosingTreeLevel>], Root[<treeLevel>]);
+<endif>
+  _Last := _Save_Last[<treeLevel>];
+end;<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>
+if (State.Backtracking = 0) then
+begin<\n>
+<endif>
+<if(hetero)>
+<label>_tree := T<hetero>.Create(<label>);
+<else>
+	<label>_tree := Adaptor.DupNode(<label>) as I<ASTLabelType>;
+<endif><\n>
+	Adaptor.AddChild(Root[<treeLevel>], <label>_tree);
+<if(backtracking)>
+end;
+<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>
+if (_First[<treeLevel>] = nil) then _First[<treeLevel>] := <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>
+if (State.Backtracking = 0) then
+begin
+<endif>
+<if(hetero)>
+<label>_tree := T<hetero>.Create(<label>);
+<else>
+	<label>_tree := Adaptor.DupNode(<label>) as I<ASTLabelType>;
+<endif><\n>
+	Root[<treeLevel>] := Adaptor.BecomeRoot(<label>_tree, Root[<treeLevel>]) as I<ASTLabelType>;
+<if(backtracking)>
+end;
+<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>; <! fix: was _Llast, an undeclared identifier; ruleDeclarationVars declares _Last !>
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if (State.Backtracking = 0) then begin <endif>
+<if(hetero)>
+<label>_tree := T<hetero>.Create(<label>);
+<else>
+<label>_tree := Adaptor.DupNode(<label>) as I<ASTLabelType>;
+<endif><\n>
+Adaptor.AddChild(Root[<treeLevel>], <label>_tree);
+<if(backtracking)>end;<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if (State.Backtracking = 0) then begin <endif>
+<if(hetero)>
+<label>_tree := T<hetero>.Create(<label>);
+<else>
+<label>_tree := Adaptor.DupNode(<label>) as I<ASTLabelType>;
+<endif><\n>
+Root[<treeLevel>] := Adaptor.BecomeRoot(<label>_tree, Root[<treeLevel>]) as I<ASTLabelType>;
+<if(backtracking)>end;<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.ruleRef(...)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>
+<if(!rewriteMode)>
+Adaptor.AddChild(Root[<treeLevel>], <label>.Tree);
+<else> <! rewrite mode !>
+if (_First[<treeLevel>] = nil) then _First[<treeLevel>] := <label>.Tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (State.Backtracking = 0) then <endif>Root[<treeLevel>] := Adaptor.BecomeRoot(<label>.Tree, Root[<treeLevel>]) as I<ASTLabelType>;
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".Tree",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.ruleRefRuleRootTrack(...)> <! fix: was super.ruleRefRootTrack, which is not defined in the base AST group !>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_Last := Input.LT(1) as I<ASTLabelType>;
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+T<hetero>.Create((Locals['Stream_<token>'] as IRewriteRuleElementStream).NextNode)
+<else>
+(Locals['Stream_<token>'] as IRewriteRuleElementStream).NextNode
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>
+if (State.Backtracking = 0) then
+begin<\n>
+<endif>
+RetVal.Tree := Adaptor.RulePostProcessing(Root[0]) as I<ASTLabelType>;
+<if(backtracking)>
+end;
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg
new file mode 100644
index 0000000..ca4b5ab
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg
@@ -0,0 +1,1805 @@
+/* [The "BSD licence"] 
+ Copyright (c) 2008 Erik van Bilsen
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group Delphi implements ANTLRCore;
+
+csharpTypeInitMap ::= [
+  "int":"0",
+  "uint":"0",
+  "long":"0",
+  "ulong":"0",
+  "float":"0.0",
+  "double":"0.0",
+  "bool":"False",
+  "byte":"0",
+  "sbyte":"0",
+  "short":"0",
+  "ushort":"0",
+  "char":"#0",
+  "string":"''",
+  "String":"''",
+  default:"nil" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ *  LEXER (Boolean): should we generate lexer code?
+ *  PARSER (Boolean): should we generate parser code?
+ *  TREE_PARSER (Boolean): should we generate tree parser code?
+ *  actionScope (String): 'lexer', 'parser', 'tree_parser' or custom scope
+ *  actions (HashMap):
+ *  docComment (String): document comment
+ *  recognizer (Object): recognizer class generator
+ *  name (String): name of grammar
+ *  tokens (HashMap<name: String, type: Integer>):
+ *  tokenNames:
+ *  rules:
+ *  cyclicDFAs:
+ *  bitsets:
+ *  buildTemplate (Boolean): should we generate a string template?
+ *  buildAST (Boolean): should we generate an AST?
+ *  rewriteMode (Boolean): are we rewriting nodes?
+ *  profile (Boolean):
+ *  backtracking (Boolean): backtracking mode?
+ *  synpreds (): syntactic predicates
+ *  memoize (Boolean): should we memoize?
+ *  numRules (Integer): number of rules
+ *  fileName (String): fully qualified name of original .g file
+ *  ANTLRVersion (String): ANTLR version in Major.Minor.Build format
+ *  generatedTimestamp (String): date/time when the file is generated
+ *  trace (Boolean): should we trace input/output?
+ *  scopes:
+ *  superClass (String): name of base class, or empty string
+ *  literals:
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+     bitsets, buildTemplate, buildAST, rewriteMode, profile,
+     backtracking, synpreds, memoize, numRules,
+     fileName, ANTLRVersion, generatedTimestamp, trace,
+     scopes, superClass, literals) ::=
+<<
+unit <name>;
+
+{$HINTS OFF}
+
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<actions.(actionScope).header>
+
+interface
+
+<@imports>
+uses<\n>
+<@end>
+  <actions.(actionScope).usesInterface>
+<if(TREE_PARSER)>
+  Antlr.Runtime.Tree,<\n>
+<endif>
+  Antlr.Runtime,
+  Antlr.Runtime.Collections,
+  Antlr.Runtime.Tools;
+  
+<docComment>
+<recognizer>
+>>
+
+/** Generates source code for the lexer class
+ * grammar (Grammar object)
+ */
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode, superClass="Lexer") ::= <<
+type
+  I<grammar.recognizerName> = interface(I<@superClassName><superClass><@end>)
+  end;
+  
+  T<grammar.recognizerName> = class(T<@superClassName><superClass><@end>, I<grammar.recognizerName>)
+  strict private
+    FCnt: array [0..<grammar.numberOfDecisions>] of Byte;
+    FLA: array [0..<grammar.numberOfDecisions>, 0..255] of Integer;
+    FException: ERecognitionException;
+    procedure InitializeCyclicDFAs;
+  <cyclicDFAs:cyclicDFADeclaration()> 
+  public
+    const
+      <tokens:{<it.name> = <it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+  strict private
+    <actions.(actionScope).memberDeclarations>
+  public
+    // delegates
+    <grammar.delegates: {g|<g:delegateName()>: I<superClass>; {<g.recognizerName>}}; separator="\n">
+  public
+    // delegators
+    <grammar.delegators: {g|<g:delegateName()>: Pointer; {<g.recognizerName>}}; separator="\n">
+    <last(grammar.delegators):{g|gParent: Pointer; {<g.recognizerName>}}>
+  protected
+    { IBaseRecognizer }
+    function GetGrammarFileName: String; override;
+<if(filterMode)>
+    function AlreadyParsedRule(const Input: IIntStream;
+      const RuleIndex: Integer): Boolean; override;
+    procedure Memoize(const Input: IIntStream; const RuleIndex,
+      RuleStartIndex: Integer); override;
+  protected
+    { ILexer }
+    function NextToken: IToken; override;<\n>
+<endif>
+  protected
+    { ILexer }
+    procedure DoTokens; override;
+  public
+    constructor Create; overload;
+    constructor Create(const AInput: ICharStream<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+    constructor Create(const AInput: ICharStream; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+
+    <rules: {r | <if(!r.ruleDescriptor.isSynPred)><lexerRuleDeclaration(r)><endif>}>
+    <synpreds:{p | <lexerSynpredDeclaration(p)>}; separator="\n">
+  end;
+  
+implementation
+
+uses
+  <grammar.delegates: {g|<g.recognizerName>,}; separator="\n">
+  <grammar.delegators: {g|<g.recognizerName>,}; separator="\n">
+  <actions.(actionScope).usesImplementation>
+  SysUtils,
+  StrUtils,
+  Math;
+  
+{ T<grammar.recognizerName> }
+
+constructor T<grammar.recognizerName>.Create;
+begin
+  InitializeCyclicDFAs;
+end;
+
+constructor T<grammar.recognizerName>.Create(const AInput: ICharStream<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  Create(AInput, nil<grammar.delegators:{g|, A<g:delegateName()>}>);
+end;
+
+constructor T<grammar.recognizerName>.Create(const AInput: ICharStream; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  inherited Create(AInput, AState);
+  InitializeCyclicDFAs; { TODO: Necessary in Delphi??? Not removed yet. }
+  <if(memoize)>
+  <if(grammar.grammarIsRoot)>
+  State.RuleMemoCount := <numRules>+1;<\n> <! index from 1..n !>
+  <endif>
+  <endif>
+  <grammar.directDelegates:
+   {g|<g:delegateName()> := T<g.recognizerName>.Create(AInput, State<trunc(g.delegators):{p|, <p:delegateName()>}>, Self);}; separator="\n">
+  <grammar.delegators:
+   {g|<g:delegateName()> := Pointer(A<g:delegateName()>);}; separator="\n">
+  <last(grammar.delegators):{g|gParent := Pointer(A<g:delegateName()>);}>
+  <actions.(actionScope).memberInitializations>
+end;
+<actions.(actionScope).memberImplementations>
+function T<grammar.recognizerName>.GetGrammarFileName: String;
+begin
+  Result := '<fileName>';
+end;
+
+<if(filterMode)>
+<filteringNextToken()>
+<endif>
+
+<rules; separator="\n\n">
+<synpreds:{p | <lexerSynpred(p)>}>
+
+procedure T<grammar.recognizerName>.InitializeCyclicDFAs;
+begin
+  <cyclicDFAs:{dfa | FDFA<dfa.decisionNumber> := TDFA<dfa.decisionNumber>.Create(Self<@debugAddition()>);}; separator="\n">
+  <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>FDFA<dfa.decisionNumber>.SpecialStateTransitionHandler := DFA<dfa.decisionNumber>_SpecialStateTransition;<endif>}; separator="\n">
+end;
+  
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+end.>>
+
+lexerRuleDeclaration(rule) ::= <<
+procedure m<rule.ruleName>(<rule.ruleDescriptor.parameterScope:parameterScope(scope=rule)>);<\n>
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+function T<grammar.recognizerName>.NextToken: IToken;
+var
+  M: Integer;
+begin
+  while (True) do
+  begin
+    if (Input.LA(1) = Integer(cscEOF)) then
+      Exit(TToken.EOF_TOKEN);
+
+    State.Token := nil;
+    State.Channel := TToken.DEFAULT_CHANNEL;
+    State.TokenStartCharIndex := Input.Index;
+    State.TokenStartCharPositionInLine := Input.CharPositionInLine;
+    State.TokenStartLine := Input.Line;
+    State.Text := '';
+    try
+      M := Input.Mark();
+      State.Backtracking := 1; <! means we won't throw slow exception !>
+      State.Failed := False;
+      mTokens();
+      State.Backtracking := 0;
+<!
+      mTokens backtracks with synpred at backtracking==2
+            and we set the synpredgate to allow actions at level 1. 
+!>
+      if (State.Failed) then
+      begin
+        Input.Rewind(M);
+        Input.Consume; <! // advance one char and try again !>
+      end
+      else 
+      begin
+        Emit;
+        Exit(State.Token);
+      end;
+    except
+      on RE: ERecognitionException do
+      begin
+        // shouldn't happen in backtracking mode, but...
+        ReportError(RE);
+        Recover(RE);
+      end;
+    end;
+  end;
+end;
+
+function T<grammar.recognizerName>.AlreadyParsedRule(const Input: IIntStream;
+  const RuleIndex: Integer): Boolean; 
+begin
+  if (State.Backtracking > 1) then
+    Result := inherited AlreadyParsedRule(Input, RuleIndex)
+  else
+    Result := False; 
+end;
+  
+procedure T<grammar.recognizerName>.Memoize(const Input: IIntStream; const RuleIndex,
+  RuleStartIndex: Integer); 
+begin
+  if (State.Backtracking > 1) then
+    inherited Memoize(Input, RuleIndex, RuleStartIndex);
+end;  
+
+>>
+
+filteringActionGate() ::= "(State.Backtracking = 1)"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="ANTLRInterface", labelType, members, rewriteElementType) ::= <<
+type
+  <rules: {r | <genericParserRuleReturnType(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
+  I<grammar.recognizerName> = interface(I<@superClassName><superClass><@end>)
+    <rules: {r | <genericParserRuleInterface(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
+  end;
+  
+  T<grammar.recognizerName> = class(T<@superClassName><superClass><@end>, I<grammar.recognizerName>)
+<if(grammar.grammarIsRoot)>
+  public
+    const
+      TOKEN_NAMES: array [0..<length(tokenNames)>+3] of String = (
+        '\<invalid>', 
+        '\<EOR>',
+        '\<DOWN>',
+        '\<UP>',
+        <tokenNames; separator=",\n">);<\n>
+<endif>
+  public
+    const
+      <tokens:{<it.name> = <it.type>;}; separator="\n">
+  public
+    // delegates
+    <grammar.delegates: {g|<g:delegateName()>: I<superClass>; {<g.recognizerName>}}; separator="\n">
+  public
+    // delegators
+    <grammar.delegators: {g|<g:delegateName()>: Pointer; {<g.recognizerName>}}; separator="\n">
+    <last(grammar.delegators):{g|gParent: Pointer; {<g.recognizerName>}}>
+    
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDeclaration(scope=it)><endif>}>
+<@members>
+    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+  public
+    constructor Create(const AInput: <inputStreamType><grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+    constructor Create(const AInput: <inputStreamType>; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+<@end>
+  protected
+    { IBaseRecognizer }
+    function GetTokenNames: TStringArray; override;
+    function GetGrammarFileName: String; override;  
+  strict private
+    <actions.(actionScope).memberDeclarations>
+  <rules: {r | <genericParserRuleDeclaration(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor| <delegatedRuleDeclaration(ruleDescriptor)>}>
+
+    <synpreds:{p | <synpredDeclaration(p)>}; separator="\n">
+  <cyclicDFAs:cyclicDFADeclaration()> 
+  strict private
+    FException: ERecognitionException;
+    FLA: array [0..<grammar.numberOfDecisions>, 0..255] of Integer;
+    FCnt: array [0..<grammar.numberOfDecisions>] of Byte;
+    procedure InitializeCyclicDFAs;
+<if(bitsets)>
+  public
+    class var
+      <bitsets:bitsetDecl(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>})>
+  public
+    class procedure InitializeBitsets; static;<\n>
+<endif>
+  end;
+
+implementation
+
+uses
+  <grammar.delegates: {g|<g.recognizerName>,}; separator="\n">
+  <grammar.delegators: {g|<g.recognizerName>,}; separator="\n">
+  <actions.(actionScope).usesImplementation>
+  SysUtils,
+  StrUtils,
+  Math;
+  
+{ T<grammar.recognizerName> }
+
+constructor T<grammar.recognizerName>.Create(const AInput: <inputStreamType><grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  Create(AInput, TRecognizerSharedState.Create<grammar.delegators:{g|, A<g:delegateName()>}>);  
+end;
+
+constructor T<grammar.recognizerName>.Create(const AInput: <inputStreamType>; 
+  const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  inherited Create(AInput, AState);
+  <@membersConstructor>
+  <@end>
+  <parserCtorBody()>
+  <grammar.directDelegates:{g|<g:delegateName()> := T<g.recognizerName>.Create(Input, State<trunc(g.delegators):{p|, <p:delegateName()>}>, Self);}; separator="\n">
+  <grammar.indirectDelegates:{g | <g:delegateName()> := <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+  <last(grammar.delegators):{g|gParent := Pointer(A<g:delegateName()>);}>
+  <rules: {r | <ruleAttributeScopeInit(scope=r.ruleDescriptor.ruleScope)>}>
+  <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>  
+  <actions.(actionScope).memberInitializations>
+end;
+<actions.(actionScope).memberImplementations>
+
+<grammar.delegatedRules:{ruleDescriptor| <delegatedRuleImplementation(ruleDescriptor)>}; separator="\n">
+procedure T<grammar.recognizerName>.InitializeCyclicDFAs;
+begin
+  <cyclicDFAs:{dfa | FDFA<dfa.decisionNumber> := TDFA<dfa.decisionNumber>.Create(Self);}; separator="\n">
+  <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>FDFA<dfa.decisionNumber>.SpecialStateTransitionHandler := DFA<dfa.decisionNumber>_SpecialStateTransition;<endif>}; separator="\n">
+end;
+
+<if(bitsets)>
+class procedure T<grammar.recognizerName>.InitializeBitsets;
+begin
+  <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>
+end;
+<endif>
+
+<@membersImplementation>
+ <@end>
+
+function T<grammar.recognizerName>.GetTokenNames: TStringArray;
+var
+  I: Integer;
+begin
+  SetLength(Result,Length(T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES));
+  for I := 0 to Length(T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES) - 1 do
+    Result[I] := T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES[I];
+end;
+
+function T<grammar.recognizerName>.GetGrammarFileName: String;
+begin
+  Result := '<fileName>';
+end;
+
+<rules; separator="\n\n">
+<synpreds:{p | <synpred(p)>}>
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+<if(bitsets)>
+initialization
+  T<grammar.recognizerName>.InitializeBitsets;<\n>
+<endif>
+end.>>
+
+delegatedRuleDeclaration(ruleDescriptor) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;<\n>
+<else>
+procedure <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);<\n>
+<endif>
+<endif>
+>>
+
+delegatedRuleImplementation(ruleDescriptor) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;<\n>
+<else>
+procedure T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);<\n>
+<endif>
+<endif>
+begin
+  <if(ruleDescriptor.hasReturnValue)>Result :=<endif> T<ruleDescriptor.grammar.recognizerName>(<ruleDescriptor.grammar:delegateName()>.Implementor).<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">);
+end;
+
+>>
+
+parserCtorBody() ::= <<
+InitializeCyclicDFAs;
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+State.RuleMemoCount := <length(grammar.allImportedRules)>+1;<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators: {g|<g:delegateName()> := Pointer(A<g:delegateName()>);}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start "<ruleName>"
+procedure T<grammar.recognizerName>.<ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
+var
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+  <ruleLabelDefVars()>
+begin
+  <ruleLabelDefs()>
+<if(trace)>
+  TraceIn('<ruleName>_fragment', <ruleDescriptor.index>);
+  try
+    <block>
+  finally
+    TraceOut('<ruleName>_fragment', <ruleDescriptor.index>);
+  end;
+<else>
+  <block>
+<endif>
+end;
+// $ANTLR end "<ruleName>"
+>>
+
+synpredDecls(name) ::= <<
+SynPredPointer <name>;<\n>
+>>
+
+synpred(name) ::= <<
+
+function T<grammar.recognizerName>.<name>: Boolean;
+var
+  Start: Integer;
+  Success: Boolean;
+begin
+  State.Backtracking := State.Backtracking + 1;
+  <@start()>
+  Start := Input.Mark;
+  try 
+    <name>_fragment(); // can never throw exception
+  except
+    on RE: ERecognitionException do
+      WriteLn('Impossible: ' + RE.ToString);
+  end;
+  Success := not State.Failed;
+  Input.Rewind(Start);
+  <@stop()>
+  State.Backtracking := State.Backtracking - 1;
+  State.Failed := False;
+  Result := Success;
+end;<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+lexerSynpredDeclaration(name) ::= <<
+function <name>: Boolean;
+procedure <name>_fragment;
+>>
+
+synpredDeclaration(name) ::= <<
+function <name>: Boolean;
+procedure <name>_fragment;
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ((State.Backtracking > 0) and AlreadyParsedRule(Input, <ruleDescriptor.index>)) then
+  Exit(<ruleReturnValue()>);
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)><\n>if (State.Failed) then Exit(<ruleReturnValue()>);<\n><endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (State.Backtracking > 0) then
+begin
+  State.Failed := True; 
+  Exit(<ruleReturnValue()>);
+end;<endif>
+>>
+
+genericParserRuleDeclaration(rule, ruleDescriptor) ::= <<
+<if(ruleDescriptor.isSynPred)>
+<else>
+<ruleAttributeScopeDeclaration(scope=ruleDescriptor.ruleScope)>
+<returnScopeDeclaration(scope=ruleDescriptor.returnScope)>
+public
+<if(ruleDescriptor.hasMultipleReturnValues)>
+  function <rule.ruleName>: I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+  function <rule.ruleName>: <returnType()>;<\n>
+<else>
+  procedure <rule.ruleName>;<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+genericParserRuleInterface(rule, ruleDescriptor) ::= <<
+<if(ruleDescriptor.isSynPred)>
+<else>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function <rule.ruleName>: I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function <rule.ruleName>: <returnType()>;<\n>
+<else>
+procedure <rule.ruleName>;<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+genericParserRuleReturnType(rule, ruleDescriptor) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(ruleDescriptor.isSynPred)>
+<else>
+I<returnType()> = interface(I<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)
+end;<\n>
+<endif>
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ *  Fix: TraceOut used C-style double quotes around the rule name; Delphi
+ *  string literals take single quotes (TraceIn above already used them).
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+(* <fileName>:<description> *)
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;
+<else>
+procedure T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
+<endif>
+<endif>
+
+var
+<ruleDescriptor.actions.vars>
+  Locals: TLocalStorage;
+<if(ruleDescriptor.hasMultipleReturnValues)>
+  RetVal: I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+  RetVal: <returnType()>;<\n>
+<else>
+<endif>
+<endif>
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+  <ruleDeclarationVars()>
+  <ruleLabelDefVars()>
+begin
+  Locals.Initialize;
+  try
+    <if(trace)>TraceIn('<ruleName>', <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try
+      try
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+<if(exceptions)>
+        <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+        <actions.(actionScope).rulecatch>
+<else>
+      except
+        on RE: ERecognitionException do
+        begin
+          ReportError(RE);
+          Recover(Input,RE);
+          <@setErrorReturnValue()>
+        end;<\n>
+<endif>
+<endif>
+<endif>
+      end;
+    finally
+      <if(trace)>TraceOut('<ruleName>', <ruleDescriptor.index>);<endif>
+      <memoize()>
+      <ruleScopeCleanUp()>
+      <finally>
+    end;
+    <@postamble()>
+  finally
+    Locals.Finalize;
+  end;
+  Exit(<ruleReturnValue()>);
+end;
+// $ANTLR end "<ruleName>"
+>>
+
+/** Emit a user-specified exception handler for a rule's 'catch' clause.
+ *  Fix: the body was leftover C#-target code ('catch (<e.decl>) {...}') and
+ *  referenced attribute 'e', which is not a parameter of this template — the
+ *  declared parameters are decl/action.  Emit a Delphi 'on ... do' handler.
+ *  NOTE(review): the caller inserts these handlers where the default
+ *  'except' branch would go; confirm the enclosing rule template supplies
+ *  the 'except' keyword when user handlers are present. */
+catch(decl,action) ::= <<
+on <decl> do
+begin
+  <action>
+end;<\n>
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+RetVal := T<returnType()>.Create;
+RetVal.Start := Input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name> := <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex := Input.Index();
+<endif>
+>>
+
+ruleDeclarationVars() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name>: <a.type>;
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex: Integer;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>Stack.Push(T<it>Scope.Create);}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>Stack.Push(T<it.name>Scope.Create);}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>Stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>Stack.Pop;}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]:{<it.label.text> := nil;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{list_<it.label.text> := nil;}; separator="\n">
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|<ll.label.text> := nil;}; separator="\n">
+>>
+
+ruleLabelDefVars() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]:{<it.label.text>: I<labelType>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{list_<it.label.text>: IList\<IANTLRInterface\>;}; separator="\n">
+<ruleDescriptor.ruleLabels:ruleLabelDefVar(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|<ll.label.text>: <ruleLabelType(referencedRule=ll.referencedRule)>;}; separator="\n">
+>>
+
+/** Initialize lexer rule label locals at the top of the rule body.
+ *  Fixes: char labels were emitted with C syntax ('int x;') inside a Delphi
+ *  begin..end block — initialize them instead (they are declared in
+ *  lexerRuleLabelDefDeclarations); also dropped a duplicated ruleListLabels
+ *  entry that assigned each list label nil twice. */
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text> := nil;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{<it.label.text> := 0;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List_<it.label.text> := nil;}; separator="\n"
+>
+>>
+
+/** Local variable declarations for lexer rule labels (Delphi var section).
+ *  Fixes: char labels were declared with C syntax ('int x;'); Delphi needs
+ *  'x: Integer;'.  Also dropped a duplicated ruleListLabels entry that
+ *  declared each list label twice — a duplicate-identifier compile error. */
+lexerRuleLabelDefDeclarations() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text>: I<labelType>;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{<it.label.text>: Integer;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List_<it.label.text>: IList;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+RetVal
+<endif>
+<else>
+<! nil !>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+RetVal.Stop := Input.LT(-1);
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if (State.Backtracking > 0) then
+  Memoize(Input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); 
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ *  Fix: TraceIn/TraceOut used C-style double quotes around the rule name;
+ *  Delphi string literals take single quotes.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+<ruleDescriptor.parameterScope>
+procedure T<grammar.recognizerName>.m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
+var
+  <ruleDescriptor.actions.vars>
+  Locals: TLocalStorage;
+  TokenType, Channel: Integer;
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+  <lexerRuleLabelDefDeclarations()>
+begin
+  Locals.Initialize;
+  try
+    <ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+    <if(trace)>TraceIn('<ruleName>', <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try
+<if(nakedBlock)>
+      <ruleMemoization(name=ruleName)>
+      <lexerRuleLabelDefs()>
+      <ruleDescriptor.actions.init>
+      <block><\n>
+<else>
+      TokenType := <ruleName>;
+      Channel := DEFAULT_TOKEN_CHANNEL;
+      <ruleMemoization(name=ruleName)>
+      <lexerRuleLabelDefs()>
+      <ruleDescriptor.actions.init>
+      <block>
+      <ruleCleanUp()>
+      State.TokenType := TokenType;
+      State.Channel := Channel;
+      <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    finally
+      <if(trace)>TraceOut('<ruleName>', <ruleDescriptor.index>);<endif>
+      <ruleScopeCleanUp()>
+      <memoize()>
+    end;
+  finally
+    Locals.Finalize;
+  end;
+end;
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+procedure T<grammar.recognizerName>.mTokens;
+var
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+begin
+  <block>
+end;
+
+procedure T<grammar.recognizerName>.DoTokens;
+begin
+  mTokens;
+end;
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+Alt[<decisionNumber>] := <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+case Alt[<decisionNumber>] of
+  <alts:altSwitchCase()>
+end;
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+Alt[<decisionNumber>] := <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+case Alt[<decisionNumber>] of
+  <alts:altSwitchCase()>
+end;
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+(* <fileName>:<description> *)
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+(* <fileName>:<description> *)
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+FCnt[<decisionNumber>] := 0;
+<decls>
+<@preloop()>
+while (True) do
+begin
+  Alt[<decisionNumber>] := <maxAlt>;
+  <@predecision()>
+  <decision>
+  <@postdecision()>
+  case Alt[<decisionNumber>] of
+    <alts:altSwitchCase()>
+  else
+    begin
+      if (FCnt[<decisionNumber>] >= 1) then
+        Break;
+      <ruleBacktrackFailure()> 
+      raise EEarlyExitException.Create(<decisionNumber>, Input);
+      <@earlyExitException()>
+    end;
+  end;
+  Inc(FCnt[<decisionNumber>]);
+end;
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+<decls>
+<@preloop()>
+while (True) do 
+begin
+  Alt[<decisionNumber>] := <maxAlt>;
+  <@predecision()>
+  <decision>
+  <@postdecision()>
+  case Alt[<decisionNumber>] of
+    <alts:altSwitchCase()>
+  else
+    Break;
+  end;
+end;
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+<i>:
+  <@prealt()>
+  <it><\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+(* <fileName>:<description> *)
+begin
+  <@declarations()>
+  <elements:element()>
+  <rew>
+  <@cleanup()>
+end;
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label> := <endif>Match(Input, <token>, FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)> as I<labelType><endif>;<\n><checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> = nil) then list_<label> := TList\<IANTLRInterface\>.Create;
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> := Input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> := Input.LA(1);<\n>
+<endif>
+MatchRange(<a>, <b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label> := Input.LA(1);<\n>
+<else>
+<label> := Input.LT(1) as I<labelType>;<\n>
+<endif>
+<endif>
+if (<s>) then
+begin
+  Input.Consume;
+  <postmatchCode>
+  <if(!LEXER)>
+  State.ErrorRecovery := False;<endif>
+  <if(backtracking)>State.Failed := False;<endif>
+end
+else 
+begin
+  <ruleBacktrackFailure()>
+  FException := EMismatchedSetException.Create(nil, Input);  
+  <@mismatchedSetException()>
+<if(LEXER)>
+  Recover(FException);
+  raise FException;<\n>
+<else>
+  raise FException;
+  <! use following code to make it recover inline; remove throw mse;
+  RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+  !>
+<endif>
+end;<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+Locals.AsInteger['<label>Start'] := CharIndex;
+Match(<string>); <checkRuleBacktrackFailure()>
+<label> := TCommonToken.Create(Input, TToken.INVALID_TOKEN_TYPE, TToken.DEFAULT_CHANNEL, Locals.AsInteger['<label>Start'], CharIndex-1);
+<else>
+Match(<string>); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Match any single token, optionally capturing it into a label.
+ *  Consistency fix: 'MatchAny(input)' → 'MatchAny(Input)' to match the
+ *  capitalization used everywhere else (identical behavior — Delphi
+ *  identifiers are case-insensitive). */
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> := Input.LT(1) as I<labelType>;<\n>
+<endif>
+MatchAny(Input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> := Input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)>
+<label> := <if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+<else>
+<if(scope)>T<scope.recognizerName>(IANTLRObject(<scope:delegateName()>).Implementor).<endif><rule.name>(<args; separator=", ">);<\n>
+<endif>
+State.FollowingStackPointer := State.FollowingStackPointer - 1;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+Locals.AsInteger['<label>Start<elementIndex>'] := CharIndex;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> := TCommonToken.Create(Input, TToken.INVALID_TOKEN_TYPE, TToken.DEFAULT_CHANNEL, 
+  Locals.AsInteger['<label>Start<elementIndex>'], CharIndex - 1);
+<else>
+<if(scope)>(<scope:delegateName()>.Implementor as T<scope.recognizerName>).<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+Locals.AsInteger['<label>Start<elementIndex>'] := CharIndex;
+Match(EOF); <checkRuleBacktrackFailure()>
+Locals['<label>'] := TCommonToken.Create(Input, EOF, TToken.DEFAULT_CHANNEL, Locals.AsInteger['<label>Start<elementIndex>'], CharIndex-1);
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if (Input.LA(1) = TToken.DOWN) then
+begin
+  Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
+  <children:element()>
+  Match(Input, TToken.UP, nil); <checkRuleBacktrackFailure()>
+end;
+<else>
+Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(Input, TToken.UP, nil);<\n><checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if (not (<evalPredicate(...)>)) then
+begin
+  <ruleBacktrackFailure()>
+  raise EFailedPredicateException.Create(Input, '<ruleName>', '<description>');
+end;<\n>
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else 
+begin
+<if(eotPredictsAlt)>
+  Alt[<decisionNumber>] := <eotPredictsAlt>;<\n>
+<else>
+  <ruleBacktrackFailure()>
+  raise ENoViableAltException.Create('<description>', <decisionNumber>, <stateNumber>, Input);<\n>
+<endif>
+end;
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);<\n>
+<edges; separator="\nelse ">;
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);
+<edges; separator="\nelse ">;<\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+Alt[<decisionNumber>] := <eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else 
+begin
+  Alt[<decisionNumber>] := <eotPredictsAlt>;
+end;<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "Alt[<decisionNumber>] := <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> and (<predicates>)<endif>) then
+begin
+  <targetState>
+end <! no ; here !>
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case Input.LA(<k>) of
+  <edges; separator="\n">
+else
+  begin
+<if(eotPredictsAlt)>
+    Alt[<decisionNumber>] := <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    <@noViableAltException()>
+    raise ENoViableAltException.Create('<description>', <decisionNumber>, <stateNumber>, Input);<\n>
+<endif>
+  end;
+end;<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case Input.LA(<k>) of
+  <edges; separator="\n">
+end;<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case Input.LA(<k>) of
+  <edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+else
+  Alt[<decisionNumber>] := <eotPredictsAlt>;<\n>
+<endif>
+end;<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{<it>}; separator=",\n">:
+  begin
+    <targetState>
+  end;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+Alt[<decisionNumber>] := FDFA<decisionNumber>.Predict(Input);
+>>
+
+/* Dump DFA tables.
+ */
+cyclicDFADeclaration(dfa) ::= <<
+strict protected
+  type
+    TDFA<dfa.decisionNumber> = class(TDFA)
+    protected
+      { IDFA }
+      function Description: String; override;
+    public
+      constructor Create(const ARecognizer: IBaseRecognizer);
+    end;
+  var
+    FDFA<dfa.decisionNumber>: IDFA;
+<if(dfa.specialStateSTs)>
+strict protected
+  function DFA<dfa.decisionNumber>_SpecialStateTransition(const DFA: IDFA; S: Integer;
+    const AInput: IIntStream): Integer;<endif>
+>>
+
+cyclicDFA(dfa) ::= <<
+{ T<grammar.recognizerName>.TDFA<dfa.decisionNumber> }
+
+constructor T<grammar.recognizerName>.TDFA<dfa.decisionNumber>.Create(const ARecognizer: IBaseRecognizer);
+const
+  DFA<dfa.decisionNumber>_EOT = '<dfa.javaCompressedEOT; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_EOF = '<dfa.javaCompressedEOF; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_MIN = '<dfa.javaCompressedMin; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_MAX = '<dfa.javaCompressedMax; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_ACCEPT = '<dfa.javaCompressedAccept; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_SPECIAL = '<dfa.javaCompressedSpecial; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_TRANSITION: array [0..<length(dfa.javaCompressedTransition)>-1] of String = (
+    <dfa.javaCompressedTransition:{s|'<s; wrap="'+\n'">'}; separator=",\n">);
+begin
+  inherited Create;
+  Recognizer := ARecognizer;
+  DecisionNumber := <dfa.decisionNumber>;
+  EOT := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_EOT);
+  EOF := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_EOF);
+  Min := TDFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_MIN);
+  Max := TDFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_MAX);
+  Accept := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_ACCEPT);
+  Special := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_SPECIAL);
+  Transition := TDFA.UnpackEncodedStringArray(DFA<dfa.decisionNumber>_TRANSITION);
+end;
+
+function T<grammar.recognizerName>.TDFA<dfa.decisionNumber>.Description: String;
+begin
+  Result := '<dfa.description>';
+end;<\n>
+<if(dfa.specialStateSTs)>
+function T<grammar.recognizerName>.DFA<dfa.decisionNumber>_SpecialStateTransition(const DFA: IDFA; S: Integer;
+  const AInput: IIntStream): Integer;
+var
+  Locals: TLocalStorage;
+  <if(LEXER)>
+  Input: IIntStream;
+  <endif>
+  <if(PARSER)>
+  Input: ITokenStream;
+  <endif>
+  <if(TREE_PARSER)>
+  Input: ITreeNodeStream;
+  <endif>
+  _S: Integer;
+  NVAE: ENoViableAltException;
+begin
+  Result := -1;
+  Locals.Initialize;
+  try
+    <if(LEXER)>
+    Input := AInput;
+    <endif>
+    <if(PARSER)>
+    Input := AInput as ITokenStream;
+    <endif>
+    <if(TREE_PARSER)>
+    Input := AInput as ITreeNodeStream;
+    <endif>
+    _S := S;
+    case S of
+      <dfa.specialStateSTs:{state | <i0>: begin<! compressed special state numbers 0..n-1 !>
+     <state> <\n>   end;}; separator="\n">
+    end;
+<if(backtracking)>
+    if (State.Backtracking > 0) then
+    begin
+      State.Failed := True; 
+      Exit(-1);
+    end;<\n>
+<endif>
+    NVAE := ENoViableAltException.Create(DFA.Description, <dfa.decisionNumber>, _S, Input);
+    DFA.Error(NVAE);
+    raise NVAE;
+  finally
+    Locals.Finalize;
+  end;
+end;<\n>
+<endif>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+Locals.AsInteger['index<decisionNumber>_<stateNumber>'] := Input.Index;
+Input.Rewind;<\n>
+<endif>
+S := -1;
+<edges; separator="\nelse ">;
+<if(semPredState)> <! return input cursor to state before we rewound !>
+Input.Seek(Locals.AsInteger['index<decisionNumber>_<stateNumber>']);<\n>
+<endif>
+if (S >= 0) then
+  Exit(S);
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> and (<predicates>)<endif>) then
+  S := <targetStateNumber>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+S := <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+/** Boolean combinators for DFA predicate expressions (Delphi operators). */
+andPredicates(left,right) ::= "((<left>) and (<right>))"
+
+orPredicates(operands) ::= "((<first(operands)>)<rest(operands):{o | or (<o>)}>)"
+
+// Fix: upstream emitted C-style '!(...)'; Delphi negation is 'not (...)'.
+notPredicate(pred) ::= "not (<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "FLA[<decisionNumber>,<stateNumber>] = <atomAsInt>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "Input.LA(<k>) = <atomAsInt>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+((FLA[<decisionNumber>,<stateNumber>] \>= <lowerAsInt>) and (FLA[<decisionNumber>,<stateNumber>] \<= <upperAsInt>))
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(Input.LA(<k>) \>= <lowerAsInt>) and (Input.LA(<k>) \<= <upperAsInt>)"
+
+setTest(ranges) ::= "<ranges; separator=\") or (\">"
+
+// A T T R I B U T E S
+
+/** Initialize the stack backing a global dynamic attribute scope.
+ *  Fix: the body had a dangling <endif> with no matching <if(...)>, which is
+ *  a template syntax error; guard with <if(scope)> as the structurally
+ *  identical ruleAttributeScopeInit template below does. */
+globalAttributeScope(scope) ::= <<
+<if(scope)>
+<scope.name>Stack := TStackList\<I<scope.name>Scope\>.Create;<\n>
+<endif>
+>>
+
+globalAttributeScopeDeclaration(scope) ::= <<
+<if(scope.attributes)>
+strict protected
+  type
+    I<scope.name>Scope = interface(IANTLRObject)
+    end;
+    T<scope.name>Scope = class(TANTLRObject, I<scope.name>Scope)
+    protected
+      <scope.attributes:{<it.name>: <it.type>;}; separator="\n">   
+    end;
+strict protected
+  <scope.name>Stack: IStackList\<I<scope.name>Scope\>;
+<endif>
+>>
+
+ruleAttributeScopeDeclaration(scope) ::= <<
+<if(scope.attributes)>
+strict protected
+  type
+    I<scope.name>Scope = interface(IANTLRObject)
+    end;
+    T<scope.name>Scope = class(TANTLRObject, I<scope.name>Scope)
+    protected
+      <scope.attributes:{<it.name>: <it.type>;}; separator="\n">
+    end;
+strict protected
+  <scope.name>Stack: IStackList\<I<scope.name>Scope\>;    
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<! protected Stack <scope.name>Stack = new Stack();<\n> !>
+>>
+
+ruleAttributeScopeInit(scope) ::= <<
+<if(scope)>
+<scope.name>Stack := TStackList\<I<scope.name>Scope\>.Create;<\n>
+<endif>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor:returnStructName()>
+<! I<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope !>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+<! Pointer/void !>
+<endif>
+<endif>
+>>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+I<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<csharpTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<label.label.text> := <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+ruleLabelDefVar(label) ::= <<
+<label.label.text>: <ruleLabelType(referencedRule=label.referencedRule)>;
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+{ T<ruleDescriptor:returnStructName()> }
+
+<scope.attributes:{public <it.decl>;}; separator="\n">
+<@ruleReturnMembers()>
+<endif>
+>>
+
+returnScopeDeclaration(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public
+  type
+    T<ruleDescriptor:returnStructName()> = class(T<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope, I<ruleDescriptor:returnStructName()>)
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+    end;
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> := <expr>;"
+
+/** Read an attribute from a dynamic-scope stack entry.
+ *  negIndex counts down from the top of the stack; index is an absolute
+ *  position; with neither, the top-of-stack entry is used.
+ *  Fix: removed a leftover C#-target line
+ *  ('((<scope>_scope)<scope>_stack[<index>]).<attr.name>') that was emitted
+ *  verbatim after the indexed access, producing invalid Delphi code. */
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+(<scope>Stack[<scope>Stack.Count-<negIndex>-1] as T<scope>Scope).<attr.name>
+<else>
+<if(index)>
+(<scope>Stack[<index>] as T<scope>Scope).<attr.name>
+<else>
+(<scope>Stack.Peek.Implementor as T<scope>Scope).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+(<scope>Stack[<scope>Stack.Count-<negIndex>-1] as T<scope>Scope).<attr.name> := <expr>;<\n>
+<else>
+<if(index)>
+(<scope>Stack[<index>] as T<scope>Scope).<attr.name> := <expr>;<\n>
+<else>
+(<scope>Stack.Peek.Implementor as T<scope>Scope).<attr.name> := <expr>;<\n>
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>Stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(IfThen(Assigned(<scope>),Def(<scope>).<attr.name>,<initValue(attr.type)>))
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+RetVal.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+RetVal.<attr.name> := <expr>;
+<else>
+<attr.name> := <expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(Def(<scope>).Text)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(Def(<scope>).TokenType)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(Def(<scope>).Line)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(Def(<scope>).CharPositionInLine)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(Def(<scope>).Channel)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(Def(<scope>).TokenIndex)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(StrToIntDef(Def(<scope>).Text,0))"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(IfThen(Assigned(<scope>), Def(<scope>).Start, nil) as I<labelType>)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(Def(<scope>).Stop as I<labelType>)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(Def(Def(<scope>).Tree as I<ASTLabelType>))"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+IfThen(Assigned(<scope>), Input.TokenStream.ToString(
+  Input.TreeAdaptor.GetTokenStartIndex(Def(<scope>).Start),
+  Input.TreeAdaptor.GetTokenStopIndex(Def(<scope>).Start)), '')
+<else>
+IfThen(Assigned(<scope>), Input.ToString(
+  (Def(<scope>).Start) as IToken,(Def(<scope>).Stop) as IToken), '')
+<endif>
+>>
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> != null) ? <scope>.ST : null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(Def(<scope>).TokenType)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(Def(<scope>).Line)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(IfThen(Assigned(<scope>),Def(<scope>).CharPositionInLine,-1))"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(Def(<scope>).Channel)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(Def(<scope>).TokenIndex)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(Def(<scope>).Text)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "(StrToIntDef(Def(<scope>).Text,0))"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "(RetVal.Start as I<labelType>)"
+rulePropertyRef_stop(scope,attr) ::= "(RetVal.Stop as I<labelType>)"
+rulePropertyRef_tree(scope,attr) ::= "(RetVal.Tree as I<ASTLabelType>)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+Input.TokenStream.ToString(
+  Input.TreeAdaptor.GetTokenStartIndex(RetVal.Start),
+  Input.TreeAdaptor.GetTokenStopIndex(RetVal.Start))
+<else>
+Input.ToString(RetVal.Start as IToken,Input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "RetVal.ST"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "TokenType"
+lexerRulePropertyRef_line(scope,attr) ::= "State.TokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "State.TokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "Channel"
+lexerRulePropertyRef_start(scope,attr) ::= "State.TokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "StrToInt(<scope>.Text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "RetVal.Tree := <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "RetVal.ST := <expr>;"
+
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if (<actions.(actionScope).synpredgate>) then
+begin
+  <action>
+end;
+<else>
+if (State.Backtracking = 0) then
+begin
+  <action>
+end;<\n>
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+<name> := TBitSet.Create([<words64:{<it>};separator=",">]);<\n>
+>>
+
+bitsetDecl(name) ::= <<
+<name>: IBitSet;<\n>
+>>
+
+codeFileExtension() ::= ".pas"
+
+true() ::= "True"
+false() ::= "False"
diff --git a/src/org/antlr/codegen/templates/Java/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
similarity index 56%
rename from src/org/antlr/codegen/templates/Java/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
index e01b59d..8a2b6dc 100644
--- a/src/org/antlr/codegen/templates/Java/AST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
@@ -44,6 +44,7 @@ parserMembers() ::= <<
 protected TreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
 public void setTreeAdaptor(TreeAdaptor adaptor) {
     this.adaptor = adaptor;
+    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
 }
 public TreeAdaptor getTreeAdaptor() {
     return adaptor;
@@ -63,28 +64,15 @@ ruleDeclarations() ::= <<
 
 ruleLabelDefs() ::= <<
 <super.ruleLabelDefs()>
-<ruleDescriptor.tokenLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
 <ruleDescriptor.tokenListLabels:{<ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
 <ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{RewriteRuleTokenStream stream_<it>=new RewriteRuleTokenStream(adaptor,"token <it>");}; separator="\n">
+    :{RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
 <ruleDescriptor.allRuleRefsInAltsWithRewrites
     :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
 >>
 
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(backtracking)>
-if ( backtracking==0 ) {<\n>
-<endif>
-    retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
-    adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-<if(backtracking)>
-}
-<endif>
-<endif>
->>
-
 /** When doing auto AST construction, we must define some variables;
  *  These should be turned off if doing rewrites.  This must be a "mode"
  *  as a rule could have both rewrite and AST within the same alternative
@@ -93,162 +81,65 @@ if ( backtracking==0 ) {<\n>
 @alt.declarations() ::= <<
 <if(autoAST)>
 <if(outerAlt)>
+<if(!rewriteMode)>
 root_0 = (<ASTLabelType>)adaptor.nil();<\n>
 <endif>
 <endif>
+<endif>
 >>
 
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
-adaptor.addChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
-root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
+// T r a c k i n g  R u l e  E l e m e n t s
 
-/** ID but track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>stream_<token>.add(<label>);<\n>
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
 >>
 
 /** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
  *  to the tracking list stream_ID for use in the rewrite.
  */
-tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRefTrack(...)>
 <listLabel(elem=label,...)>
 >>
 
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated
-// rather than just added on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.addChild(root_0, adaptor.create(<label>));})>
->>
-
-matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(adaptor.create(<label>), root_0);})>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>adaptor.addChild(root_0, <label>.getTree());
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
 >>
 
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_0);
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
 >>
 
 /** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args) ::= <<
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
 <super.ruleRef(...)>
-<if(backtracking)>if ( backtracking==0 ) <endif>stream_<rule>.add(<label>.getTree());
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.getTree());
 >>
 
 /** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label+".getTree()",...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefBang(...)>
 <listLabel(elem=label+".getTree()",...)>
 >>
 
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
 <ruleRefRuleRoot(...)>
-<listLabel(elem=label+".getTree()",...)>
->>
-
-// WILDCARD AST
-
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
-adaptor.addChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.getTree());
 >>
 
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( backtracking==0 ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
-root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".getTree()",...)>
 >>
 
-// TODO: ugh, am i really missing the combinations for Track and ListLabel?
-// there's got to be a better way
-
 // R e w r i t e
 
 rewriteCode(
@@ -258,6 +149,8 @@ rewriteCode(
 	referencedTokenListLabels,
 	referencedRuleLabels,
 	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
 	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
 <<
 
@@ -267,13 +160,32 @@ rewriteCode(
 // rule labels: <referencedRuleLabels; separator=", ">
 // token list labels: <referencedTokenListLabels; separator=", ">
 // rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
 <if(backtracking)>
-if ( backtracking==0 ) {<\n>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
 <endif>
 <prevRuleRootRef()>.tree = root_0;
 <rewriteCodeLabels()>
 root_0 = (<ASTLabelType>)adaptor.nil();
 <alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+input.replaceChildren(adaptor.getParent(retval.start),
+                      adaptor.getChildIndex(retval.start),
+                      adaptor.getChildIndex(_last),
+                      retval.tree);
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;
+<endif>
+<endif>
 <if(backtracking)>
 }
 <endif>
@@ -281,15 +193,23 @@ root_0 = (<ASTLabelType>)adaptor.nil();
 
 rewriteCodeLabels() ::= <<
 <referencedTokenLabels
-    :{RewriteRuleTokenStream stream_<it>=new RewriteRuleTokenStream(adaptor,"token <it>",<it>);};
+    :{RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
     separator="\n"
 >
 <referencedTokenListLabels
-    :{RewriteRuleTokenStream stream_<it>=new RewriteRuleTokenStream(adaptor,"token <it>", list_<it>);};
+    :{RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
     separator="\n"
 >
 <referencedRuleLabels
-    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",<it>!=null?<it>.tree:null);};
+    :{RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.tree:null);};
     separator="\n"
 >
 <referencedRuleListLabels
@@ -376,39 +296,39 @@ rewriteElement(e) ::= <<
 >>
 
 /** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,args) ::= <<
-adaptor.addChild(root_<treeLevel>, <if(args)>adaptor.create(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>);<\n>
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
 >>
 
 /** Gen $label ... where defined via label=ID */
 rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.next());<\n>
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
 >>
 
 /** Gen $label ... where defined via label+=ID */
 rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.next());<\n>
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
 >>
 
 /** Gen ^($label ...) */
 rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.next(), root_<treeLevel>);<\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
 >>
 
 /** Gen ^($label ...) where label+=... */
 rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
 
 /** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,args) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<if(args)>adaptor.create(<token>,<args; separator=", ">)<else>stream_<token>.next()<endif>, root_<treeLevel>);<\n>
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
 >>
 
-rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, adaptor.create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>));<\n>
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
 >>
 
-rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(adaptor.create(<token>, <args; separator=", "><if(!args)>"<token>"<endif>), root_<treeLevel>);<\n>
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
 >>
 
 /** plain -> {foo} action */
@@ -424,7 +344,7 @@ root_0 = <action>;<\n>
 prevRuleRootRef() ::= "retval"
 
 rewriteRuleRef(rule) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<rule>.next());<\n>
+adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());<\n>
 >>
 
 rewriteRuleRefRoot(rule) ::= <<
@@ -441,12 +361,12 @@ root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<action>, root_<treeLevel>
 
 /** Gen $ruleLabel ... where defined via ruleLabel=rule */
 rewriteRuleLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.next());<\n>
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
 >>
 
 /** Gen $ruleLabel ... where defined via ruleLabel+=rule */
 rewriteRuleListLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, ((<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)stream_<label>.next()).getTree());<\n>
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
 >>
 
 /** Gen ^($ruleLabel ...) where ruleLabel=rule */
@@ -458,3 +378,29 @@ root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(),
 rewriteRuleListLabelRefRoot(label) ::= <<
 root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
 >>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/Java/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
similarity index 67%
copy from src/org/antlr/codegen/templates/Java/ASTDbg.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
index 3abe396..b462e29 100644
--- a/src/org/antlr/codegen/templates/Java/ASTDbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
@@ -32,21 +32,29 @@
 group ASTDbg;
 
 parserMembers() ::= <<
-protected DebugTreeAdaptor adaptor =
-	  new DebugTreeAdaptor(null,new CommonTreeAdaptor());
+protected DebugTreeAdaptor adaptor;
 public void setTreeAdaptor(TreeAdaptor adaptor) {
+<if(grammar.grammarIsRoot)>
     this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+<else>
+    this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor 
+<endif><\n>
+    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
 }
 public TreeAdaptor getTreeAdaptor() {
     return adaptor;
 }<\n>
 >>
 
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
 createListenerAndHandshake() ::= <<
 DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this, port, adaptor);
+    new DebugEventSocketProxy(this,port,<if(TREE_PARSER)>input.getTreeAdaptor()<else>adaptor<endif>);
 setDebugListener(proxy);
-adaptor.setDebugEventListener(proxy);
+set<inputStreamType>(new Debug<inputStreamType>(input,proxy));
 try {
     proxy.handshake();
 }
@@ -55,11 +63,25 @@ catch (IOException ioe) {
 }
 >>
 
-ctorForPredefinedListener() ::= <<
-public <name>(<inputStreamType> input, DebugEventListener dbg) {
-    super(input, dbg);
-    adaptor.setDebugEventListener(dbg);
-}<\n>
+ at ctorForRootGrammar.finally() ::= <<
+TreeAdaptor adap = new CommonTreeAdaptor();
+setTreeAdaptor(adap);
+proxy.setTreeAdaptor(adap);
+>>
+
+ at ctorForProfilingRootGrammar.finally() ::=<<
+TreeAdaptor adap = new CommonTreeAdaptor();
+setTreeAdaptor(adap);
+proxy.setTreeAdaptor(adap);
+>>
+
+ at ctorForPredefinedListener.superClassRef() ::= "super(input, dbg);"
+
+ at ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
+TreeAdaptor adap = new CommonTreeAdaptor();
+setTreeAdaptor(adap);<\n>
+<endif>
 >>
 
 @rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg
new file mode 100644
index 0000000..7585696
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg
@@ -0,0 +1,190 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+retval.tree = (<ASTLabelType>)adaptor.errorNode(input, retval.start, input.LT(-1), re);
+<! System.out.println("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<createNodeFromToken(...)>, root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <label>.getTree());
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)adaptor.create(<label>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg
new file mode 100644
index 0000000..25cd89c
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg
@@ -0,0 +1,296 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> _first_0 = null;
+<ASTLabelType> _last = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = (<ASTLabelType>)_first_0;
+if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
+    retval.tree = (<ASTLabelType>)adaptor.getParent(retval.tree);
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = null;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.dupTree(<label>);
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<treeLevel>, <label>.getTree());
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_<treeLevel>);
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/Java/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
similarity index 60%
rename from src/org/antlr/codegen/templates/Java/Dbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
index 9633c69..908224b 100644
--- a/src/org/antlr/codegen/templates/Java/Dbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
@@ -37,38 +37,38 @@ import java.io.IOException;
 >>
 
 @genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
 public static final String[] ruleNames = new String[] {
-    "invalidRule", <rules:{rST | "<rST.ruleName>"}; wrap="\n    ", separator=", ">
+    "invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n    ", separator=", ">
 };<\n>
-public int ruleLevel = 0;
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-public <name>(<inputStreamType> input, int port) {
+<endif>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+    public int ruleLevel = 0;
+    public int getRuleLevel() { return ruleLevel; }
+    public void incRuleLevel() { ruleLevel++; }
+    public void decRuleLevel() { ruleLevel--; }
 <if(profile)>
-        this(input, new Profiler(null));
-        Profiler p = (Profiler)dbg;
-        p.setParser(this);
+    <ctorForProfilingRootGrammar()>
 <else>
-        super(input, port);
-        <createListenerAndHandshake()>
-<endif><\n>
-<if(memoize)>
-        ruleMemo = new Map[<numRules>+1];<\n><! index from 1..n !>
+    <ctorForRootGrammar()>
 <endif>
-}
-public <name>(<inputStreamType> input) {
-    this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT);
-}
 <ctorForPredefinedListener()>
+<else> <! imported grammar !>
+    public int getRuleLevel() { return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel(); }
+    public void incRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel(); }
+    public void decRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel(); }
+    <ctorForDelegateGrammar()>
+<endif>
 <if(profile)>
 public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-    ((Profiler)dbg).examineRuleMemoization(input, ruleIndex, ruleNames[ruleIndex]);
+    ((Profiler)dbg).examineRuleMemoization(input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
     return super.alreadyParsedRule(input, ruleIndex);
 }<\n>
 public void memoize(IntStream input,
                     int ruleIndex,
                     int ruleStartIndex)
 {
-    ((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, ruleNames[ruleIndex]);
+    ((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
     super.memoize(input, ruleIndex, ruleStartIndex);
 }<\n>
 <endif>
@@ -78,13 +78,68 @@ protected boolean evalPredicate(boolean result, String predicate) {
 }<\n>
 >>
 
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+public <name>(<inputStreamType> input) {
+    this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, int port, RecognizerSharedState state) {
+    super(input, state);
+    <parserCtorBody()>
+    <createListenerAndHandshake()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+    this(input, new Profiler(null), new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, DebugEventListener dbg, RecognizerSharedState state) {
+    super(input, dbg, state);
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners are root will have it. */
+ctorForDelegateGrammar() ::= <<
+public <name>(<inputStreamType> input, DebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+    super(input, dbg, state);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+    <@superClassRef>super(input, dbg, new RecognizerSharedState());<@end>
+<if(profile)>
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+<endif>
+    <parserCtorBody()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+>>
+
 createListenerAndHandshake() ::= <<
 <if(TREE_PARSER)>
 DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this, port, input.getTreeAdaptor());
+    new DebugEventSocketProxy(this, port, input.getTreeAdaptor());<\n>
 <else>
 DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this, port, null);
+    new DebugEventSocketProxy(this, port, null);<\n>
 <endif>
 setDebugListener(proxy);
 try {
@@ -95,18 +150,12 @@ catch (IOException ioe) {
 }
 >>
 
-ctorForPredefinedListener() ::= <<
-public <name>(<inputStreamType> input, DebugEventListener dbg) {
-    super(input, dbg);
-}<\n>
->>
-
 @genericParser.superClassName() ::= "Debug<@super.superClassName()>"
 
 @rule.preamble() ::= <<
-try { dbg.enterRule("<ruleName>");
-if ( ruleLevel==0 ) {dbg.commence();}
-ruleLevel++;
+try { dbg.enterRule(getGrammarFileName(), "<ruleName>");
+if ( getRuleLevel()==0 ) {dbg.commence();}
+incRuleLevel();
 dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
 >>
 
@@ -114,15 +163,15 @@ dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>);<\n>
 dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>);<\n>
 }
 finally {
-    dbg.exitRule("<ruleName>");
-    ruleLevel--;
-    if ( ruleLevel==0 ) {dbg.terminate();}
+    dbg.exitRule(getGrammarFileName(), "<ruleName>");
+    decRuleLevel();
+    if ( getRuleLevel()==0 ) {dbg.terminate();}
 }<\n>
 >>
 
- at synpred.start() ::= "dbg.beginBacktrack(backtracking);"
+ at synpred.start() ::= "dbg.beginBacktrack(state.backtracking);"
 
- at synpred.stop() ::= "dbg.endBacktrack(backtracking, success);"
+ at synpred.stop() ::= "dbg.endBacktrack(state.backtracking, success);"
 
 // Common debug event triggers used by region overrides below
 
diff --git a/src/org/antlr/codegen/templates/Java/Java.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
similarity index 73%
copy from src/org/antlr/codegen/templates/Java/Java.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
index 92f6b75..0a6d9a0 100644
--- a/src/org/antlr/codegen/templates/Java/Java.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
@@ -30,7 +30,7 @@ group Java implements ANTLRCore;
 javaTypeInitMap ::= [
 	"int":"0",
 	"long":"0",
-	"float":"0.0",
+	"float":"0.0f",
 	"double":"0.0",
 	"boolean":"false",
 	"byte":"0",
@@ -45,7 +45,7 @@ javaTypeInitMap ::= [
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass, literals) ::=
@@ -71,18 +71,37 @@ import java.util.HashMap;
 <recognizer>
 >>
 
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-public class <name> extends Lexer {
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="CommonToken",
+      filterMode, superClass="Lexer") ::= <<
+public class <grammar.recognizerName> extends <@superClassName><superClass><@end> {
     <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
     <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
     <actions.lexer.members>
-    public <name>() {;} <! needed by subclasses !>
-    public <name>(CharStream input) {
-        super(input);
-<if(backtracking)>
-        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    public <grammar.recognizerName>() {;} <! needed by subclasses !>
+    public <grammar.recognizerName>(CharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>);
+    }
+    public <grammar.recognizerName>(CharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+        super(input,state);
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        state.ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+<endif>
 <endif>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+        <grammar.delegators:
+         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
     }
     public String getGrammarFileName() { return "<fileName>"; }
 
@@ -111,27 +130,27 @@ public Token nextToken() {
         if ( input.LA(1)==CharStream.EOF ) {
             return Token.EOF_TOKEN;
         }
-        token = null;
-	channel = Token.DEFAULT_CHANNEL;
-        tokenStartCharIndex = input.index();
-        tokenStartCharPositionInLine = input.getCharPositionInLine();
-        tokenStartLine = input.getLine();
-	text = null;
+        state.token = null;
+	state.channel = Token.DEFAULT_CHANNEL;
+        state.tokenStartCharIndex = input.index();
+        state.tokenStartCharPositionInLine = input.getCharPositionInLine();
+        state.tokenStartLine = input.getLine();
+	state.text = null;
         try {
             int m = input.mark();
-            backtracking=1; <! means we won't throw slow exception !>
-            failed=false;
+            state.backtracking=1; <! means we won't throw slow exception !>
+            state.failed=false;
             mTokens();
-            backtracking=0;
+            state.backtracking=0;
             <! mTokens backtracks with synpred at backtracking==2
                and we set the synpredgate to allow actions at level 1. !>
-            if ( failed ) {
+            if ( state.failed ) {
                 input.rewind(m);
                 input.consume(); <! advance one char and try again !>
             }
             else {
                 emit();
-                return token;
+                return state.token;
             }
         }
         catch (RecognitionException re) {
@@ -146,44 +165,69 @@ public void memoize(IntStream input,
 		int ruleIndex,
 		int ruleStartIndex)
 {
-if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+if ( state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
 }
 
 public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+if ( state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
 return false;
 }
 >>
 
-filteringActionGate() ::= "backtracking==1"
+actionGate() ::= "state.backtracking==0"
+
+filteringActionGate() ::= "state.backtracking==1"
 
 /** How to generate a parser */
 genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
               bitsets, inputStreamType, superClass,
-              ASTLabelType="Object", labelType, members) ::= <<
-public class <name> extends <@superClassName><superClass><@end> {
+              ASTLabelType="Object", labelType, members, rewriteElementType,
+              filterMode) ::= <<
+public class <grammar.recognizerName> extends <@superClassName><superClass><@end> {
+<if(grammar.grammarIsRoot)>
     public static final String[] tokenNames = new String[] {
         "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-    };
+    };<\n>
+<endif>
     <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
+
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
     <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
     <@members>
-   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-    public <name>(<inputStreamType> input) {
-        super(input);
-<if(backtracking)>
-        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
+    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+    public <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>);
+    }
+    public <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+        super(input, state);
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">         
+        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
     }
     <@end>
 
-    public String[] getTokenNames() { return tokenNames; }
+    public String[] getTokenNames() { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
     public String getGrammarFileName() { return "<fileName>"; }
 
     <members>
 
     <rules; separator="\n\n">
 
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+    public <returnType()> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException \{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}; separator="\n">
+
     <synpreds:{p | <synpred(p)>}>
 
     <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
@@ -194,15 +238,31 @@ public class <name> extends <@superClassName><superClass><@end> {
 }
 >>
 
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", ...)>
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new HashMap[<length(grammar.allImportedRules)>+1];<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="Object", superClass="Parser", labelType="Token",
+       members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
 >>
 
 /** How to generate a tree parser; same as parser except the input
  *  stream is a different type.
  */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", ...)>
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object",
+           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+           members={<actions.treeparser.members>},
+           filterMode) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
 >>
 
 /** A simpler version of a rule template that is specific to the imaginary
@@ -216,6 +276,7 @@ synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 <<
 // $ANTLR start <ruleName>
 public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {   
+    <ruleLabelDefs()>
 <if(trace)>
     traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
     try {
@@ -233,7 +294,7 @@ public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterSc
 
 synpred(name) ::= <<
 public final boolean <name>() {
-    backtracking++;
+    state.backtracking++;
     <@start()>
     int start = input.mark();
     try {
@@ -241,11 +302,11 @@ public final boolean <name>() {
     } catch (RecognitionException re) {
         System.err.println("impossible: "+re);
     }
-    boolean success = !failed;
+    boolean success = !state.failed;
     input.rewind(start);
     <@stop()>
-    backtracking--;
-    failed=false;
+    state.backtracking--;
+    state.failed=false;
     return success;
 }<\n>
 >>
@@ -256,18 +317,18 @@ lexerSynpred(name) ::= <<
 
 ruleMemoization(name) ::= <<
 <if(memoize)>
-if ( backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+if ( state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
 <endif>
 >>
 
 /** How to test for failure and return from rule */
 checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+<if(backtracking)>if (state.failed) return <ruleReturnValue()>;<endif>
 >>
 
 /** This rule has failed, exit indicating failure during backtrack */
 ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (backtracking>0) {failed=true; return <ruleReturnValue()>;}<endif>
+<if(backtracking)>if (state.backtracking>0) {state.failed=true; return <ruleReturnValue()>;}<endif>
 >>
 
 /** How to generate code for a rule.  This includes any return type
@@ -277,7 +338,7 @@ rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memo
 <ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
 <returnScope(scope=ruleDescriptor.returnScope)>
 
-// $ANTLR start <ruleName>
+// $ANTLR start "<ruleName>"
 // <fileName>:<description>
 public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
     <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
@@ -302,6 +363,7 @@ public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterS
     catch (RecognitionException re) {
         reportError(re);
         recover(input,re);
+	<@setErrorReturnValue()>
     }<\n>
 <endif>
 <endif>
@@ -315,7 +377,7 @@ public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterS
     <@postamble()>
     return <ruleReturnValue()>;
 }
-// $ANTLR end <ruleName>
+// $ANTLR end "<ruleName>"
 >>
 
 catch(decl,action) ::= <<
@@ -348,11 +410,13 @@ ruleScopeCleanUp() ::= <<
 <ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
 >>
 
+
 ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
     :{<labelType> <it.label.text>=null;}; separator="\n"
 >
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
     :{List list_<it.label.text>=null;}; separator="\n"
 >
 <ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
@@ -367,7 +431,6 @@ lexerRuleLabelDefs() ::= <<
 >
 <ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
 <[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
   ruleDescriptor.ruleListLabels]
     :{List list_<it.label.text>=null;}; separator="\n"
 >
@@ -396,7 +459,7 @@ retval.stop = input.LT(-1);<\n>
 memoize() ::= <<
 <if(memoize)>
 <if(backtracking)>
-if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+if ( state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
 <endif>
 <endif>
 >>
@@ -405,7 +468,7 @@ if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.n
  *  fragment rules.
  */
 lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start <ruleName>
+// $ANTLR start "<ruleName>"
 public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
     <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
     <ruleScopeSetUp()>
@@ -418,12 +481,14 @@ public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scop
         <block><\n>
 <else>
         int _type = <ruleName>;
+        int _channel = DEFAULT_TOKEN_CHANNEL;
         <ruleMemoization(name=ruleName)>
         <lexerRuleLabelDefs()>
         <ruleDescriptor.actions.init>
         <block>
         <ruleCleanUp()>
-        this.type = _type;
+        state.type = _type;
+        state.channel = _channel;
         <(ruleDescriptor.actions.after):execAction()>
 <endif>
     }
@@ -433,7 +498,7 @@ public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scop
         <memoize()>
     }
 }
-// $ANTLR end <ruleName>
+// $ANTLR end "<ruleName>"
 >>
 
 /** How to generate code for the implicitly-defined lexer grammar rule
@@ -562,15 +627,21 @@ case <i> :
 >>
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
 // <fileName>:<description>
 {
 <@declarations()>
 <elements:element()>
+<rew>
 <@cleanup()>
 }
 >>
 
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
 // E L E M E N T S
 
 /** Dump the elements one per line */
@@ -580,15 +651,12 @@ element() ::= <<
 >>
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=(<labelType>)<endif>match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -627,24 +695,28 @@ if ( <s> ) {
     input.consume();
     <postmatchCode>
 <if(!LEXER)>
-    errorRecovery=false;
+    state.errorRecovery=false;
 <endif>
-    <if(backtracking)>failed=false;<endif>
+    <if(backtracking)>state.failed=false;<endif>
 }
 else {
     <ruleBacktrackFailure()>
-    MismatchedSetException mse =
-        new MismatchedSetException(null,input);
+    MismatchedSetException mse = new MismatchedSetException(null,input);
     <@mismatchedSetException()>
 <if(LEXER)>
     recover(mse);
+    throw mse;
 <else>
+    throw mse;
+    <! use following code to make it recover inline; remove throw mse;
     recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
 <endif>
-    throw mse;
 }<\n>
 >>
 
+matchRuleBlockSet ::= matchSet
+
 matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 <matchSet(...)>
 <listLabel(elem=label,...)>
@@ -655,7 +727,7 @@ lexerStringRef(string,label) ::= <<
 <if(label)>
 int <label>Start = getCharIndex();
 match(<string>); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
+<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
 <else>
 match(<string>); <checkRuleBacktrackFailure()><\n>
 <endif>
@@ -687,38 +759,42 @@ wildcardCharListLabel(label, elementIndex) ::= <<
 >>
 
 /** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
  */
-ruleRef(rule,label,elementIndex,args) ::= <<
-pushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
-<if(label)>
-<label>=<rule>(<args; separator=", ">);<\n>
-<else>
-<rule>(<args; separator=", ">);<\n>
-<endif>
-_fsp--;
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+state._fsp--;
 <checkRuleBacktrackFailure()>
 >>
 
 /** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
 <listLabel(elem=label,...)>
 >>
 
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
 <if(label)>
 int <label>Start<elementIndex> = getCharIndex();
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
 <else>
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -728,14 +804,15 @@ lexerMatchEOF(label,elementIndex) ::= <<
 <if(label)>
 int <label>Start<elementIndex> = getCharIndex();
 match(EOF); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<labelType> <label> = new <labelType>(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
 <else>
 match(EOF); <checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
 <root:element()>
 <actionsAfterRoot:element()>
 <if(nullableChildList)>
@@ -937,7 +1014,16 @@ class DFA<dfa.decisionNumber> extends DFA {
     }
     <@errorMethod()>
 <if(dfa.specialStateSTs)>
-    public int specialStateTransition(int s, IntStream input) throws NoViableAltException {
+    public int specialStateTransition(int s, IntStream _input) throws NoViableAltException {
+        <if(LEXER)>
+        IntStream input = _input;
+        <endif>
+        <if(PARSER)>
+        TokenStream input = (TokenStream)_input;
+        <endif>
+        <if(TREE_PARSER)>
+        TreeNodeStream input = (TreeNodeStream)_input;
+        <endif>
     	int _s = s;
         switch ( s ) {
         <dfa.specialStateSTs:{state |
@@ -945,7 +1031,7 @@ class DFA<dfa.decisionNumber> extends DFA {
             <state>}; separator="\n">
         }
 <if(backtracking)>
-        if (backtracking>0) {failed=true; return -1;}<\n>
+        if (state.backtracking>0) {state.failed=true; return -1;}<\n>
 <endif>
         NoViableAltException nvae =
             new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input);
@@ -997,7 +1083,7 @@ orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
 
 notPredicate(pred) ::= "!(<evalPredicate(...)>)"
 
-evalPredicate(pred,description) ::= "<pred>"
+evalPredicate(pred,description) ::= "(<pred>)"
 
 evalSynPredicate(pred,description) ::= "<pred>()"
 
@@ -1036,9 +1122,11 @@ protected Stack <scope.name>_stack = new Stack();<\n>
 <endif>
 >>
 
+returnStructName() ::= "<it.name>_return"
+
 returnType() ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.name>_return
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
 <else>
 <if(ruleDescriptor.hasSingleReturnValue)>
 <ruleDescriptor.singleValueReturnType>
@@ -1053,7 +1141,7 @@ void
  */
 ruleLabelType(referencedRule) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
+<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
 <else>
 <if(referencedRule.hasSingleReturnValue)>
 <referencedRule.singleValueReturnType>
@@ -1063,6 +1151,10 @@ void
 <endif>
 >>
 
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
 /** Using a type to init value map, try to init a type; if not in table
  *  must be an object, default value is "null".
  */
@@ -1081,7 +1173,7 @@ ruleLabelDef(label) ::= <<
  */
 returnScope(scope) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-public static class <returnType()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
+public static class <ruleDescriptor:returnStructName()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
     <scope.attributes:{public <it.decl>;}; separator="\n">
     <@ruleReturnMembers()>
 };
@@ -1128,7 +1220,7 @@ isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
 /** reference an attribute of rule; might only have single return value */
 ruleLabelRef(referencedRule,scope,attr) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
+(<scope>!=null?<scope>.<attr.name>:<initValue(attr.type)>)
 <else>
 <scope>
 <endif>
@@ -1159,38 +1251,47 @@ listLabelRef(label) ::= "list_<label>"
 
 // not sure the next are the right approach
 
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.getText():null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.getType():0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.getLine():0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.getCharPositionInLine():0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.getChannel():0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.getTokenIndex():0)"
 tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?Integer.valueOf(<scope>.getText()):0)"
 
-ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.start):null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.stop):null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.tree):null)"
 ruleLabelPropertyRef_text(scope,attr) ::= <<
 <if(TREE_PARSER)>
-input.getTokenStream().toString(
+(<scope>!=null?(input.getTokenStream().toString(
   input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))
+  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))):null)
 <else>
-input.toString(<scope>.start,<scope>.stop)
+(<scope>!=null?input.toString(<scope>.start,<scope>.stop):null)
 <endif>
 >>
 
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.st:null)"
 
 /** Isolated $RULE ref ok in lexer as it's a Token */
 lexerRuleLabel(label) ::= "<label>"
 
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(<scope>!=null?<scope>.getType():0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(<scope>!=null?<scope>.getLine():0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= 
+    "(<scope>!=null?<scope>.getCharPositionInLine():-1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(<scope>!=null?<scope>.getChannel():0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(<scope>!=null?<scope>.getTokenIndex():0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(<scope>!=null?<scope>.getText():null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(<scope>!=null?Integer.valueOf(<scope>.getText()):0)"
 
 // Somebody may ref $template or $tree or $stop within a rule:
 rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
@@ -1209,36 +1310,33 @@ rulePropertyRef_st(scope,attr) ::= "retval.st"
 
 lexerRulePropertyRef_text(scope,attr) ::= "getText()"
 lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
 lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "channel"
-lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
 lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "Integer.valueOf(<scope>.getText())"
 
 // setting $st and $tree is allowed in local rule. everything else
 // is flagged as error
 ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
 ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
 
-
-/** How to execute an action */
+/** How to execute an action (only when not backtracking) */
 execAction(action) ::= <<
 <if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
 if ( <actions.(actionScope).synpredgate> ) {
   <action>
 }
 <else>
-if ( backtracking==0 ) {
-  <action>
-}
-<endif>
-<else>
 <action>
 <endif>
 >>
 
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
 // M I S C (properties, etc...)
 
 bitset(name, words64) ::= <<
diff --git a/src/org/antlr/codegen/templates/Java/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
similarity index 95%
copy from src/org/antlr/codegen/templates/Java/ST.stg
copy to tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
index dbe70eb..f4847e4 100644
--- a/src/org/antlr/codegen/templates/Java/ST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
@@ -71,7 +71,7 @@ public static class STAttrMap extends HashMap {
 >>
 
 /** x+=rule when output=template */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
 <listLabel(elem=label+".getTemplate()",...)>
 >>
@@ -80,13 +80,13 @@ rewriteTemplate(alts) ::= <<
 
 // TEMPLATE REWRITE
 <if(backtracking)>
-if ( backtracking==0 ) {
+if ( <actions.(actionScope).synpredgate> ) {
   <alts:rewriteTemplateAlt(); separator="else ">
-  <if(rewrite)><replaceTextInLine()><endif>
+  <if(rewriteMode)><replaceTextInLine()><endif>
 }
 <else>
 <alts:rewriteTemplateAlt(); separator="else ">
-<if(rewrite)><replaceTextInLine()><endif>
+<if(rewriteMode)><replaceTextInLine()><endif>
 <endif>
 >>
 
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg
new file mode 100644
index 0000000..b97b5cc
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg
@@ -0,0 +1,391 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group AST;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+<!protected TreeAdaptor adaptor = new CommonTreeAdaptor();<\n>!>
+setTreeAdaptor: function(adaptor) {
+    this.adaptor = adaptor;
+    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
+},
+getTreeAdaptor: function() {
+    return this.adaptor;
+},
+>>
+
+ at returnScope.ruleReturnMembers() ::= <<
+getTree: function() { return this.tree; }
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+var root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<ruleDescriptor.tokenLabels:{var <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{var <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{var stream_<it>=new org.antlr.runtime.tree.RewriteRuleTokenStream(this.adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{var stream_<it>=new org.antlr.runtime.tree.RewriteRuleSubtreeStream(this.adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = this.adaptor.nil();<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+wildcardTrack(label,elementIndex) ::= <<
+<super.wildcard(...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.getTree());
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.getTree());
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+    referencedWildcardLabels,
+    referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = this.adaptor.nil();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = this.adaptor.rulePostProcessing(root_0);
+this.input.replaceChildren(this.adaptor.getParent(retval.start),
+                      this.adaptor.getChildIndex(retval.start),
+                      this.adaptor.getChildIndex(_last),
+                      retval.tree);
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;
+<endif>
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{var stream_<it>=new org.antlr.runtime.tree.RewriteRule<rewriteElementType>Stream(this.adaptor,"token <it>",<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{var stream_<it>=new org.antlr.runtime.tree.RewriteRule<rewriteElementType>Stream(this.adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{var stream_<it>=new org.antlr.runtime.tree.RewriteRuleSubtreeStream(this.adaptor,"token <it>",<it>!=null?<it>.tree:null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{var stream_<it>=new org.antlr.runtime.tree.RewriteRuleSubtreeStream(this.adaptor,"token <it>",list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator="||"> ) {
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.hasNext()}; separator="||">) ) {
+    throw new org.antlr.runtime.tree.RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+var root_<treeLevel> = this.adaptor.nil();
+<root:rewriteElement()>
+<children:rewriteElement()>
+this.adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+this.adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+this.adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+this.adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+this.adaptor.addChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+this.adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+this.adaptor.addChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+this.adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+this.adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = this.adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+this.adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+this.adaptor.create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg
new file mode 100644
index 0000000..45f6f27
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg
@@ -0,0 +1,161 @@
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+ at rule.setErrorReturnValue() ::= <<
+retval.tree = this.adaptor.errorNode(this.input, retval.start, this.input.LT(-1), re);
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+this.adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = this.adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>this.adaptor.addChild(root_0, <createNodeFromToken(...)>);})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label>=input.LT(1);<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = this.adaptor.becomeRoot(<createNodeFromToken(...)>, root_0);})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>this.adaptor.addChild(root_0, <label>.getTree());
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = this.adaptor.becomeRoot(<label>.getTree(), root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = this.adaptor.create(<label>);
+this.adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = this.adaptor.create(<label>);
+root_0 = this.adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+this.adaptor.create(<label>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = this.adaptor.rulePostProcessing(root_0);
+this.adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg
new file mode 100644
index 0000000..3f13df1
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg
@@ -0,0 +1,253 @@
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+var _first_0 = null;
+var _last = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = _first_0;
+if ( this.adaptor.getParent(retval.tree) && this.adaptor.isNil( this.adaptor.getParent(retval.tree) ) )
+    retval.tree = this.adaptor.getParent(retval.tree);
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = this.input.LT(1);
+{
+var _save_last_<treeLevel> = _last;
+var _first_<treeLevel> = null;
+<if(!rewriteMode)>
+var root_<treeLevel> = this.adaptor.nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( !_first_<enclosingTreeLevel> ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( !_first_<enclosingTreeLevel> ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( this.input.LA(1)==org.antlr.runtime.Token.DOWN ) {
+    this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+this.adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = this.input.LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = this.input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = this.adaptor.dupNode(<label>);
+<endif><\n>
+this.adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( !_first_<treeLevel> ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = this.input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = this.adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = this.adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = this.input.LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = this.adaptor.dupNode(<label>);
+<endif><\n>
+this.adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = this.input.LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = this.adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = this.adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = this.input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+this.adaptor.addChild(root_<treeLevel>, <label>.getTree());
+<else> <! rewrite mode !>
+if ( !_first_<treeLevel> ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = this.input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = this.adaptor.becomeRoot(<label>.getTree(), root_<treeLevel>);
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = this.input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = this.input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = this.input.LT(1);
+<super.ruleRefRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = this.input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = this.adaptor.rulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg
new file mode 100644
index 0000000..68a3330
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg
@@ -0,0 +1,1326 @@
+group JavaScript implements ANTLRCore;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+       bitsets, buildTemplate, buildAST, rewriteMode, profile,
+       backtracking, synpreds, memoize, numRules,
+       fileName, ANTLRVersion, generatedTimestamp, trace,
+       scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<actions.(actionScope).header>
+
+<@imports>
+<if(TREE_PARSER)>
+<endif>
+<@end>
+
+<docComment>
+<recognizer>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode, superClass="org.antlr.runtime.Lexer") ::= <<
+var <grammar.recognizerName> = function(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
+// alternate constructor @todo
+// public <grammar.recognizerName>(CharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+// public <grammar.recognizerName>(CharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+    if (!state) {
+        state = new org.antlr.runtime.RecognizerSharedState();
+    }
+
+    (function(){
+        <actions.lexer.members>
+    }).call(this);
+
+    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this);}; separator="\n">
+    <grammar.recognizerName>.superclass.constructor.call(this, input, state);
+    <if(memoize)>
+    <if(grammar.grammarIsRoot)>
+    this.state.ruleMemo = {};
+    <endif>
+    <endif>
+
+    <grammar.directDelegates:
+       {g|this.<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+    <grammar.delegators:
+       {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|this.gParent = this.<g:delegateName()>;}>
+
+    <actions.lexer.init>
+};
+
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    <tokens:{<it.name>: <it.type>}; separator=",\n">
+});
+
+(function(){
+var HIDDEN = org.antlr.runtime.Token.HIDDEN_CHANNEL,
+    EOF = org.antlr.runtime.Token.EOF;
+org.antlr.lang.extend(<grammar.recognizerName>, <@superClassName><superClass><@end>, {
+    <tokens:{<it.name> : <it.type>,}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    getGrammarFileName: function() { return "<fileName>"; }
+});
+org.antlr.lang.augmentObject(<grammar.recognizerName>.prototype, {
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator=",\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}; separator=",\n">
+}, true); // important to pass true to overwrite default implementations
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+})();
+>>
+
+/** A override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+nextToken: function() {
+    while (true) {
+        if ( this.input.LA(1)==org.antlr.runtime.CharStream.EOF ) {
+            return org.antlr.runtime.Token.EOF_TOKEN;
+        }
+        this.state.token = null;
+        this.state.channel = org.antlr.runtime.Token.DEFAULT_CHANNEL;
+        this.state.tokenStartCharIndex = this.input.index();
+        this.state.tokenStartCharPositionInLine = this.input.getCharPositionInLine();
+        this.state.tokenStartLine = this.input.getLine();
+        this.state.text = null;
+        try {
+            var m = this.input.mark();
+            this.state.backtracking=1; <! means we won't throw slow exception !>
+            this.state.failed=false;
+            this.mTokens();
+            this.state.backtracking=0;
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if ( this.state.failed ) {
+                this.input.rewind(m);
+                this.input.consume(); <! advance one char and try again !>
+            }
+            else {
+                this.emit();
+                return this.state.token;
+            }
+        }
+        catch (re) {
+            // shouldn't happen in backtracking mode, but...
+            if (re instanceof org.antlr.runtime.RecognitionException) {
+                this.reportError(re);
+                this.recover(re);
+            } else {
+                throw re;
+            }
+        }
+    }
+},
+
+memoize: function(input, ruleIndex, ruleStartIndex) {
+    if (this.state.backtracking>1) {
+        <grammar.recognizerName>.superclass.prototype.memoize.call(this, input, ruleIndex, ruleStartIndex);
+    }
+},
+
+alreadyParsedRule: function(input, ruleIndex) {
+    if (this.state.backtracking>1) {
+        return <grammar.recognizerName>.superclass.prototype.alreadyParsedRule.call(this, input, ruleIndex);
+    }
+    return false;
+},
+
+
+>>
+
+actionGate() ::= "this.state.backtracking===0"
+
+filteringActionGate() ::= "this.state.backtracking===1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              ASTLabelType="Object", labelType, members, rewriteElementType) ::= <<
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+var <grammar.recognizerName> = function(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
+    if (!state) {
+        state = new org.antlr.runtime.RecognizerSharedState();
+    }
+
+    (function(){
+        <members>
+    }).call(this);
+
+    <grammar.recognizerName>.superclass.constructor.call(this, input, state);
+
+    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this);}; separator="\n">
+
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|this.<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">         
+         <grammar.indirectDelegates:{g | this.<g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+         <last(grammar.delegators):{g|this.gParent = this.<g:delegateName()>;}>
+
+    /* @todo only create adaptor if output=AST */
+    this.adaptor = new org.antlr.runtime.tree.CommonTreeAdaptor();<\n>
+};
+
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    <tokens:{<it.name>: <it.type>}; separator=",\n">
+});
+
+(function(){
+// public class variables
+var <tokens:{<it.name>= <it.type>}; separator=",\n    ">;
+<if(TREE_PARSER)>
+var UP = org.antlr.runtime.Token.UP,
+    DOWN = org.antlr.runtime.Token.DOWN;
+<endif>
+
+
+// public instance methods/vars
+org.antlr.lang.extend(<grammar.recognizerName>, org.antlr.runtime.<@superClassName><superClass><@end>, {
+    <@members>
+    <@end>
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+
+    getTokenNames: function() { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; },
+    getGrammarFileName: function() { return "<fileName>"; }
+});
+org.antlr.lang.augmentObject(<grammar.recognizerName>.prototype, {
+
+    <rules; separator=",\n\n">
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+    , <ruleDescriptor.name>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) \{ <if(ruleDescriptor.hasReturnValue)>return <endif>this.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}>
+
+
+
+    <synpreds:{p | <synpred(p)>}; separator=",\n">
+
+}, true); // important to pass true to overwrite default implementations
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+// public class variables
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+<if(grammar.grammarIsRoot)>
+    tokenNames: ["\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">],<\n>
+<endif>
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits); separator=",\n">
+});
+
+})();
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = {};<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="var", superClass="tree.TreeParser", members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start "<ruleName>"
+<ruleName>_fragment: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
+<if(trace)>
+    this.traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try {
+        <block>
+    }
+    finally {
+        this.traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+},
+// $ANTLR end "<ruleName>"
+>>
+
+synpred(name) ::= <<
+<name>: function() {
+    this.state.backtracking++;
+    <@start()>
+    var start = this.input.mark();
+    try {
+        this.<name>_fragment(); // can never throw exception
+    } catch (re) {
+        alert("impossible: "+re.toString());
+    }
+    var success = !this.state.failed;
+    this.input.rewind(start);
+    <@stop()>
+    this.state.backtracking--;
+    this.state.failed=false;
+    return success;
+}
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( this.state.backtracking>0 && this.alreadyParsedRule(this.input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (this.state.failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (this.state.backtracking>0) {this.state.failed=true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// <fileName>:<description>
+// $ANTLR start "<ruleName>"
+<ruleDescriptor.actions.decorate>
+<ruleName>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
+    <if(trace)>this.traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try {
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (re) {
+        if (re instanceof org.antlr.runtime.RecognitionException) {
+            this.reportError(re);
+            this.recover(this.input,re);
+            <@setErrorReturnValue()>
+        } else {
+            throw re;
+        }
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally {
+        <if(trace)>this.traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+var retval = new <returnType()>();
+retval.start = this.input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+var <a.name> = <if(a.initValue)><a.initValue><else>null<endif>;
+}>
+<endif>
+<if(memoize)>
+var <ruleDescriptor.name>_StartIndex = this.input.index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{this.<it>_stack.push({});}; separator="\n">
+<ruleDescriptor.ruleScope:{this.<it.name>_stack.push({});}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{this.<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{this.<it.name>_stack.pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{var <it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{var list_<it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{var <it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{var <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{var list_<it.label.text>=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = this.input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( this.state.backtracking>0 ) { this.memoize(this.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start <ruleName>
+m<ruleName>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)  {
+    <if(trace)>this.traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        var _type = this.<ruleName>;
+        var _channel = org.antlr.runtime.BaseRecognizer.DEFAULT_TOKEN_CHANNEL;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        this.state.type = _type;
+        this.state.channel = _channel;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally {
+        <if(trace)>this.traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+},
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+mTokens: function() {
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    var alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+    default :
+        if ( cnt<decisionNumber> >= 1 ) {
+            break loop<decisionNumber>;
+        }
+        <ruleBacktrackFailure()>
+            var eee = new org.antlr.runtime.EarlyExitException(<decisionNumber>, this.input);
+            <@earlyExitException()>
+            throw eee;
+    }
+    cnt<decisionNumber>++;
+} while (true);
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    var alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+    default :
+        break loop<decisionNumber>;
+    }
+} while (true);
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+<! (function() { /* @todo4 (do we really need a new scope?) */ !>
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+<! }).call(this); !>
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=<endif>this.match(this.input,<token>,<grammar.recognizerName>.FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (org.antlr.lang.isNull(list_<label>)) list_<label> = [];
+list_<label>.push(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = this.input.LA(1);<\n>
+<endif>
+this.match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = this.input.LA(1);<\n>
+<endif>
+this.matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= this.input.LA(1);<\n>
+<else>
+<label>=this.input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) {
+    this.input.consume();
+    <postmatchCode>
+<if(!LEXER)>
+    this.state.errorRecovery=false;
+<endif>
+    <if(backtracking)>this.state.failed=false;<endif>
+}
+else {
+    <ruleBacktrackFailure()>
+    var mse = new org.antlr.runtime.MismatchedSetException(null,this.input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    this.recover(mse);
+    throw mse;
+<else>
+    throw mse;
+    <! use following code to make it recover inline; remove throw mse;
+    this.recoverFromMismatchedSet(this.input,mse,<grammar.recognizerName>.FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
+<endif>
+}<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label) ::= <<
+<if(label)>
+var <label>Start = this.getCharIndex();
+this.match(<string>); <checkRuleBacktrackFailure()>
+var <label> = new org.antlr.runtime.CommonToken(this.input, org.antlr.runtime.Token.INVALID_TOKEN_TYPE, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start, this.getCharIndex()-1);
+<else>
+this.match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label>=this.input.LT(1);<\n>
+<endif>
+this.matchAny(this.input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = this.input.LA(1);<\n>
+<endif>
+this.matchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+this.pushFollow(<grammar.recognizerName>.FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif>this.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+this.state._fsp--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+var <label>Start<elementIndex> = this.getCharIndex();
+this.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new org.antlr.runtime.CommonToken(this.input, org.antlr.runtime.Token.INVALID_TOKEN_TYPE, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, this.getCharIndex()-1);
+<else>
+this.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+var <label>Start<elementIndex> = this.getCharIndex();
+this.match(EOF); <checkRuleBacktrackFailure()>
+var <label> = new org.antlr.runtime.CommonToken(this.input, this.EOF, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, this.getCharIndex()-1);
+<else>
+this.match(this.EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( this.input.LA(1)==org.antlr.runtime.Token.DOWN ) {
+    this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    throw new org.antlr.runtime.FailedPredicateException(this.input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    var nvae =
+        new org.antlr.runtime.NoViableAltException("<description>", <decisionNumber>, <stateNumber>, this.input);<\n>
+    <@noViableAltException()>
+    throw nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+    alt<decisionNumber>=<eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this.input.LA(<k>) ) {
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    var nvae =
+        new org.antlr.runtime.NoViableAltException("<description>", <decisionNumber>, <stateNumber>, this.input);<\n>
+    <@noViableAltException()>
+    throw nvae;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this.input.LA(<k>) ) {
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this.input.LA(<k>) ) {
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+    <targetState>
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = this.dfa<decisionNumber>.predict(this.input);
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    DFA<dfa.decisionNumber>_eotS:
+        "<dfa.javaCompressedEOT; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_eofS:
+        "<dfa.javaCompressedEOF; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_minS:
+        "<dfa.javaCompressedMin; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_maxS:
+        "<dfa.javaCompressedMax; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_acceptS:
+        "<dfa.javaCompressedAccept; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_specialS:
+        "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>",
+    DFA<dfa.decisionNumber>_transitionS: [
+            <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+    ]
+});
+
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    DFA<dfa.decisionNumber>_eot:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_eotS),
+    DFA<dfa.decisionNumber>_eof:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_eofS),
+    DFA<dfa.decisionNumber>_min:
+        org.antlr.runtime.DFA.unpackEncodedStringToUnsignedChars(<grammar.recognizerName>.DFA<dfa.decisionNumber>_minS),
+    DFA<dfa.decisionNumber>_max:
+        org.antlr.runtime.DFA.unpackEncodedStringToUnsignedChars(<grammar.recognizerName>.DFA<dfa.decisionNumber>_maxS),
+    DFA<dfa.decisionNumber>_accept:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_acceptS),
+    DFA<dfa.decisionNumber>_special:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_specialS),
+    DFA<dfa.decisionNumber>_transition: (function() {
+        var a = [],
+            i,
+            numStates = <grammar.recognizerName>.DFA<dfa.decisionNumber>_transitionS.length;
+        for (i=0; i\<numStates; i++) {
+            a.push(org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_transitionS[i]));
+        }
+        return a;
+    })()
+});
+
+<grammar.recognizerName>.DFA<dfa.decisionNumber> = function(recognizer) {
+    this.recognizer = recognizer;
+    this.decisionNumber = <dfa.decisionNumber>;
+    this.eot = <grammar.recognizerName>.DFA<dfa.decisionNumber>_eot;
+    this.eof = <grammar.recognizerName>.DFA<dfa.decisionNumber>_eof;
+    this.min = <grammar.recognizerName>.DFA<dfa.decisionNumber>_min;
+    this.max = <grammar.recognizerName>.DFA<dfa.decisionNumber>_max;
+    this.accept = <grammar.recognizerName>.DFA<dfa.decisionNumber>_accept;
+    this.special = <grammar.recognizerName>.DFA<dfa.decisionNumber>_special;
+    this.transition = <grammar.recognizerName>.DFA<dfa.decisionNumber>_transition;
+};
+
+org.antlr.lang.extend(<grammar.recognizerName>.DFA<dfa.decisionNumber>, org.antlr.runtime.DFA, {
+    getDescription: function() {
+        return "<dfa.description>";
+    },
+    <@errorMethod()>
+<if(dfa.specialStateSTs)>
+    specialStateTransition: function(s, input) {
+        var _s = s;
+        /* bind to recognizer so semantic predicates can be evaluated */
+        var retval = (function(s, input) {
+            switch ( s ) {
+            <dfa.specialStateSTs:{state |
+            case <i0> : <! compressed special state numbers 0..n-1 !>
+                <state>}; separator="\n">
+            }
+        }).call(this.recognizer, s, input);
+        if (!org.antlr.lang.isUndefined(retval)) {
+            return retval;
+        }
+<if(backtracking)>
+        if (this.recognizer.state.backtracking>0) {this.recognizer.state.failed=true; return -1;}<\n>
+<endif>
+        var nvae =
+            new org.antlr.runtime.NoViableAltException(this.getDescription(), <dfa.decisionNumber>, _s, input);
+        this.error(nvae);
+        throw nvae;
+    },<\n>
+<endif>
+    dummy: null
+});<\n>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+var index<decisionNumber>_<stateNumber> = input.index();
+input.rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "this.<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "this.input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(this.input.LA(<k>)\>=<lower> && this.input.LA(<k>)\<=<upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+<scope.name>_stack: [],<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+<scope.name>_stack: [],<\n>
+<endif>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the Java type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+null
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<!<ruleLabelType(referencedRule=label.referencedRule)>!> var <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+// inline static return class
+<ruleDescriptor:returnStructName()>: (function() {
+    <returnType()> = function(){};
+    org.antlr.lang.extend(<returnType()>,
+                      org.antlr.runtime.<if(TREE_PARSER)>tree.Tree<else>Parser<endif>RuleReturnScope,
+    {
+        <@ruleReturnMembers()>
+    });
+    return;
+})(),
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+(this.<scope>_stack[this.<scope>_stack.length-<negIndex>-1]).<attr.name>
+<else>
+<if(index)>
+(this.<scope>_stack[<index>]).<attr.name>
+<else>
+org.antlr.lang.array.peek(this.<scope>_stack).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+(this.<scope>_stack[this.<scope>_stack.length-<negIndex>-1]).<attr.name> =<expr>;
+<else>
+<if(index)>
+(this.<scope>_stack[<index>]).<attr.name> =<expr>;
+<else>
+org.antlr.lang.array.peek(this.<scope>_stack).<attr.name> =<expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "this.<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!==null?<scope>.<attr.name>:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>;
+<else>
+<attr.name> =<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>?<scope>.getText():null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>?<scope>.getType():0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>?<scope>.getLine():0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>?<scope>.getCharPositionInLine():0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>?<scope>.getChannel():0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>?<scope>.getTokenIndex():0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>?parseInt(<scope>.getText(), 10):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>?<scope>.start:null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>?<scope>.stop:null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>?<scope>.tree:null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(<scope>?(this.input.getTokenStream().toString(
+  this.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+  this.input.getTreeAdaptor().getTokenStopIndex(<scope>.start))):null)
+<else>
+(<scope>?this.input.toString(<scope>.start,<scope>.stop):null)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>?<scope>.getType():0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>?<scope>.getLine():0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>?<scope>.getCharPositionInLine():-1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>?<scope>.getChannel():0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>?<scope>.getTokenIndex():0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>?<scope>.getText():0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "(retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "(retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "(retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+this.input.getTokenStream().toString(
+  this.input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  this.input.getTreeAdaptor().getTokenStopIndex(retval.start))
+<else>
+this.input.toString(retval.start,this.input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "this.getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "this.state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "this.state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "this.state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(this.getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "parseInt(<scope>.getText(),10)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+<action>
+<endif>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+<! @todo overflow issue !>
+<name>: new org.antlr.runtime.BitSet([<words64:{<it>};separator=",">])
+>>
+
+codeFileExtension() ::= ".js"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/src/org/antlr/codegen/templates/ObjC/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
similarity index 69%
rename from src/org/antlr/codegen/templates/ObjC/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
index 4512d81..bfcc10d 100644
--- a/src/org/antlr/codegen/templates/ObjC/AST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2006 Kay Roepke
+ Copyright (c) 2006, 2007 Kay Roepke
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -104,15 +104,15 @@ parserMethods() ::= <<
 @returnScope.methods() ::= <<
 - (<ASTLabelType>) tree
 {
-	return tree;
+    return tree;
 }
 - (void) setTree:(<ASTLabelType>)aTree
 {
-	if (tree != aTree) {
-		[aTree retain];
-		[tree release];
-		tree = aTree;
-	}
+    if (tree != aTree) {
+        [aTree retain];
+        [tree release];
+        tree = aTree;
+    }
 }
 
 - (void) dealloc
@@ -145,7 +145,7 @@ ruleCleanUp() ::= <<
     :{[_stream_<it> release];}; separator="\n">
 <if(ruleDescriptor.hasMultipleReturnValues)>
 <if(backtracking)>
-if ( backtracking == 0 ) {<\n>
+if ( ![_state isBacktracking] ) {<\n>
 <endif>
     [_<prevRuleRootRef()> setTree:(<ASTLabelType>)[treeAdaptor postProcessTree:root_0]];
     [treeAdaptor setBoundariesForTree:[_<prevRuleRootRef()> tree] fromToken:[_<prevRuleRootRef()> start] toToken:[_<prevRuleRootRef()> stop]];
@@ -188,186 +188,58 @@ root_0 = (<ASTLabelType>)[treeAdaptor newEmptyTree];<\n>
 <endif>
 >>
 
+// T r a c k i n g  R u l e  E l e m e n t s
 
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking == 0 ) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-[treeAdaptor addChild:_<label>_tree toTree:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( backtracking == 0 ) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefBang(...)>
-<listLabel(...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(...)>
->>
-
-/** ID but track it for use in a rewrite rule */
+/** ID and track it for use in a rewrite rule */
 tokenRefTrack(token,label,elementIndex) ::= <<
 <super.tokenRef(...)>
-<if(backtracking)>if ( backtracking == 0 ) <endif>[_stream_<token> addElement:_<label>];<\n>
+<if(backtracking)>if ( ![_state isBacktracking] ) <endif>[_stream_<token> addElement:_<label>];<\n>
 >>
 
 /** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list list_ID for use in the rewrite.
+ *  to the tracking list stream_ID for use in the rewrite.
  */
 tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
 <tokenRefTrack(...)>
-<listLabel(...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated
-// rather than just added on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={
-<if(backtracking)>if (backtracking == 0) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-[treeAdaptor addChild:_<label>_tree toTree:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
-})>
->>
-
-matchSetRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={
-<if(backtracking)>if (backtracking == 0) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
-})>
->>
-
-matchSetRuleRoot(s,label,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={
-<if(backtracking)>if (backtracking == 0) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
-})>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (backtracking == 0) <endif>[treeAdaptor addChild:[_<label> tree] toTree:root_0];
+<listLabel(elem=label,...)>
 >>
 
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args) ::= "<super.ruleRef(...)>"
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( ![_state isBacktracking] ) <endif>[_stream_<token> addElement:_<label>];<\n>
+>>
 
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (backtracking == 0) <endif>root_0 = (<ASTLabelType>)[treeAdaptor makeNode:[_<label> tree] parentOf:root_0];
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
 >>
 
 /** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args) ::= <<
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
 <super.ruleRef(...)>
-<if(backtracking)>if (backtracking == 0) <endif>[_stream_<rule> addElement:[_<label> tree]];
+<if(backtracking)>if ( ![_state isBacktracking] ) <endif>[_stream_<rule.name> addElement:[_<label> tree]];
 >>
 
 /** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRefTrack(...)>
-<listLabel(...)>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefBang(...)>
-<listLabel(...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(...)>
->>
-
-// WILDCARD AST
-
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if (backtracking == 0) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-[treeAdaptor addChild:_<label>_tree toTree:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
+<listLabel(elem=label,...)>
 >>
 
-wildcardRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if (backtracking == 0) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRefRuleRoot(...)>
+<if(backtracking)>if ( ![_state isBacktracking] ) <endif>[_stream_<rule.name> addElement:[_<label> tree]];<\n>
 >>
 
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if (backtracking == 0) {<endif>
-_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
-root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
-[_<label>_tree release];
-<if(backtracking)>}<endif>
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem="["+label+" tree]",...)>
 >>
 
-// TODO: ugh, am i really missing the combinations for Track and ListLabel?
-// there's got to be a better way
-
 // R e w r i t e
 
 rewriteCode(
@@ -387,17 +259,17 @@ rewriteCode(
 // token list labels: <referencedTokenListLabels; separator=", ">
 // rule list labels: <referencedRuleListLabels; separator=", ">
 <if(backtracking)>
-if (backtracking == 0) {<\n>
+if (![_state isBacktracking]) {<\n>
 <endif>
 int i_0 = 0;
 root_0 = (<ASTLabelType>)[treeAdaptor newEmptyTree];
 [_<prevRuleRootRef()> setTree:root_0];
 <rewriteCodeLabels()>
 <alts:rewriteAlt(); separator="else ">
+<rewriteCodeLabelsCleanUp()>
 <if(backtracking)>
 }
 <endif>
-<rewriteCodeLabelsCleanUp()>
 >>
 
 rewriteCodeLabels() ::= <<
@@ -419,13 +291,16 @@ rewriteCodeLabels() ::= <<
 >
 >>
 
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow list like other blocks.
+  */
 rewriteOptionalBlock(
 	alt,rewriteBlockLevel,
 	referencedElementsDeep, // all nested refs
 	referencedElements, // elements in immediately block; no nested blocks
 	description) ::=
 <<
-// <fileName>:<description> ;
+// <fileName>:<description>
 if ( <referencedElementsDeep:{el | [_stream_<el> hasNext]}; separator="||"> ) {
 	<alt>
 }
@@ -499,7 +374,7 @@ rewriteElement(e) ::= <<
 >>
 
 /** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,args) ::= <<
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
 <if(args)>
 id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:_<token>]; // TODO: args: <args; separator=", ">
 <endif>
@@ -525,14 +400,17 @@ rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
 root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[_stream_<label> next] parentOf:root_<treeLevel>];<\n>
 >>
 
-/** Gen ^(ID ...) */
-rewriteTokenRefRoot(token,elementIndex) ::= <<
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
 root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[_stream_<token> next] parentOf:root_<treeLevel>];<\n>
 >>
 
-rewriteImaginaryTokenRef(args,token,elementIndex) ::= <<
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
 <if(first(rest(args)))><! got two arguments - means create from token with custom text!>
-id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token> text:<first(rest(args))>];
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token> text:@<first(rest(args))>];
 [treeAdaptor addChild:_<token>_tree toTree:root_<treeLevel>];
 [_<token>_tree release];<\n>
 <else><! at most one argument !>
@@ -548,9 +426,9 @@ id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithTokenType:<token> text:[t
 <endif><! two args !>
 >>
 
-rewriteImaginaryTokenRefRoot(args,token,elementIndex) ::= <<
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
 <if(first(rest(args)))><! got two arguments - means create from token with custom text!>
-id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token> text:<first(rest(args))>];
+id\<ANTLRTree> _<token>_tree = [treeAdaptor newTreeWithToken:<first(args)> tokenType:<token> text:@<first(rest(args))>];
 root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:_<token>_tree parentOf:root_<treeLevel>];
 [_<token>_tree release];<\n>
 <else><! at most one argument !>
@@ -577,12 +455,11 @@ root_0 = <action>;<\n>
  */
 prevRuleRootRef() ::= "retval"
 
-rewriteRuleRef(rule,dup) ::= <<
+rewriteRuleRef(rule) ::= <<
 [treeAdaptor addChild:[_stream_<rule> next] toTree:root_<treeLevel>];<\n>
-<endif>
 >>
 
-rewriteRuleRefRoot(rule,dup) ::= <<
+rewriteRuleRefRoot(rule) ::= <<
 root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:(id\<ANTLRTree>)[_stream_<rule> next] parentOf:root_<treeLevel>];<\n>
 >>
 
@@ -613,3 +490,24 @@ root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[_<label> tree] parentO
 rewriteRuleListLabelRefRoot(label) ::= <<
 root_<treeLevel> = (<ASTLabelType>)[treeAdaptor makeNode:[(ANTLR<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope *)[_stream_<label> next] tree] parentOf:root_<treeLevel>];<\n>
 >>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+new <hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+>>
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/ObjC/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg
similarity index 100%
rename from src/org/antlr/codegen/templates/ObjC/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg
new file mode 100644
index 0000000..8b68d76
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg
@@ -0,0 +1,189 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,hetero,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking == 0 ) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+[treeAdaptor addChild:_<label>_tree toTree:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,hetero,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( backtracking == 0 ) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+// TODO: add support for heterogeneous trees
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+[treeAdaptor addChild:_<label>_tree toTree:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+})>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (backtracking == 0) <endif>[treeAdaptor addChild:[_<label> tree] toTree:root_0];
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (backtracking == 0) <endif>root_0 = (<ASTLabelType>)[treeAdaptor makeNode:[_<label> tree] parentOf:root_0];
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem="["+label+" tree]",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem="["+label+" tree]",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem="["+label+" tree]",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+[treeAdaptor addChild:_<label>_tree toTree:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (backtracking == 0) {<endif>
+_<label>_tree = (<ASTLabelType>)[treeAdaptor newTreeWithToken:_<label>];
+root_0 = (<ASTLabelType>)[treeAdaptor makeNode:_<label>_tree parentOf:root_0];
+[_<label>_tree release];
+<if(backtracking)>}<endif>
+>>
+
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+new <hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)adaptor.create(<label>)
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg
new file mode 100644
index 0000000..82e0f4a
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg
@@ -0,0 +1,129 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007 Kay Roepke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group ASTTreeParser;
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+{
+    <ASTLabelType> root_<treeLevel> = [treeAdaptor newEmptyTree];
+    <root:element()>
+    <actionsAfterRoot:element()>
+    <if(nullableChildList)>
+    if ( [input LA:1] == ANTLRTokenTypeDOWN ) {
+        [self match:input tokenType:ANTLRTokenTypeDOWN follow:nil]; <checkRuleBacktrackFailure()>
+        <children:element()>
+        [self match:input tokenType:ANTLRTokenTypeUP follow:nil]; <checkRuleBacktrackFailure()>
+    }
+    <else>
+    [self match:input tokenType:ANTLRTokenTypeDOWN follow:nil]; <checkRuleBacktrackFailure()>
+    <children:element()>
+    [self match:input tokenType:ANTLRTokenTypeUP follow:nil]; <checkRuleBacktrackFailure()>
+    <endif>
+    [root_<treeLevel> release];
+}<\n>
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(rewriteMode)>retval.tree = (<ASTLabelType>)retval.start;<endif>
+>>
+
+// TOKEN AST STUFF
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking==0 ) <endif>adaptor.addChild(root_<treeLevel>, <label>.getTree());
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking==0 ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_<treeLevel>);
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+#error Heterogeneous tree support not implemented.
+<if(hetero)>
+new <hetero>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/ObjC/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg
similarity index 100%
rename from src/org/antlr/codegen/templates/ObjC/Dbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg
diff --git a/src/org/antlr/codegen/templates/ObjC/ObjC.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
similarity index 87%
rename from src/org/antlr/codegen/templates/ObjC/ObjC.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
index 83449d8..1bc9212 100644
--- a/src/org/antlr/codegen/templates/ObjC/ObjC.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
@@ -58,7 +58,7 @@ className() ::= "<name><! if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parse
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass,literals) ::=
@@ -77,7 +77,7 @@ outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
 headerFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass,literals) ::=
@@ -108,7 +108,7 @@ lexerHeaderFile() ::=
 
 #pragma mark Rule return scopes start
 <rules:{rule |
-<rule.ruleDescriptor:returnScopeInterface(ruleDescriptor=rule.ruleDescriptor)>
+<rule:{ruleDescriptor | <returnScopeInterface(scope=ruleDescriptor.returnScope)>}>
 }>
 #pragma mark Rule return scopes end
 
@@ -138,7 +138,7 @@ lexer(grammar, name, tokens, scopes, rules, numRules, labelType="id<ANTLRToken>
 /** As per Terence: No returns for lexer rules!
 #pragma mark Rule return scopes start
 <rules:{rule |
-<rule.ruleDescriptor:returnScope(ruleDescriptor=rule.ruleDescriptor)>
+<rule.ruleDescriptor:{ruleDescriptor | <returnScope(scope=ruleDescriptor.returnScope)>}>
 }>
 #pragma mark Rule return scopes end
 */
@@ -158,7 +158,11 @@ static NSArray *tokenNames;
 {
 	if (nil!=(self = [super initWithCharStream:anInput])) {
 		<if(memoize)>
-		// init memoize facility
+		// initialize the memoization cache - the indices are 1-based in the runtime code!
+		[ruleMemo addObject:[NSNull null]];     // dummy entry to ensure 1-basedness.
+		for (int i = 0; i \< <numRules>; i++) {
+		    [[state ruleMemo] addObject:[NSMutableDictionary dictionary]];
+		}
 		<endif>
 		<synpreds:{p | <lexerSynpred(name=p)>};separator="\n">
 		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [[<name>DFA<dfa.decisionNumber> alloc] initWithRecognizer:self];}; separator="\n">
@@ -213,22 +217,23 @@ static NSArray *tokenNames;
 filteringNextToken() ::= <<
 - (id\<ANTLRToken>) nextToken
 {
+    <sharedStateLocalVarDefinition()>
     while (YES) {
         if ( [input LA:1] == ANTLRCharStreamEOF ) {
             return nil; // should really be a +eofToken call here -> go figure
         }
         [self setToken:nil];
-        _channel = ANTLRTokenChannelDefault;
-        _tokenStartLine = [input line];
-        _tokenCharPositionInLine = [input charPositionInLine];
-        tokenStartCharIndex = [self charIndex];
+        [_state setChannel:ANTLRTokenChannelDefault];
+        [_state setTokenStartLine:[input line]];
+        [_state setTokenCharPositionInLine:[input charPositionInLine]];
+        [_state setTokenStartCharIndex:[self charIndex]];
         @try {
             int m = [input mark];
-            backtracking = 1;
-            failed = NO;
+            [_state setBacktracking:1];
+            [_state setIsFailed:NO];
             [self mTokens];
-            backtracking = 0;
-            if ( failed ) {
+            [_state setBacktracking:0];
+            if ( [_state isFailed] ) {
                 [input rewind:m];
                 [input consume]; <! // advance one char and try again !>
             } else {
@@ -245,7 +250,7 @@ filteringNextToken() ::= <<
 }
 >>
 
-filteringActionGate() ::= "backtracking==1"
+filteringActionGate() ::= "[_state backtracking] == 1"
 
 treeParserHeaderFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment,
            recognizer, name, tokens, tokenNames, rules, cyclicDFAs,
@@ -270,12 +275,12 @@ parserHeaderFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment,
 
 #pragma mark Dynamic Rule Scopes
 <rules:{rule |
-<rule.ruleDescriptor.ruleScope:ruleAttributeScopeDecl(scope=it)>
+<rule.ruleDescriptor:{ ruleDescriptor | <ruleAttributeScopeDecl(scope=ruleDescriptor.ruleScope)>}>
 }>
 
 #pragma mark Rule Return Scopes
 <rules:{rule |
-<rule.ruleDescriptor:returnScopeInterface(ruleDescriptor=rule.ruleDescriptor)>
+<rule.ruleDescriptor:{ ruleDescriptor | <returnScopeInterface(scope=ruleDescriptor.returnScope)>}>
 }>
 
 
@@ -289,10 +294,10 @@ parserHeaderFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment,
 	}>
 	<@ivars()>
 
-	<actions.parser.ivars>
+	<actions.(actionScope).ivars>
  }
 
-<actions.parser.methodsdecl>
+<actions.(actionScope).methodsdecl>
 
 <rules:{rule |
 - (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>) <rule.ruleName><if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>;
@@ -323,7 +328,7 @@ genericParser(name, scopes, tokens, tokenNames, rules, numRules,
 
 #pragma mark Rule return scopes start
 <rules:{rule |
-<rule.ruleDescriptor:returnScope(ruleDescriptor=rule.ruleDescriptor)>
+<rule.ruleDescriptor:{ruleDescriptor | <returnScope(scope=ruleDescriptor.returnScope)>}>
 }>
 
 @implementation <className()>
@@ -353,7 +358,7 @@ static NSArray *tokenNames;
 		<rules:{rule |
 		<rule.ruleDescriptor.ruleScope:ruleAttributeScopeInit(scope=it)>
 		}>
-		<actions.parser.init>
+		<actions.(actionScope).init>
 		<@init()>
 	}
 	return self;
@@ -366,7 +371,7 @@ static NSArray *tokenNames;
 <endif>
 	<cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
 	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDealloc(scope=it)><endif>}>
-	<actions.parser.dealloc>
+	<actions.(actionScope).dealloc>
 	<@dealloc()>
 	[super dealloc];
 }
@@ -376,7 +381,7 @@ static NSArray *tokenNames;
 	return @"<fileName>";
 }
 
-<actions.parser.methods>
+<actions.(actionScope).methods>
 
 <rules; separator="\n\n">
 
@@ -391,9 +396,15 @@ parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTL
 <genericParser(inputStreamType="id\<ANTLRTokenStream>", ...)>
 >>
 
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ANTLRASTLabelType>}, ASTLabelType="id", superClass="ANTLRTreeParser", members={<actions.treeparser.members>}) ::= <<
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="id", superClass="ANTLRTreeParser", members={<actions.treeparser.members>}) ::= <<
 <genericParser(inputStreamType="id\<ANTLRTreeNodeStream>", ...)>
 >>
+
+/** Maintain a local variable for the shared state object to avoid calling the accessor all the time. */
+sharedStateLocalVarDefinition() ::= <<
+<if(LEXER)>ANTLRLexerState<else>ANTLRBaseRecognizerState<endif> *_state = [self state];
+>>
+
 /** A simpler version of a rule template that is specific to the imaginary
  *  rules created for syntactic predicates.  As they never have return values
  *  nor parameters etc..., just give simplest possible method.  Don't do
@@ -405,12 +416,13 @@ synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 <<
 - (void) <ruleName>
 {
-    <if(trace)>NSLog(@"enter <ruleName> %d failed=%@ backtracking=%d", [input LA:1], failed ? @"YES" : @"NO", backtracking);
+    <sharedStateLocalVarDefinition()>
+    <if(trace)>NSLog(@"enter <ruleName> %d failed=%@ backtracking=%d", [input LA:1], [_state isFailed] ? @"YES" : @"NO", [_state backtracking]);
     @try {
         <block>
     }
     @finally {
-        NSLog(@"exit <ruleName> %d failed=%@ backtracking=%d", [input LA:1], failed ? @"YES" : @"NO", backtracking);
+        NSLog(@"exit <ruleName> %d failed=%@ backtracking=%d", [input LA:1], [_state isFailed] ? @"YES" : @"NO", [_state backtracking]);
     }
 <else>
     <block>
@@ -420,12 +432,12 @@ synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 
 /** How to test for failure and return from rule */
 checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+<if(backtracking)>if ([_state isFailed]) return <ruleReturnValue()>;<endif>
 >>
 
 /** This rule has failed, exit indicating failure during backtrack */
 ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (backtracking > 0) {failed=YES; return <ruleReturnValue()>;}<endif>
+<if(backtracking)>if ([_state isBacktracking]) {[_state setIsFailed:YES]; return <ruleReturnValue()>;}<endif>
 >>
 
 synpred(name) ::= <<
@@ -438,7 +450,7 @@ lexerSynpred(name) ::= <<
 
 ruleMemoization(name) ::= <<
 <if(memoize)>
-if ( backtracking>0 && [self alreadyParsedRule:input ruleIndex:<ruleDescriptor.index>] ) { return <ruleReturnValue()>; }
+if ([_state isBacktracking] && [self alreadyParsedRule:input ruleIndex:<ruleDescriptor.index>] ) { return <ruleReturnValue()>; }
 <endif>
 >>
 
@@ -451,7 +463,8 @@ rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memo
 // <fileName>:<description>
 - (<returnType()>) <ruleName><ruleDescriptor.parameterScope:parameterScope(scope=it)>
 {
-    <if(trace)>NSLog(@"enter <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], failed ? @"YES" : @"NO", backtracking);<endif>
+    <if(trace)>NSLog(@"enter <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], [_state isFailed] ? @"YES" : @"NO", [_state backtracking]);<endif>
+    <sharedStateLocalVarDefinition()>
     <ruleScopeSetUp()>
     <ruleDeclarations()>
     <ruleLabelDefs()>
@@ -476,7 +489,7 @@ rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memo
 <endif>
 <endif>
 	@finally {
-		<if(trace)>NSLog(@"exit <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], failed ? @"YES" : @"NO", backtracking);<endif>
+		<if(trace)>NSLog(@"exit <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], [_state isFailed] ? @"YES" : @"NO", [_state backtracking]);<endif>
 		<ruleCleanUp()>
 		<(ruleDescriptor.actions.finally):execAction()>
 		<ruleScopeCleanUp()>
@@ -499,7 +512,7 @@ ruleDeclarations() ::= <<
 [_retval setStart:[input LT:1]];<\n>
 <else>
 <if(ruleDescriptor.hasSingleReturnValue)>
-<returnType()> _<ruleDescriptor.singleValueReturnName>;
+<returnType()> _<ruleDescriptor.singleValueReturnName> = <initValue(typeName=returnType())>;
 <endif>
 <endif>
 <if(memoize)>
@@ -544,11 +557,13 @@ ruleCleanUp() ::= <<
 // token+rule list labels
 <[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{[_list_<it.label.text> release];}; separator="\n">
 <if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
 [_retval setStop:[input LT:-1]];<\n>
 <endif>
+<endif>
 <if(memoize)>
 <if(backtracking)>
-if ( backtracking>0 ) { [self memoize:input ruleIndex:<ruleDescriptor.index> startIndex:<ruleDescriptor.name>_StartIndex]; }
+if ( [_state isBacktracking] ) { [self memoize:input ruleIndex:<ruleDescriptor.index> startIndex:<ruleDescriptor.name>_StartIndex]; }
 <endif>
 <endif>
 >>
@@ -561,24 +576,23 @@ lexerRule(ruleName,nakedBlock,ruleDescriptor,block, memoize) ::= <<
 
 - (void) m<ruleName><if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>
 {
-    <if(trace)>NSLog(@"enter <ruleName> %C line=%d:%d failed=%@ backtracking=%d", [input LA:1], [self line], [self charPositionInLine], failed ? @"YES" : @"NO", backtracking);<endif>
+    <if(trace)>NSLog(@"enter <ruleName> %C line=%d:%d failed=%@ backtracking=%d", [input LA:1], [self line], [self charPositionInLine], [_state isFailed] ? @"YES" : @"NO", [_state backtracking]);<endif>
+    <sharedStateLocalVarDefinition()>
     <ruleDeclarations()>
     <ruleLabelDefs()>
     <ruleMemoization(name=ruleName)>
     @try {
-        ruleNestingLevel++;
 <ruleDescriptor.actions.init>
 <if(nakedBlock)>
         <block><\n>
 <else>
         int _type = <name>_<ruleName>;
         <block>
-        self->_tokenType = _type;<\n>
+        [_state setTokenType:_type];<\n>
 <endif>
     }
     @finally {
-        ruleNestingLevel--;
-        <if(trace)>NSLog(@"exit <ruleName> %C line=%d:%d failed=%@ backtracking=%d", [input LA:1], [self line], [self charPositionInLine], failed ? @"YES" : @"NO", backtracking);<endif>
+        <if(trace)>NSLog(@"exit <ruleName> %C line=%d:%d failed=%@ backtracking=%d", [input LA:1], [self line], [self charPositionInLine], [_state isFailed] ? @"YES" : @"NO", [_state backtracking]);<endif>
         // rule cleanup
         <ruleCleanUp()>
         <(ruleDescriptor.actions.finally):execAction()>
@@ -714,7 +728,7 @@ case <i> :
 >>
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel) ::= <<
 // <fileName>:<description> // alt
 {
 <@declarations()>
@@ -723,6 +737,11 @@ alt(elements,altNum,description,autoAST,outerAlt) ::= <<
 }
 >>
 
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
 // E L E M E N T S
 
 /** Dump the elements one per line */
@@ -732,7 +751,7 @@ element() ::= <<
 >>
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
+tokenRef(token,label,elementIndex,hetero) ::= <<
 <if(label)>
 _<label>=(<labelType>)[input LT:1];<\n>
 <endif>
@@ -740,7 +759,7 @@ _<label>=(<labelType>)[input LT:1];<\n>
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -777,9 +796,9 @@ if (<s>) {
 	<postmatchCode>
 	[input consume];
 <if(!LEXER)>
-	errorRecovery = NO;
+	[_state setIsErrorRecovery:NO];
 <endif>
-	<if(backtracking)>failed = NO;<endif>
+	<if(backtracking)>[_state setIsFailed:NO];<endif>
 } else {
 	<ruleBacktrackFailure()>
 	ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
@@ -841,39 +860,39 @@ wildcardCharListLabel(label, elementIndex) ::= <<
 /** Match a rule reference by invoking it possibly with arguments
  *  and a return value or values.
  */
-ruleRef(rule,label,elementIndex,args) ::= <<
-[following addObject:FOLLOW_<rule>_in_<ruleName><elementIndex>];
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+[[_state following] addObject:FOLLOW_<rule.name>_in_<ruleName><elementIndex>];
 <if(label)>
-_<label> = [self <rule><if(args)>:<args; separator=" :"><endif>];<\n>
+_<label> = [self <rule.name><if(args)>:<args; separator=" :"><endif>];<\n>
 <else>
-[self <rule><if(args)>:<args; separator=" :"><endif>];<\n>
+[self <rule.name><if(args)>:<args; separator=" :"><endif>];<\n>
 <endif>
-[following removeLastObject];
+[[_state following] removeLastObject];
 <checkRuleBacktrackFailure()><\n>
 >>
 
 /** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
 <listLabel(...)>
 >>
 
 /** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
 <if(label)>
 int _<label>Start<elementIndex> = [self charIndex];
-[self m<rule><if(args)>:<args; separator=" :"><endif>];
+[self m<rule.name><if(args)>:<args; separator=" :"><endif>];
 <checkRuleBacktrackFailure()><\n>
 _<label> = [[ANTLRCommonToken alloc] initWithInput:input tokenType:ANTLRTokenTypeInvalid channel:ANTLRTokenChannelDefault start:_<label>Start<elementIndex> stop:[self charIndex]-1];
 [_<label> setLine:[self line]];
 <else>
-[self m<rule><if(args)>:<args; separator=" :"><endif>];
+[self m<rule.name><if(args)>:<args; separator=" :"><endif>];
 <checkRuleBacktrackFailure()><\n>
 <endif>
 >>
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -893,7 +912,8 @@ int _<label>Start<elementIndex> = [self charIndex];
 >>
 
 /** match ^(root children) in tree parser */
-tree(root,actionsAfterRoot,children,nullableChildList) ::= <<
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
 <root:element()>
 <actionsAfterRoot:element()>
 <if(nullableChildList)>
@@ -1286,24 +1306,27 @@ ruleLabelDef(label) ::= <<
  *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
  *  subgroups to stick in members.
  */
-returnScopeInterface(ruleDescriptor) ::= <<
+returnScopeInterface(scope) ::= <<
 <if(!ruleDescriptor.isSynPred)>
 <if(ruleDescriptor.hasMultipleReturnValues)>
 @interface <returnTypeName()> : ANTLR<if(TREE_PARSER)>Tree<endif>ParserRuleReturnScope {
-    <ruleDescriptor.returnScope.attributes:{<it.decl>;}; separator="\n">
+    <scope.attributes:{<it.decl>;}; separator="\n">
     <@ivars()>
+    <actions.(actionScope).ruleReturnIvars>
 }
 <@methods()>
+<actions.(actionScope).ruleReturnMethodsDecl>
 @end
 <endif>
 <endif>
 >>
 
-returnScope(ruleDescriptor) ::= <<
+returnScope(scope) ::= <<
 <if(!ruleDescriptor.isSynPred)>
 <if(ruleDescriptor.hasMultipleReturnValues)>
 @implementation <returnTypeName()>
 <@methods()>
+<actions.(actionScope).ruleReturnMethods>
 @end
 <endif>
 <endif>
@@ -1364,9 +1387,11 @@ returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
 [_retval setValue:<expr> forKey:@"<attr.name>"];
 <else>
 _<attr.name> =<expr>;
+<if(LEXER)>
 // double check this after beta release!
 [_<attr.name> retain];
 <endif>
+<endif>
 >>
 /** How to translate $tokenLabel */
 tokenLabelRef(label) ::= "_<label>"
@@ -1389,7 +1414,14 @@ tokenLabelPropertyRef_tree(scope,attr) ::= "_<scope>_tree"
 ruleLabelPropertyRef_start(scope,attr) ::= "[_<scope> start]"
 ruleLabelPropertyRef_stop(scope,attr) ::= "[_<scope> stop]"
 ruleLabelPropertyRef_tree(scope,attr) ::= "[_<scope> tree]"
-ruleLabelPropertyRef_text(scope,attr) ::= "[input substringWithRange:NSMakeRange([_<scope> start], [_<scope> stop]-[_<scope> start])]"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+//[input textForNode:[_<scope> start]]
+[input substringWithRange:NSMakeRange([[input treeAdaptor] startIndex:[_<scope> start]], [[input treeAdaptor] stopIndex:[_<scope> start]])]
+<else>
+[input substringWithRange:NSMakeRange([_<scope> start], [_<scope> stop]-[_<scope> start])]
+<endif>
+>>
 ruleLabelPropertyRef_st(scope,attr) ::= "[_<scope> st]"
 
 /** Isolated $RULE ref ok in lexer as it's a Token */
@@ -1412,13 +1444,25 @@ rulePropertyRef_st(scope,attr) ::= "[_retval st]"
 ruleSetPropertyRef_tree(scope,attr,expr) ::= "[_retval setValue:<expr> forKey:@\"tree\"]"
 ruleSetPropertyRef_st(scope,attr,expr) ::= "<\n>#error StringTemplates are unsupported<\n>"
 
+/* hideous: find a way to cut down on the number of templates to support read/write access */
+/* TODO: also, which ones are valid to write to? ask Ter */
+lexerRuleSetPropertyRef_text(scope,attr,expr) ::= "[_state setText:<expr>];"
+lexerRuleSetPropertyRef_type(scope,attr,expr) ::= "_type"
+lexerRuleSetPropertyRef_line(scope,attr,expr) ::= "[_state tokenStartLine]"
+lexerRuleSetPropertyRef_pos(scope,attr,expr) ::= "[_state tokenCharPositionInLine]"
+lexerRuleSetPropertyRef_index(scope,attr,expr) ::= "-1" // undefined token index in lexer
+lexerRuleSetPropertyRef_channel(scope,attr,expr) ::= "[_state setChannel:<expr>];"
+lexerRuleSetPropertyRef_start(scope,attr,expr) ::= "[_state tokenStartCharIndex]"
+lexerRuleSetPropertyRef_stop(scope,attr,expr) ::= "([self charIndex]-1)"
+
+
 lexerRulePropertyRef_text(scope,attr) ::= "[self text]"
 lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "self->_tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "self->_tokenCharPositionInLine"
+lexerRulePropertyRef_line(scope,attr) ::= "[_state tokenStartLine]"
+lexerRulePropertyRef_pos(scope,attr) ::= "[_state tokenCharPositionInLine]"
 lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "self->_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
+lexerRulePropertyRef_channel(scope,attr) ::= "[_state channel]"
+lexerRulePropertyRef_start(scope,attr) ::= "[_state tokenStartCharIndex]"
 lexerRulePropertyRef_stop(scope,attr) ::= "([self charIndex]-1)"
 
 /** How to execute an action */
@@ -1429,7 +1473,7 @@ if ( <actions.(actionScope).synpredgate> ) {
   <action>
 }
 <else>
-if ( backtracking==0 ) {
+if ( ![_state isBacktracking] ) {
   <action>
 }
 <endif>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg
new file mode 100644
index 0000000..861c4cc
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg
@@ -0,0 +1,258 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2008 Ronald Blaschke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> _first_0 = null;
+<ASTLabelType> _last = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<if(rewriteMode)>
+retval.tree = (<ASTLabelType>)_first_0;
+if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
+    retval.tree = (<ASTLabelType>)adaptor.getParent(retval.tree);
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = null;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( state.backtracking==0 )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 ) {<endif>
+<if(hetero)>
+<label>_tree = new <hetero>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+}
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( state.backtracking==0 ) <endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<treeLevel>, <label>.getTree());
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_<treeLevel>);
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".getTree()",...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+new <hetero>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking==0 ) {<\n><endif>
+retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/Java/Java.stg b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
similarity index 64%
rename from src/org/antlr/codegen/templates/Java/Java.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
index 92f6b75..ef3b729 100644
--- a/src/org/antlr/codegen/templates/Java/Java.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
@@ -1,6 +1,7 @@
 /*
  [The "BSD licence"]
  Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2007-2008 Ronald Blaschke
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,19 +26,8 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-group Java implements ANTLRCore;
-
-javaTypeInitMap ::= [
-	"int":"0",
-	"long":"0",
-	"float":"0.0",
-	"double":"0.0",
-	"boolean":"false",
-	"byte":"0",
-	"short":"0",
-	"char":"0",
-	default:"null" // anything other than an atomic type
-]
+
+group Perl5 implements ANTLRCore;
 
 /** The overall file structure of a recognizer; stores methods for rules
  *  and cyclic DFAs plus support code.
@@ -45,25 +35,18 @@ javaTypeInitMap ::= [
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass, literals) ::=
 <<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
 <actions.(actionScope).header>
 
 <@imports>
-import org.antlr.runtime.*;
 <if(TREE_PARSER)>
-import org.antlr.runtime.tree.*;
 <endif>
-import java.util.Stack;
-import java.util.List;
-import java.util.ArrayList;
 <if(backtracking)>
-import java.util.Map;
-import java.util.HashMap;
 <endif>
 <@end>
 
@@ -72,33 +55,73 @@ import java.util.HashMap;
 >>
 
 lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-public class <name> extends Lexer {
-    <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <actions.lexer.members>
-    public <name>() {;} <! needed by subclasses !>
-    public <name>(CharStream input) {
-        super(input);
-<if(backtracking)>
-        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-    }
-    public String getGrammarFileName() { return "<fileName>"; }
+      filterMode, superClass="ANTLR::Runtime::Lexer")  ::= <<
+package <name>;
+use ANTLR::Runtime::Class;
+
+use Carp;
+use English qw( -no_match_vars ) ;
+use Readonly;
+use Switch;
+
+use ANTLR::Runtime::BaseRecognizer;
+use ANTLR::Runtime::DFA;
+use ANTLR::Runtime::NoViableAltException;
+
+extends 'ANTLR::Runtime::Lexer';
+
+use constant {
+    HIDDEN => ANTLR::Runtime::BaseRecognizer->HIDDEN
+};
+
+use constant {
+    <tokens:{ <it.name> => <it.type>, }; separator="\n">
+};
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+<actions.lexer.members>
+
+sub BUILD {
+    my ($self, $arg_ref) = @_;
+
+    $self->init_dfas();
+}
+
+sub get_grammar_file_name {
+    return "<fileName>";
+}
 
 <if(filterMode)>
-    <filteringNextToken()>
+<filteringNextToken()>
 <endif>
-    <rules; separator="\n\n">
+<rules; separator="\n\n">
+
+<synpreds:{p | <lexerSynpred(p)>}>
 
-    <synpreds:{p | <lexerSynpred(p)>}>
+<cyclicDFAs:{dfa | has 'dfa<dfa.decisionNumber>';}; separator="\n">
 
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+sub init_dfas {
+    my ($self) = @_;
 
+    <cyclicDFAs:{dfa |
+    $self->dfa<dfa.decisionNumber>(<name>::DFA<dfa.decisionNumber>->new({ recognizer => $self }));
+    }; separator="\n">
+
+    return;
 }
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+1;
+
 >>
 
+perlTypeInitMap ::= [
+	"$":"undef",
+	"@":"()",
+	"%":"()",
+	default:"undef"
+]
+
 /** A override of Lexer.nextToken() that backtracks over mTokens() looking
  *  for matches.  No error can be generated upon error; just rewind, consume
  *  a token and then try again.  backtracking needs to be set as well.
@@ -155,53 +178,79 @@ return false;
 }
 >>
 
+actionGate() ::= "$self->state->backtracking==0"
+
 filteringActionGate() ::= "backtracking==1"
 
 /** How to generate a parser */
 genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
+              bitsets, inputStreamType, superClass, filterMode,
               ASTLabelType="Object", labelType, members) ::= <<
-public class <name> extends <@superClassName><superClass><@end> {
-    public static final String[] tokenNames = new String[] {
-        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-    };
-    <tokens:{public static final int <it.name>=<it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <@members>
-   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-    public <name>(<inputStreamType> input) {
-        super(input);
+package <name>;
+use ANTLR::Runtime::Class;
+
+use English qw( -no_match_vars ) ;
+use Readonly;
+use Switch;
+use Carp;
+use ANTLR::Runtime::BitSet;
+
+extends '<@superClassName><superClass><@end>';
+
+Readonly my $token_names => [
+    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+];
+
+use constant {
+<tokens:{ <it.name> => <it.type>, }; separator="\n">
+};
+
+<bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+<@members>
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+
+sub BUILD {
+    my ($self, $arg_ref) = @_;
+
 <if(backtracking)>
-        ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
+    $self->state->rule_memo({});<\n>
 <endif>
-    }
-    <@end>
+}
+<@end>
+
+sub get_token_names {
+    return $token_names;
+}
 
-    public String[] getTokenNames() { return tokenNames; }
-    public String getGrammarFileName() { return "<fileName>"; }
+sub get_grammar_file_name {
+    return "<fileName>";
+}
 
-    <members>
+<members>
 
-    <rules; separator="\n\n">
+<rules; separator="\n\n">
 
-    <synpreds:{p | <synpred(p)>}>
+<synpreds:{p | <synpred(p)>}>
 
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = __PACKAGE__::DFA<dfa.decisionNumber>->new($self);}; separator="\n">
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
 
-    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>
-}
+
+
+1;
 >>
 
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", ...)>
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="ANTLR::Runtime::Parser", labelType="ANTLR::Runtime::Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ANTLR::Runtime::TokenStream", ...)>
 >>
 
 /** How to generate a tree parser; same as parser except the input
  *  stream is a different type.
  */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="ANTLR::Runtime::TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
 <genericParser(inputStreamType="TreeNodeStream", ...)>
 >>
 
@@ -214,21 +263,24 @@ treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRu
  */
 synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 <<
-// $ANTLR start <ruleName>
-public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {   
+# $ANTLR start <ruleName>
+sub <ruleName>_fragment {
+# <ruleDescriptor.parameterScope:parameterScope(scope=it)>
+
 <if(trace)>
-    traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    try {
+    $self->traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    eval {
         <block>
-    }
-    finally {
-        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    };
+    $self->traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    if ($EVAL_ERROR) {
+        croak $EVAL_ERROR;
     }
 <else>
     <block>
 <endif>
 }
-// $ANTLR end <ruleName>
+# $ANTLR end <ruleName>
 >>
 
 synpred(name) ::= <<
@@ -262,7 +314,11 @@ if ( backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { retu
 
 /** How to test for failure and return from rule */
 checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (failed) return <ruleReturnValue()>;<endif>
+<if(backtracking)>
+if ($self->state->failed) {
+    return <ruleReturnValue()>;
+}
+<endif>
 >>
 
 /** This rule has failed, exit indicating failure during backtrack */
@@ -277,21 +333,22 @@ rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memo
 <ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
 <returnScope(scope=ruleDescriptor.returnScope)>
 
-// $ANTLR start <ruleName>
-// <fileName>:<description>
-public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+# $ANTLR start <ruleName>
+# <fileName>:<description>
+sub <ruleName>() {
+    my ($self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>) = @_;
+    <if(trace)>$self->traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
     <ruleScopeSetUp()>
     <ruleDeclarations()>
     <ruleLabelDefs()>
     <ruleDescriptor.actions.init>
     <@preamble()>
-    try {
+    eval {
         <ruleMemoization(name=ruleName)>
         <block>
         <ruleCleanUp()>
         <(ruleDescriptor.actions.after):execAction()>
-    }
+    };
 <if(exceptions)>
     <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
 <else>
@@ -299,23 +356,27 @@ public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterS
 <if(actions.(actionScope).rulecatch)>
     <actions.(actionScope).rulecatch>
 <else>
-    catch (RecognitionException re) {
-        reportError(re);
-        recover(input,re);
+    my $exception = $EVAL_ERROR;
+    if (ref $exception && $exception->isa('ANTLR::Runtime::RecognitionException')) {
+        $self->report_error($exception);
+        $self->recover($self->input, $exception);
+        $exception = undef;
     }<\n>
 <endif>
 <endif>
 <endif>
-    finally {
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
+    <if(trace)>$self->traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+    <memoize()>
+    <ruleScopeCleanUp()>
+    <finally>
+    if ($exception) {
+        croak $exception;
+        #$exception->rethrow();
     }
     <@postamble()>
     return <ruleReturnValue()>;
 }
-// $ANTLR end <ruleName>
+# $ANTLR end <ruleName>
 >>
 
 catch(decl,action) ::= <<
@@ -326,15 +387,15 @@ catch (<e.decl>) {
 
 ruleDeclarations() ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = new <returnType()>();
-retval.start = input.LT(1);<\n>
+my $retval = <returnType()>->new();
+$retval->set_start($self->input->LT(1));<\n>
 <else>
 <ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+my $<a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
 }>
 <endif>
 <if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.index();
+my $<ruleDescriptor.name>_start_index = $self->input->index();
 <endif>
 >>
 
@@ -350,7 +411,7 @@ ruleScopeCleanUp() ::= <<
 
 ruleLabelDefs() ::= <<
 <[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{<labelType> <it.label.text>=null;}; separator="\n"
+    :{my $<it.label.text> = undef;}; separator="\n"
 >
 <[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
     :{List list_<it.label.text>=null;}; separator="\n"
@@ -365,7 +426,7 @@ lexerRuleLabelDefs() ::= <<
   ruleDescriptor.ruleLabels]
     :{<labelType> <it.label.text>=null;}; separator="\n"
 >
-<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<ruleDescriptor.charLabels:{my $<it.label.text>;}; separator="\n">
 <[ruleDescriptor.tokenListLabels,
   ruleDescriptor.ruleListLabels,
   ruleDescriptor.ruleListLabels]
@@ -377,9 +438,9 @@ ruleReturnValue() ::= <<
 <if(!ruleDescriptor.isSynPred)>
 <if(ruleDescriptor.hasReturnValue)>
 <if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
+$<ruleDescriptor.singleValueReturnName>
 <else>
-retval
+$retval
 <endif>
 <endif>
 <endif>
@@ -388,7 +449,7 @@ retval
 ruleCleanUp() ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
 <if(!TREE_PARSER)>
-retval.stop = input.LT(-1);<\n>
+$retval->set_stop($self->input->LT(-1));<\n>
 <endif>
 <endif>
 >>
@@ -405,42 +466,47 @@ if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.n
  *  fragment rules.
  */
 lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start <ruleName>
-public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
+# $ANTLR start <ruleName>
+sub m_<ruleName> {
+# <ruleDescriptor.parameterScope:parameterScope(scope=it)>
+    my ($self) = @_;
     <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
     <ruleDeclarations()>
-    try {
+    eval {
 <if(nakedBlock)>
         <ruleMemoization(name=ruleName)>
         <lexerRuleLabelDefs()>
         <ruleDescriptor.actions.init>
         <block><\n>
 <else>
-        int _type = <ruleName>;
+        my $_type = <ruleName>;
+        my $_channel = $self->DEFAULT_TOKEN_CHANNEL;
         <ruleMemoization(name=ruleName)>
         <lexerRuleLabelDefs()>
         <ruleDescriptor.actions.init>
         <block>
         <ruleCleanUp()>
-        this.type = _type;
+        $self->state->type($_type);
+        $self->state->channel($_channel);
         <(ruleDescriptor.actions.after):execAction()>
 <endif>
-    }
-    finally {
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <ruleScopeCleanUp()>
-        <memoize()>
+    };
+    <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+    <memoize()>
+
+    if ($EVAL_ERROR) {
+        croak $EVAL_ERROR;
     }
 }
-// $ANTLR end <ruleName>
+# $ANTLR end <ruleName>
 >>
 
 /** How to generate code for the implicitly-defined lexer grammar rule
  *  that chooses between lexer rules.
  */
 tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-public void mTokens() throws RecognitionException {
+sub m_tokens {
+    my ($self) = @_;
     <block><\n>
 }
 >>
@@ -449,14 +515,14 @@ public void mTokens() throws RecognitionException {
 
 /** A (...) subrule with multiple alternatives */
 block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
+# <fileName>:<description>
+my $alt<decisionNumber> = <maxAlt>;
 <decls>
 <@predecision()>
 <decision>
 <@postdecision()>
 <@prebranch()>
-switch (alt<decisionNumber>) {
+switch ($alt<decisionNumber>) {
     <alts:altSwitchCase()>
 }
 <@postbranch()>
@@ -464,19 +530,19 @@ switch (alt<decisionNumber>) {
 
 /** A rule block with multiple alternatives */
 ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
+# <fileName>:<description>
+my $alt<decisionNumber> = <maxAlt>;
 <decls>
 <@predecision()>
 <decision>
 <@postdecision()>
-switch (alt<decisionNumber>) {
+switch ($alt<decisionNumber>) {
     <alts:altSwitchCase()>
 }
 >>
 
 ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
+# <fileName>:<description>
 <decls>
 <@prealt()>
 <alts>
@@ -485,7 +551,7 @@ ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNu
 
 /** A special case of a (...) subrule with a single alternative */
 blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
+# <fileName>:<description>
 <decls>
 <@prealt()>
 <alts>
@@ -494,28 +560,29 @@ blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber
 
 /** A (..)+ block with 1 or more alternatives */
 positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int cnt<decisionNumber>=0;
+# <fileName>:<description>
+my $cnt<decisionNumber> = 0;
 <decls>
 <@preloop()>
-loop<decisionNumber>:
-do {
-    int alt<decisionNumber>=<maxAlt>;
+LOOP<decisionNumber>:
+while (1) {
+    my $alt<decisionNumber> = <maxAlt>;
     <@predecision()>
     <decision>
     <@postdecision()>
-    switch (alt<decisionNumber>) {
-	<alts:altSwitchCase()>
-	default :
-	    if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
-	    <ruleBacktrackFailure()>
-            EarlyExitException eee =
-                new EarlyExitException(<decisionNumber>, input);
+    switch ($alt<decisionNumber>) {
+	    <alts:altSwitchCase()>
+	    else {
+	        if ( $cnt<decisionNumber> >= 1 ) { last LOOP<decisionNumber> }
+	        <ruleBacktrackFailure()>
+            my $eee =
+                ANTLR::Runtime::EarlyExitException->new(<decisionNumber>, $self->input);
             <@earlyExitException()>
-            throw eee;
+            croak $eee;
+        }
     }
-    cnt<decisionNumber>++;
-} while (true);
+    ++$cnt<decisionNumber>;
+}
 <@postloop()>
 >>
 
@@ -523,21 +590,20 @@ positiveClosureBlockSingleAlt ::= positiveClosureBlock
 
 /** A (..)* block with 1 or more alternatives */
 closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
+# <fileName>:<description>
 <decls>
 <@preloop()>
-loop<decisionNumber>:
-do {
-    int alt<decisionNumber>=<maxAlt>;
+LOOP<decisionNumber>:
+while (1) {
+    my $alt<decisionNumber> = <maxAlt>;
     <@predecision()>
     <decision>
     <@postdecision()>
-    switch (alt<decisionNumber>) {
-	<alts:altSwitchCase()>
-	default :
-	    break loop<decisionNumber>;
+    switch ($alt<decisionNumber>) {
+	    <alts:altSwitchCase()>
+	    else { last LOOP<decisionNumber> }
     }
-} while (true);
+}
 <@postloop()>
 >>
 
@@ -555,22 +621,28 @@ optionalBlockSingleAlt ::= block
  *  does the jump to the code that actually matches that alternative.
  */
 altSwitchCase() ::= <<
-case <i> :
+case <i> {
     <@prealt()>
     <it>
-    break;<\n>
+}<\n>
 >>
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
-// <fileName>:<description>
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+# <fileName>:<description>
 {
 <@declarations()>
 <elements:element()>
+<rew>
 <@cleanup()>
 }
 >>
 
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
 // E L E M E N T S
 
 /** Dump the elements one per line */
@@ -580,15 +652,13 @@ element() ::= <<
 >>
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)>$<label> =<endif>$self->match($self->input, <token>, $FOLLOW_<token>_in_<ruleName><elementIndex>);
+<checkRuleBacktrackFailure()>
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -601,50 +671,55 @@ list_<label>.add(<elem>);<\n>
 /** match a character */
 charRef(char,label) ::= <<
 <if(label)>
-<label> = input.LA(1);<\n>
+<label> = $self->input->LA(1);<\n>
 <endif>
-match(<char>); <checkRuleBacktrackFailure()>
+$self->match(<char>); <checkRuleBacktrackFailure()>
 >>
 
 /** match a character range */
 charRangeRef(a,b,label) ::= <<
 <if(label)>
-<label> = input.LA(1);<\n>
+<label> = $self->input->LA(1);<\n>
 <endif>
-matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+$self->match_range(<a>,<b>); <checkRuleBacktrackFailure()>
 >>
 
 /** For now, sets are interval tests and must be tested inline */
 matchSet(s,label,elementIndex,postmatchCode="") ::= <<
 <if(label)>
 <if(LEXER)>
-<label>= input.LA(1);<\n>
+<label>= $self->input->LA(1);<\n>
 <else>
 <label>=(<labelType>)input.LT(1);<\n>
 <endif>
 <endif>
 if ( <s> ) {
-    input.consume();
+    $self->input->consume();
     <postmatchCode>
 <if(!LEXER)>
-    errorRecovery=false;
+    $self->state->error_recovery(0);
 <endif>
     <if(backtracking)>failed=false;<endif>
 }
 else {
     <ruleBacktrackFailure()>
-    MismatchedSetException mse =
-        new MismatchedSetException(null,input);
+    my $mse =
+        ANTLR::Runtime::MismatchedSetException->new(undef, $self->input);
     <@mismatchedSetException()>
 <if(LEXER)>
-    recover(mse);
+    $self->recover($mse);
+    $mse->throw();
 <else>
-    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+    $mse->throw();
+    <! use following code to make it recover inline; remove throw mse;
+    $self->recoverFromMismatchedSet($self->input, $mse, $FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
 <endif>
-    throw mse;
 }<\n>
 >>
 
+matchRuleBlockSet ::= matchSet
+
 matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 <matchSet(...)>
 <listLabel(elem=label,...)>
@@ -654,10 +729,10 @@ matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 lexerStringRef(string,label) ::= <<
 <if(label)>
 int <label>Start = getCharIndex();
-match(<string>); <checkRuleBacktrackFailure()>
+$self->match(<string>); <checkRuleBacktrackFailure()>
 <labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
 <else>
-match(<string>); <checkRuleBacktrackFailure()><\n>
+$self->match(<string>); <checkRuleBacktrackFailure()><\n>
 <endif>
 >>
 
@@ -676,7 +751,7 @@ wildcardAndListLabel(label,elementIndex) ::= <<
 /** Match . wildcard in lexer */
 wildcardChar(label, elementIndex) ::= <<
 <if(label)>
-<label> = input.LA(1);<\n>
+<label> = $self->input->LA(1);<\n>
 <endif>
 matchAny(); <checkRuleBacktrackFailure()>
 >>
@@ -689,36 +764,41 @@ wildcardCharListLabel(label, elementIndex) ::= <<
 /** Match a rule reference by invoking it possibly with arguments
  *  and a return value or values.
  */
-ruleRef(rule,label,elementIndex,args) ::= <<
-pushFollow(FOLLOW_<rule>_in_<ruleName><elementIndex>);
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+$self->push_follow($FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
 <if(label)>
-<label>=<rule>(<args; separator=", ">);<\n>
+$<label> = $self-><rule.name>(<args; separator=", ">);<\n>
 <else>
-<rule>(<args; separator=", ">);<\n>
+$self-><rule.name>(<args; separator=", ">);<\n>
 <endif>
-_fsp--;
+$self->state->_fsp($self->state->_fsp - 1);
 <checkRuleBacktrackFailure()>
 >>
 
 /** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
 <listLabel(elem=label,...)>
 >>
 
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
 <if(label)>
 int <label>Start<elementIndex> = getCharIndex();
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+$self->m_<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
 <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
 <else>
-m<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+$self->m_<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -735,7 +815,8 @@ match(EOF); <checkRuleBacktrackFailure()>
 >>
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
 <root:element()>
 <actionsAfterRoot:element()>
 <if(nullableChildList)>
@@ -764,17 +845,22 @@ if ( !(<evalPredicate(...)>) ) {
 // F i x e d  D F A  (if-then-else)
 
 dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
+my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
+<edges; separator="\nels">
 else {
 <if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
+    $alt<decisionNumber> = <eotPredictsAlt>;
 <else>
     <ruleBacktrackFailure()>
-    NoViableAltException nvae =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    my $nvae =
+        ANTLR::Runtime::NoViableAltException->new({
+            grammar_decision_description => "<description>",
+            decision_number => <decisionNumber>,
+            state_number => <stateNumber>,
+            input => $self->input,
+        });<\n>
     <@noViableAltException()>
-    throw nvae;<\n>
+    croak $nvae;<\n>
 <endif>
 }
 >>
@@ -785,8 +871,8 @@ else {
  *  expect "if ( LA(1)==X ) match(X);" and that's it.
  */
 dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
+my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
+<edges; separator="\nels">
 >>
 
 /** A DFA state that is actually the loopback decision of a closure
@@ -796,21 +882,21 @@ int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
  *  anything other than 'a' predicts exiting.
  */
 dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
+my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
+<edges; separator="\nels"><\n>
 <if(eotPredictsAlt)>
 <if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+$alt<decisionNumber> = <eotPredictsAlt>; <! if no edges, don't gen ELSE !>
 <else>
 else {
-    alt<decisionNumber>=<eotPredictsAlt>;
+    $alt<decisionNumber> = <eotPredictsAlt>;
 }<\n>
 <endif>
 <endif>
 >>
 
 /** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+dfaAcceptState(alt) ::= "$alt<decisionNumber> = <alt>;"
 
 /** A simple edge with an expression.  If the expression is satisfied,
  *  enter to the target state.  To handle gated productions, we may
@@ -828,44 +914,44 @@ if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
  *  decides if this is possible: CodeGenerator.canGenerateSwitch().
  */
 dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    NoViableAltException nvae =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
-<endif>
+switch ( $self->input->LA(<k>) ) {
+    <edges; separator="\n">
+    else {
+    <if(eotPredictsAlt)>
+        $alt<decisionNumber> = <eotPredictsAlt>;
+    <else>
+        <ruleBacktrackFailure()>
+        my $nvae =
+            ANTLR::Runtime::NoViableAltException->new({
+                grammar_decision_description => "<description>",
+                decision_number => <decisionNumber>,
+                state_number => <stateNumber>,
+                input => $self->input,
+            });<\n>
+        <@noViableAltException()>
+        croak $nvae;<\n>
+    <endif>
+    }
 }<\n>
 >>
 
 dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
+switch ( $self->input->LA(<k>) ) {
     <edges; separator="\n">
 }<\n>
 >>
 
 dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
+switch ( $self->input->LA(<k>) ) {
 <edges; separator="\n"><\n>
 <if(eotPredictsAlt)>
-default:
-    alt<decisionNumber>=<eotPredictsAlt>;
-    break;<\n>
+else { $alt<decisionNumber> = <eotPredictsAlt> }<\n>
 <endif>
 }<\n>
 >>
 
 dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{case <it>:}; separator="\n">
-    {
-    <targetState>
-    }
-    break;
+case [<labels:{ <it> }; separator=", ">] { <targetState> }
 >>
 
 // C y c l i c  D F A
@@ -875,7 +961,7 @@ dfaEdgeSwitch(labels, targetState) ::= <<
  *  The <name> attribute is inherited via the parser, lexer, ...
  */
 dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = dfa<decisionNumber>.predict(input);
+$alt<decisionNumber> = $self->dfa<decisionNumber>->predict($self->input);
 >>
 
 /* Dump DFA tables as run-length-encoded Strings of octal values.
@@ -887,70 +973,91 @@ alt<decisionNumber> = dfa<decisionNumber>.predict(input);
  * the encoding methods.
  */
 cyclicDFA(dfa) ::= <<
-static final String DFA<dfa.decisionNumber>_eotS =
-    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_eofS =
-    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_minS =
-    "<dfa.javaCompressedMin; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_maxS =
-    "<dfa.javaCompressedMax; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_acceptS =
-    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_specialS =
-    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>";
-static final String[] DFA<dfa.decisionNumber>_transitionS = {
-        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-};
+Readonly my $DFA<dfa.decisionNumber>_eot => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedEOT; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_eof => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedEOF; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_min => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedMin; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_max => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedMax; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_accept => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedAccept; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_special => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedSpecial; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_transition => [ <dfa.javaCompressedTransition:{s|ANTLR::Runtime::DFA->unpack_rle([ <s; separator=", "> ])}; separator=", "> ];
 
-static final short[] DFA<dfa.decisionNumber>_eot = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS);
-static final short[] DFA<dfa.decisionNumber>_eof = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS);
-static final char[] DFA<dfa.decisionNumber>_min = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
-static final char[] DFA<dfa.decisionNumber>_max = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
-static final short[] DFA<dfa.decisionNumber>_accept = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
-static final short[] DFA<dfa.decisionNumber>_special = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS);
-static final short[][] DFA<dfa.decisionNumber>_transition;
-
-static {
-    int numStates = DFA<dfa.decisionNumber>_transitionS.length;
-    DFA<dfa.decisionNumber>_transition = new short[numStates][];
-    for (int i=0; i\<numStates; i++) {
-        DFA<dfa.decisionNumber>_transition[i] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
-    }
+{
+package <name>::DFA<dfa.decisionNumber>;
+use ANTLR::Runtime::Class;
+
+use strict;
+use warnings;
+
+extends 'ANTLR::Runtime::DFA';
+
+sub BUILD {
+    my $self = shift;
+    my $param_ref = __PACKAGE__->unpack_params(@_, {
+        spec => [
+            {
+                name => 'recognizer',
+                isa  => 'ANTLR::Runtime::BaseRecognizer'
+            },
+        ]
+    });
+
+    $self->recognizer($param_ref->{recognizer});
+    $self->decision_number(<dfa.decisionNumber>);
+    $self->eot($DFA<dfa.decisionNumber>_eot);
+    $self->eof($DFA<dfa.decisionNumber>_eof);
+    $self->min($DFA<dfa.decisionNumber>_min);
+    $self->max($DFA<dfa.decisionNumber>_max);
+    $self->accept($DFA<dfa.decisionNumber>_accept);
+    $self->special($DFA<dfa.decisionNumber>_special);
+    $self->transition($DFA<dfa.decisionNumber>_transition);
 }
 
-class DFA<dfa.decisionNumber> extends DFA {
-
-    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) {
-        this.recognizer = recognizer;
-        this.decisionNumber = <dfa.decisionNumber>;
-        this.eot = DFA<dfa.decisionNumber>_eot;
-        this.eof = DFA<dfa.decisionNumber>_eof;
-        this.min = DFA<dfa.decisionNumber>_min;
-        this.max = DFA<dfa.decisionNumber>_max;
-        this.accept = DFA<dfa.decisionNumber>_accept;
-        this.special = DFA<dfa.decisionNumber>_special;
-        this.transition = DFA<dfa.decisionNumber>_transition;
-    }
-    public String getDescription() {
-        return "<dfa.description>";
-    }
-    <@errorMethod()>
+sub get_description {
+    return "<dfa.description>";
+}
+
+<@errorMethod()>
+
 <if(dfa.specialStateSTs)>
-    public int specialStateTransition(int s, IntStream input) throws NoViableAltException {
-    	int _s = s;
-        switch ( s ) {
+sub special_state_transition {
+    my ($self, $param_ref) = unpack_params(@_, {
+        spec => [
+            {
+                name => 's',
+                type => SCALAR,
+            },
+            {
+                name => 'input',
+                isa  => 'ANTLR::Runtime::IntStream',
+            }
+        ]
+    });
+    my $s = $param_ref->{s};
+    my $input = $param_ref->{input};
+
+    switch ($s) {
         <dfa.specialStateSTs:{state |
-        case <i0> : <! compressed special state numbers 0..n-1 !>
+        case <i0> \{ <! compressed special state numbers 0..n-1 !>
             <state>}; separator="\n">
         }
+    }
+
 <if(backtracking)>
-        if (backtracking>0) {failed=true; return -1;}<\n>
+    if ($self->state->backtracking > 0) {
+        $self->state->failed = 1;
+        return -1;
+    }<\n>
 <endif>
-        NoViableAltException nvae =
-            new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input);
-        error(nvae);
-        throw nvae;
+
+    my $nvae =
+        ANTLR::Runtime::NoViableAltException->new({
+            grammar_decision_description => $self->get_description(),
+            decision_number => <dfa.decisionNumber>,
+            state_number => $s,
+            input => $input,
+        });<\n>
+    $self->error($nvae);
+    $nvae->throw();
     }<\n>
 <endif>
 }<\n>
@@ -960,13 +1067,14 @@ class DFA<dfa.decisionNumber> extends DFA {
  *  state.
  */
 cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+my $input = $self->input;
+my $LA<decisionNumber>_<stateNumber> = $input->LA(1);<\n>
 <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-int index<decisionNumber>_<stateNumber> = input.index();
-input.rewind();<\n>
+my $index<decisionNumber>_<stateNumber> = $input->index();
+$input->rewind();<\n>
 <endif>
 s = -1;
-<edges; separator="\nelse ">
+<edges; separator="\nels">
 <if(semPredState)> <! return input cursor to state before we rewound !>
 input.seek(index<decisionNumber>_<stateNumber>);<\n>
 <endif>
@@ -991,7 +1099,7 @@ s = <targetStateNumber>;<\n>
 
 // D F A  E X P R E S S I O N S
 
-andPredicates(left,right) ::= "(<left>&&<right>)"
+andPredicates(left,right) ::= "(<left> && <right>)"
 
 orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
 
@@ -1001,20 +1109,20 @@ evalPredicate(pred,description) ::= "<pred>"
 
 evalSynPredicate(pred,description) ::= "<pred>()"
 
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+lookaheadTest(atom,k,atomAsInt) ::= "$LA<decisionNumber>_<stateNumber> eq <atom>"
 
 /** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
  *  somewhere.  Must ask for the lookahead directly.
  */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "$self->input->LA(<k>) eq <atom>"
 
 lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+($LA<decisionNumber>_<stateNumber> ge <lower> && $LA<decisionNumber>_<stateNumber> le <upper>)
 >>
 
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "($self->input->LA(<k>) ge <lower> && $self->input->LA(<k>) le <upper>)"
 
-setTest(ranges) ::= "<ranges; separator=\"||\">"
+setTest(ranges) ::= "<ranges; separator=\" || \">"
 
 // A T T R I B U T E S
 
@@ -1064,15 +1172,19 @@ void
 >>
 
 /** Using a type to init value map, try to init a type; if not in table
- *  must be an object, default value is "null".
+ *  must be an object, default value is "undef".
  */
 initValue(typeName) ::= <<
-<javaTypeInitMap.(typeName)>
+<if(typeName)>
+<perlTypeInitMap.(typeName)>
+<else>
+undef
+<endif>
 >>
 
 /** Define a rule label including default value */
 ruleLabelDef(label) ::= <<
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+my $<label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
 >>
 
 /** Define a return struct for a rule if the code needs to access its
@@ -1081,19 +1193,24 @@ ruleLabelDef(label) ::= <<
  */
 returnScope(scope) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-public static class <returnType()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
+{
+    package <returnType()>;
+    use ANTLR::Runtime::Class;
+
+    extends 'ANTLR::Runtime::<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope';
+
     <scope.attributes:{public <it.decl>;}; separator="\n">
     <@ruleReturnMembers()>
-};
+}
 <endif>
 >>
 
 parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
+<scope.attributes:{$<it.name>}; separator=", ">
 >>
 
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+parameterAttributeRef(attr) ::= "$<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "$<attr.name> =<expr>;"
 
 scopeAttributeRef(scope,attr,index,negIndex) ::= <<
 <if(negIndex)>
@@ -1128,9 +1245,9 @@ isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
 /** reference an attribute of rule; might only have single return value */
 ruleLabelRef(referencedRule,scope,attr) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
+$<scope>.<attr.name>
 <else>
-<scope>
+$<scope>
 <endif>
 >>
 
@@ -1138,7 +1255,7 @@ returnAttributeRef(ruleDescriptor,attr) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
 retval.<attr.name>
 <else>
-<attr.name>
+$<attr.name>
 <endif>
 >>
 
@@ -1146,12 +1263,12 @@ returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
 retval.<attr.name> =<expr>;
 <else>
-<attr.name> =<expr>;
+$<attr.name> =<expr>;
 <endif>
 >>
 
 /** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
+tokenLabelRef(label) ::= "$<label>"
 
 /** ids+=ID {$ids} or e+=expr {$e} */
 listLabelRef(label) ::= "list_<label>"
@@ -1159,7 +1276,7 @@ listLabelRef(label) ::= "list_<label>"
 
 // not sure the next are the right approach
 
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+tokenLabelPropertyRef_text(scope,attr) ::= "$<scope>->get_text()"
 tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
 tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
 tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
@@ -1176,14 +1293,14 @@ input.getTokenStream().toString(
   input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
   input.getTreeAdaptor().getTokenStopIndex(<scope>.start))
 <else>
-input.toString(<scope>.start,<scope>.stop)
+substr($self->input, $<scope>->start, $<scope>->stop)
 <endif>
 >>
 
 ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
 
 /** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
+lexerRuleLabel(label) ::= "$<label>"
 
 lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
 lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
@@ -1208,13 +1325,14 @@ input.toString(retval.start,input.LT(-1))
 rulePropertyRef_st(scope,attr) ::= "retval.st"
 
 lexerRulePropertyRef_text(scope,attr) ::= "getText()"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_type(scope,attr) ::= "$_type"
 lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
 lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
 lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "channel"
+lexerRulePropertyRef_channel(scope,attr) ::= "$_channel"
 lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
 lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+lexerRulePropertyRef_self(scope,attr) ::= "$self"
 
 // setting $st and $tree is allowed in local rule. everything else
 // is flagged as error
@@ -1242,10 +1360,10 @@ if ( backtracking==0 ) {
 // M I S C (properties, etc...)
 
 bitset(name, words64) ::= <<
-public static final BitSet <name> = new BitSet(new long[]{<words64:{<it>L};separator=",">});<\n>
+Readonly my $<name> => ANTLR::Runtime::BitSet->new({ words64 => [ <words64:{'<it>'};separator=", "> ] });<\n>
 >>
 
-codeFileExtension() ::= ".java"
+codeFileExtension() ::= ".pm"
 
-true() ::= "true"
-false() ::= "false"
+true() ::= "1"
+false() ::= "0"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg
new file mode 100644
index 0000000..d348b4c
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg
@@ -0,0 +1,458 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* in sync with Java/AST.stg revision 36 */
+
+group AST;
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+from antlr3.tree import *<\n>
+<endif>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+ at genericParser.init() ::= <<
+self._adaptor = None
+self.adaptor = CommonTreeAdaptor()
+<@super.init()>
+>>
+
+ at genericParser.members() ::= <<
+<@super.members()>
+<astAccessor()>
+>>
+
+astAccessor() ::= <<
+def getTreeAdaptor(self):
+    return self._adaptor
+
+def setTreeAdaptor(self, adaptor):
+    self._adaptor = adaptor
+    <grammar.directDelegates:{g|<g:delegateName()>.adaptor = adaptor}; separator="\n">
+
+adaptor = property(getTreeAdaptor, setTreeAdaptor)
+>>
+
+ at returnScope.ruleReturnInit() ::= <<
+self.tree = None
+>>
+
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+root_0 = None<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]
+    :{<it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.tokenListLabels:{<it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>")}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>")}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+ at alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = self._adaptor.nil()<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+
+// // T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+        alts, description,
+        referencedElementsDeep, // ALL referenced elements to right of ->
+        referencedTokenLabels,
+        referencedTokenListLabels,
+        referencedRuleLabels,
+        referencedRuleListLabels,
+        referencedWildcardLabels,
+        referencedWildcardListLabels,
+        rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+# AST Rewrite
+# elements: <referencedElementsDeep; separator=", ">
+# token labels: <referencedTokenLabels; separator=", ">
+# rule labels: <referencedRuleLabels; separator=", ">
+# token list labels: <referencedTokenListLabels; separator=", ">
+# rule list labels: <referencedRuleListLabels; separator=", ">
+# wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<finishedBacktracking({
+<prevRuleRootRef()>.tree = root_0
+<rewriteCodeLabels()>
+root_0 = self._adaptor.nil()
+<first(alts):rewriteAltFirst(); anchor>
+
+<rest(alts):{a | el<rewriteAltRest(a)>}; anchor, separator="\n\n">
+
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = self._adaptor.rulePostProcessing(root_0)
+self.input.replaceChildren(
+    self._adaptor.getParent(retval.start),
+    self._adaptor.getChildIndex(retval.start),
+    self._adaptor.getChildIndex(_last),
+    retval.tree
+    )<\n>
+<endif>
+<endif>
+
+<! if parser or tree-parser and rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0<\n>
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0<\n>
+<endif>
+<endif>
+})>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", <it>)};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", <it>)};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", list_<it>)};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{
+if <it> is not None:
+    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>", <it>.tree)
+else:
+    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", None)
+};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather shallow like other blocks.
+  */
+rewriteOptionalBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in immediately block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+if <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in immediately block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in immediately block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+if not (<referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">):
+    raise RewriteEarlyExitException()
+
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset()<\n>}>
+>>
+
+rewriteAltRest(a) ::= <<
+<if(a.pred)>
+if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>
+se: <! little hack to get if .. elif .. else block right !>
+    # <a.description>
+    <a.alt>
+<endif>
+>>
+
+rewriteAltFirst(a) ::= <<
+<if(a.pred)>
+if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>
+# <a.description>
+<a.alt>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = None"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+# <fileName>:<description>
+root_<treeLevel> = self._adaptor.nil()
+<root:rewriteElement()>
+<children:rewriteElement()>
+self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,hetero,args) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>)<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,hetero,args) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>)<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,hetero,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>)<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,hetero,elementIndex) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>)<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<!FIXME(96,untested)!>
+root_0 = <action><\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  let's us refer to $rule to mean previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree())<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <action>)<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<action>, root_<treeLevel>)<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+createImaginaryNode(tokenType,hetero,args) ::= <<
+<if(hetero)>
+<! new MethodNode(IDLabel, args) !>
+<hetero>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
+<else>self._adaptor.create(<tokenType>, <args; separator=", ">)
+<endif>
+<endif>
+>>
+
+//<! need to call different adaptor.create*() methods depending of argument count !>
+//<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
+//<else><if (!rest(args))>self._adaptor.createFromType(<tokenType>, <first(args)>)
+//<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<tokenType>, <first(args)>, <first(rest(args))>)
+//<endif>
+//<endif>
+//<endif>
+
+
+createRewriteNodeFromElement(token,hetero,args) ::= <<
+<if(hetero)>
+<hetero>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+<! need to call different adaptor.create*() methods depending of argument count !>
+<if (!args)>self._adaptor.createFromType(<token>, "<token>")
+<else><if (!rest(args))>self._adaptor.createFromToken(<token>, <first(args)>)
+<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
+<endif>
+<endif>
+<endif>
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
diff --git a/src/org/antlr/codegen/templates/Java/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg
similarity index 61%
rename from src/org/antlr/codegen/templates/Java/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg
index 3abe396..2b8f4b7 100644
--- a/src/org/antlr/codegen/templates/Java/ASTDbg.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2009 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -27,39 +27,33 @@
 */
 
 /** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Python by code generator.
  */
 group ASTDbg;
 
-parserMembers() ::= <<
-protected DebugTreeAdaptor adaptor =
-	  new DebugTreeAdaptor(null,new CommonTreeAdaptor());
-public void setTreeAdaptor(TreeAdaptor adaptor) {
-    this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
-}
-public TreeAdaptor getTreeAdaptor() {
-    return adaptor;
-}<\n>
->>
+astAccessor() ::= <<
+def setTreeAdaptor(self, adaptor):
+<if(grammar.grammarIsRoot)>
+    self._adaptor = DebugTreeAdaptor(self.dbg, adaptor)
+<else>
+    self._adaptor = adaptor # delegator sends dbg adaptor 
+<endif>
+    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(self._adaptor)}>
 
-createListenerAndHandshake() ::= <<
-DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this, port, adaptor);
-setDebugListener(proxy);
-adaptor.setDebugEventListener(proxy);
-try {
-    proxy.handshake();
-}
-catch (IOException ioe) {
-    reportError(ioe);
-}
+def getTreeAdaptor(self):
+    return self._adaptor
+
+adaptor = property(getTreeAdaptor, setTreeAdaptor)<\n>
 >>
 
-ctorForPredefinedListener() ::= <<
-public <name>(<inputStreamType> input, DebugEventListener dbg) {
-    super(input, dbg);
-    adaptor.setDebugEventListener(dbg);
-}<\n>
+createListenerAndHandshake() ::= <<
+proxy = DebugEventSocketProxy(self, adaptor=<if(TREE_PARSER)>self.input.getTreeAdaptor()<else>self._adaptor<endif>,
+                              debug=debug_socket, port=port)
+self.setDebugListener(proxy)
+self.adaptor.setDebugListener(proxy)
+self.input.setDebugListener(proxy)
+#self.set<inputStreamType>(Debug<inputStreamType>(self.input, proxy))
+proxy.handshake()
 >>
 
- at rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
+ at rewriteElement.pregen() ::= "self._dbg.location(<e.line>, <e.pos>)"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg
new file mode 100644
index 0000000..0d61a0c
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg
@@ -0,0 +1,198 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+group ASTParser;
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+ at ruleBody.setErrorReturnValue() ::= <<
+retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = <createNodeFromToken(...)>
+self._adaptor.addChild(root_0, <label>_tree)
+})>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = <createNodeFromToken(...)>
+root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
+})>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,hetero,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+<super.matchSet(..., postmatchCode={<finishedBacktracking({self._adaptor.addChild(root_0, <createNodeFromToken(...)>)})>})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+<super.matchSet(..., postmatchCode={<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<createNodeFromToken(...)>, root_0)})>})>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({self._adaptor.addChild(root_0, <label>.tree)})>
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<label>.tree, root_0)})>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+// WILDCARD AST
+
+wildcard(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.createWithPayload(<label>)
+self._adaptor.addChild(root_0, <label>_tree)
+})>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(label,elementIndex) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.createWithPayload(<label>)
+root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
+})>
+>>
+
+createNodeFromToken(label,hetero) ::= <<
+<if(hetero)>
+<hetero>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+self._adaptor.createWithPayload(<label>)
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<finishedBacktracking({
+retval.tree = self._adaptor.rulePostProcessing(root_0)
+self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+})>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg
new file mode 100644
index 0000000..fca7551
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg
@@ -0,0 +1,312 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+group ASTTreeParser;
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+_first_0 = None
+_last = None<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<finishedBacktracking({
+<if(rewriteMode)>
+retval.tree = _first_0
+if self._adaptor.getParent(retval.tree) is not None and self._adaptor.isNil(self._adaptor.getParent(retval.tree)):
+    retval.tree = self._adaptor.getParent(retval.tree)
+<endif>
+})>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = self.input.LT(1)
+_save_last_<treeLevel> = _last
+_first_<treeLevel> = None
+<if(!rewriteMode)>
+root_<treeLevel> = self._adaptor.nil()<\n>
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<finishedBacktracking({
+<if(root.el.rule)>
+if _first_<enclosingTreeLevel> is None:
+    _first_<enclosingTreeLevel> = <root.el.label>.tree<\n>
+<else>
+if _first_<enclosingTreeLevel> is None:
+    _first_<enclosingTreeLevel> = <root.el.label><\n>
+<endif>
+})>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if self.input.LA(1) == DOWN:
+    self.match(self.input, DOWN, None)
+    <children:element()>
+    self.match(self.input, UP, None)
+
+<else>
+self.match(self.input, DOWN, None)
+<children:element()>
+self.match(self.input, UP, None)<\n>
+<endif>
+<if(!rewriteMode)>
+self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+<endif>
+_last = _save_last_<treeLevel>
+
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,hetero) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(hetero)>
+<label>_tree = <hetero>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<else> <! rewrite mode !>
+<finishedBacktracking({
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label><\n>
+})>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,hetero) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(hetero)>
+<label>_tree = <hetero>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
+})>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,hetero) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,hetero) ::= <<
+_last = self.input.LT(1)
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.dupTree(<label>)
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<else> <! rewrite mode !>
+<finishedBacktracking({
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label>
+})>
+<endif>
+>>
+
+// SET AST
+matchSet(s,label,hetero,elementIndex,postmatchCode) ::= <<
+_last = self.input.LT(1)
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(hetero)>
+<label>_tree = <hetero>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<endif>
+})>
+>>
+
+matchRuleBlockSet(s,label,hetero,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite()> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
+_last = self.input.LT(1)
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,hetero,elementIndex,debug) ::= <<
+<super.matchSet(..., postmatchCode={
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(hetero)>
+<label>_tree = <hetero>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
+})>
+<endif>
+})>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRef(...)>
+<finishedBacktracking({
+<if(!rewriteMode)>
+self._adaptor.addChild(root_<treeLevel>, <label>.tree)
+<else> <! rewrite mode !>
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label>.tree<\n>
+<endif>
+})>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+root_<treeLevel> = self._adaptor.becomeRoot(<label>.tree, root_<treeLevel>)
+})>
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(elem=label+".tree",...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,hetero,scope) ::= <<
+<if(hetero)>
+<hetero>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<finishedBacktracking({
+retval.tree = self._adaptor.rulePostProcessing(root_0)
+})>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg
new file mode 100644
index 0000000..d1670a1
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg
@@ -0,0 +1,317 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Python output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+group Dbg;
+
+ at outputFile.imports() ::= <<
+<@super.imports()>
+from antlr3.debug import *
+>>
+
+ at genericParser.args() ::= <<
+debug_socket = kwargs.pop('debug_socket', None)
+port = kwargs.pop('port', None)
+>>
+
+ at genericParser.init() ::= <<
+self.ruleLevel = 0
+
+if self._dbg is None:
+    <createListenerAndHandshake()>
+
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+proxy = DebugEventSocketProxy(self, adaptor=self.input.getTreeAdaptor(),
+                              debug=debug_socket, port=port)<\n>
+<else>
+proxy = DebugEventSocketProxy(self, debug=debug_socket, port=port)<\n>
+<endif>
+self.setDebugListener(proxy)
+proxy.handshake()
+
+>>
+
+ at genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+ruleNames = [
+    "invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n    ", separator=", ">
+    ]<\n>
+<endif>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+def getRuleLevel(self):
+    return self.ruleLevel
+
+def incRuleLevel(self):
+    self.ruleLevel += 1
+
+def decRuleLevel(self):
+    self.ruleLevel -= 1
+
+<if(profile)>
+    <ctorForProfilingRootGrammar()>
+<else>
+    <ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else> <! imported grammar !>
+def getRuleLevel(self):
+    return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel()
+
+def incRuleLevel(self):
+    <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel()
+
+def decRuleLevel(self):
+    <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel()
+
+<ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+FIXME(2)
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+    ((Profiler)self._dbg).examineRuleMemoization(input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    return super.alreadyParsedRule(input, ruleIndex);
+}<\n>
+FIXME(3)
+public void memoize(IntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)self._dbg).memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    super.memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+def evalPredicate(self, result, predicate):
+    self._dbg.semanticPredicate(result, predicate)
+    return result
+<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+<!
+public <name>(<inputStreamType> input) {
+    this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, int port, RecognizerSharedState state) {
+    super(input, state);
+    <parserCtorBody()>
+    <createListenerAndHandshake()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, self._dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+!>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+    this(input, new Profiler(null), new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, DebugEventListener self.dbg, RecognizerSharedState state) {
+    super(input, self.dbg, state);
+    Profiler p = (Profiler)self.dbg;
+    p.setParser(this);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, self.dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners are root will have it. */
+ctorForDelegateGrammar() ::= <<
+<!
+public <name>(<inputStreamType> input, DebugEventListener self.dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+    super(input, dbg, state);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+}<\n>
+!>
+>>
+
+ctorForPredefinedListener() ::= <<
+<!
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+    <@superClassRef>super(input, dbg, new RecognizerSharedState());<@end>
+<if(profile)>
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+<endif>
+    <parserCtorBody()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, self._dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+!>
+>>
+
+ at genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+ at rule.body() ::= <<
+try:
+    self._dbg.enterRule(self.getGrammarFileName(), "<ruleName>")
+    if self.getRuleLevel() == 0:
+        self._dbg.commence();
+    self.incRuleLevel()
+    self._dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.column>)
+
+    <@super.body()>
+
+    self._dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.column>)
+finally:
+    self._dbg.exitRule(self.getGrammarFileName(), "<ruleName>")
+    self.decRuleLevel()
+    if self.getRuleLevel() == 0:
+         self._dbg.terminate()
+
+>>
+
+ at synpred.start() ::= "self._dbg.beginBacktrack(self._state.backtracking)"
+
+ at synpred.stop() ::= "self._dbg.endBacktrack(self._state.backtracking, success)"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { self._dbg.enterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally {self._dbg.exitSubRule(<decisionNumber>);}<\n>"
+
+enterDecision() ::=
+    "try { self._dbg.enterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally {self._dbg.exitDecision(<decisionNumber>);}<\n>"
+
+enterAlt(n) ::= "self._dbg.enterAlt(<n>)<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+ at block.body() ::= <<
+try:
+    self._dbg.enterSubRule(<decisionNumber>)
+    <@super.body()>
+finally:
+    self._dbg.exitSubRule(<decisionNumber>)
+>>
+
+ at blockBody.decision() ::= <<
+try:
+    self._dbg.enterDecision(<decisionNumber>)
+    <@super.decision()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+ at ruleBlock.decision() ::= <<
+try:
+    self._dbg.enterDecision(<decisionNumber>)
+    <@super.decision()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+ at ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+ at positiveClosureBlock.loopBody() ::= <<
+try:
+    self._dbg.enterSubRule(<decisionNumber>)
+    <@super.loopBody()>
+finally:
+    self._dbg.exitSubRule(<decisionNumber>)<\n>
+>>
+
+ at positiveClosureBlockLoop.decisionBody() ::= <<
+try:
+    self._dbg.enterDecision(<decisionNumber>)
+    <@super.decisionBody()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+ at positiveClosureBlockLoop.earlyExitException() ::=
+    "self._dbg.recognitionException(eee)<\n>"
+
+ at closureBlock.loopBody() ::= <<
+try:
+    self._dbg.enterSubRule(<decisionNumber>)
+    <@super.loopBody()>
+finally:
+    self._dbg.exitSubRule(<decisionNumber>)<\n>
+>>
+
+ at closureBlockLoop.decisionBody() ::= <<
+try:
+    self._dbg.enterDecision(<decisionNumber>)
+    <@super.decisionBody()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+ at altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+ at element.prematch() ::=
+    "self._dbg.location(<it.line>, <it.pos>)"
+
+ at matchSet.mismatchedSetException() ::=
+    "self._dbg.recognitionException(mse)"
+
+ at dfaState.noViableAltException() ::= "self._dbg.recognitionException(nvae)"
+
+ at dfaStateSwitch.noViableAltException() ::= "self._dbg.recognitionException(nvae)"
+
+dfaDecision(decisionNumber,description) ::= <<
+try:
+    self.isCyclicDecision = True
+    <super.dfaDecision(...)>
+
+except NoViableAltException, nvae:
+    self._dbg.recognitionException(nvae)
+    raise
+
+>>
+
+ at cyclicDFA.errorMethod() ::= <<
+def error(self, nvae):
+    self._dbg.recognitionException(nvae)
+
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+self.evalPredicate(<pred>,"<description>")
+>>
diff --git a/src/org/antlr/codegen/templates/Python/Python.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
similarity index 70%
rename from src/org/antlr/codegen/templates/Python/Python.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
index e89b2bb..4182469 100644
--- a/src/org/antlr/codegen/templates/Python/Python.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
@@ -36,7 +36,7 @@ group Python implements ANTLRCore;
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
            docComment, recognizer,
            name, tokens, tokenNames, rules, cyclicDFAs,
-           bitsets, buildTemplate, buildAST, rewrite, profile,
+           bitsets, buildTemplate, buildAST, rewriteMode, profile,
            backtracking, synpreds, memoize, numRules,
            fileName, ANTLRVersion, generatedTimestamp, trace,
            scopes, superClass, literals) ::=
@@ -44,6 +44,7 @@ outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
 # $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
 
 <@imports>
+import sys
 from antlr3 import *
 <if(TREE_PARSER)>
 from antlr3.tree import *<\n>
@@ -62,22 +63,67 @@ HIDDEN = BaseRecognizer.HIDDEN
 <tokens:{<it.name>=<it.type>}; separator="\n">
 
 <recognizer>
+
+<if(actions.(actionScope).main)>
+<actions.(actionScope).main>
+<else>
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+<if(LEXER)>
+    from antlr3.main import LexerMain
+    main = LexerMain(<recognizer.name>)<\n>
+<endif>
+<if(PARSER)>
+    from antlr3.main import ParserMain
+    main = ParserMain("<recognizer.grammar.name>Lexer", <recognizer.name>)<\n>
+<endif>
+<if(TREE_PARSER)>
+    from antlr3.main import WalkerMain
+    main = WalkerMain(<recognizer.name>)<\n>
+<endif>
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)<\n>
+<endif>
+
+<actions.(actionScope).footer>
+
+if __name__ == '__main__':
+    main(sys.argv)
+
 >>
 
 lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-class <name>(Lexer):
+      filterMode, superClass="Lexer") ::= <<
+<grammar.directDelegates:
+ {g|from <g.recognizerName> import <g.recognizerName>}; separator="\n">
+
+class <grammar.recognizerName>(<@superClassName><superClass><@end>):
     <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
 
     grammarFileName = "<fileName>"
+    antlr_version = version_str_to_tuple("<ANTLRVersion>")
+    antlr_version_str = "<ANTLRVersion>"
 
-    def __init__(self, input=None):
-        Lexer.__init__(self, input)
-<if(backtracking)>
-        self.ruleMemo = {}
+    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input=None, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+        super(<grammar.recognizerName>, self).__init__(input, state)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        self._state.ruleMemo = {}
+<endif>
 <endif>
 
-        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; seperator="\n">
+        <grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
+        <grammar.delegators:
+         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <last(grammar.delegators):
+    	 {g|self.gParent = <g:delegateName()>}; separator="\n">
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
 
         <actions.lexer.init>
 
@@ -109,28 +155,31 @@ def nextToken(self):
         if self.input.LA(1) == EOF:
             return EOF_TOKEN
 
-        self.token = None
-        self.channel = DEFAULT_CHANNEL
-        self.tokenStartCharIndex = self.input.index()
-        self.tokenStartCharPositionInLine = self.input.charPositionInLine
-        self.tokenStartLine = self.input.line
-        self._text = None
+        self._state.token = None
+        self._state.channel = DEFAULT_CHANNEL
+        self._state.tokenStartCharIndex = self.input.index()
+        self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
+        self._state.tokenStartLine = self.input.line
+        self._state._text = None
         try:
             m = self.input.mark()
-            self.backtracking = 1 <! means we won't throw slow exception !>
-            self.failed = False
-            self.mTokens()
-            self.backtracking = 0
-
-            <! mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. !>
-            if self.failed:
+            try:
+                # means we won't throw slow exception
+                self._state.backtracking = 1
+                try:
+                    self.mTokens()
+                finally:
+                    self._state.backtracking = 0
+
+            except BacktrackingFailed:
+                # mTokens backtracks with synpred at backtracking==2
+                # and we set the synpredgate to allow actions at level 1.
                 self.input.rewind(m)
-                self.input.consume() <! advance one char and try again !>
+                self.input.consume() # advance one char and try again
 
             else:
                 self.emit()
-                return self.token
+                return self._state.token
 
         except RecognitionException, re:
             # shouldn't happen in backtracking mode, but...
@@ -138,62 +187,97 @@ def nextToken(self):
             self.recover(re)
 
 
-def memoize(self, input, ruleIndex, ruleStartIndex):
-    if self.backtracking > 1:
+def memoize(self, input, ruleIndex, ruleStartIndex, success):
+    if self._state.backtracking > 1:
         # is Lexer always superclass?
-        Lexer.memoize(self, input, ruleIndex, ruleStartIndex)
+        <@superClassName><superClass><@end>.memoize(self, input, ruleIndex, ruleStartIndex, success)
 
 
 def alreadyParsedRule(self, input, ruleIndex):
-    if self.backtracking > 1:
-        return Lexer.alreadyParsedRule(self, input, ruleIndex)
+    if self._state.backtracking > 1:
+        return <@superClassName><superClass><@end>.alreadyParsedRule(self, input, ruleIndex)
     return False
 
 
 >>
 
-filteringActionGate() ::= "self.backtracking == 1"
+actionGate() ::= "self._state.backtracking == 0"
+
+filteringActionGate() ::= "self._state.backtracking == 1"
 
 /** How to generate a parser */
 
 genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              ASTLabelType="Object", labelType, members, init) ::= <<
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="Object", labelType, members, rewriteElementType, 
+              init) ::= <<
+<if(grammar.grammarIsRoot)>
 # token names
 tokenNames = [
     "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", 
     <tokenNames; wrap, separator=", ">
-]
-
+]<\n>
+<else>
+from <grammar.composite.rootGrammar.recognizerName> import tokenNames<\n>
+<endif>
 <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeClass(scope=it)><endif>}>
 
+<grammar.directDelegates:
+ {g|from <g.recognizerName> import <g.recognizerName>}; separator="\n">
+
 <rules:{<ruleAttributeScopeClass(scope=it.ruleDescriptor.ruleScope)>}>
 
-class <name>(<superClass>):
+class <grammar.recognizerName>(<@superClassName><superClass><@end>):
     grammarFileName = "<fileName>"
+    antlr_version = version_str_to_tuple("<ANTLRVersion>")
+    antlr_version_str = "<ANTLRVersion>"
     tokenNames = tokenNames
 
-    def __init__(self, input):
-        <superClass>.__init__(self, input)
-<if(backtracking)>
-        self.ruleMemo = {}
+    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input, state=None, *args, **kwargs):
+        if state is None:
+            state = RecognizerSharedState()
+
+        <@args()>
+        super(<grammar.recognizerName>, self).__init__(input, state, *args, **kwargs)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        self._state.ruleMemo = {}
+<endif>
 <endif>
 
-        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; seperator="\n">
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
 
         <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeStack(scope=it)><endif>}>
 	<rules:{<ruleAttributeScopeStack(scope=it.ruleDescriptor.ruleScope)>}>
 
         <init>
 
-        <@members>
+        <grammar.delegators:
+         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
+        <!grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(self<grammar.delegators:{g|, <g:delegateName()>}>, input, state)}; separator="\n"!>
+        <last(grammar.delegators):
+    	 {g|self.gParent = self.<g:delegateName()>}; separator="\n">
+
+        <@init>
         <@end>
 
 
+    <@members>
+    <@end>
+
     <members>
 
     <rules; separator="\n\n">
 
+    <! generate rule/method definitions for imported rules so they
+       appear to be defined in this recognizer. !>
+    # Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor| <delegateRule(ruleDescriptor)> }; separator="\n">
+
     <synpreds:{p | <synpred(p)>}>
 
     <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
@@ -202,15 +286,22 @@ class <name>(<superClass>):
 
 >>
 
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", init={<actions.parser.init>}, ...)>
+delegateRule(ruleDescriptor) ::= <<
+def <ruleDescriptor.name>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
+<\ >   <if(ruleDescriptor.hasReturnValue)>return <endif>self.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">)
+
+
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", init={<actions.parser.init>}, ...)>
 >>
 
 /** How to generate a tree parser; same as parser except the input
  *  stream is a different type.
  */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", init={<actions.treeparser.init>}, ...)>
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", init={<actions.treeparser.init>}, ...)>
 >>
 
 /** A simpler version of a rule template that is specific to the imaginary
@@ -222,7 +313,7 @@ treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRu
  */
 synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
 <<
-# $ANTLR start <ruleName>
+# $ANTLR start "<ruleName>"
 def <ruleName>_fragment(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
 <if(trace)>
     self.traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
@@ -235,22 +326,25 @@ def <ruleName>_fragment(self, <ruleDescriptor.parameterScope:parameterScope(scop
 <else>
     <block>
 <endif>
-# $ANTLR end <ruleName>
+# $ANTLR end "<ruleName>"
 
 
 >>
 
 synpred(name) ::= <<
 def <name>(self):
-    self.backtracking += 1
+    self._state.backtracking += 1
     <@start()>
     start = self.input.mark()
-    self.<name>_fragment()
-    success = not self.failed
+    try:
+        self.<name>_fragment()
+    except BacktrackingFailed:
+        success = False
+    else:
+        success = True
     self.input.rewind(start)
     <@stop()>
-    self.backtracking -= 1
-    self.failed = False
+    self._state.backtracking -= 1
     return success
 
 
@@ -262,26 +356,20 @@ lexerSynpred(name) ::= <<
 
 ruleMemoization(name) ::= <<
 <if(memoize)>
-if self.backtracking > 0 and self.alreadyParsedRule(self.input, <ruleDescriptor.index>):
+if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, <ruleDescriptor.index>):
+    # for cached failed rules, alreadyParsedRule will raise an exception
+    success = True
     return <ruleReturnValue()>
 
 <endif>
 >>
 
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>
-if self.failed:
-    return <ruleReturnValue()>
-<endif>
->>
-
 /** This rule has failed, exit indicating failure during backtrack */
 ruleBacktrackFailure() ::= <<
 <if(backtracking)>
-if self.backtracking > 0:
-    self.failed = True
-    return <ruleReturnValue()><\n>
+if self._state.backtracking > 0:
+    raise BacktrackingFailed
+
 <endif>
 >>
 
@@ -291,7 +379,7 @@ if self.backtracking > 0:
 rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
 <returnScope(scope=ruleDescriptor.returnScope)>
 
-# $ANTLR start <ruleName>
+# $ANTLR start "<ruleName>"
 # <fileName>:<description>
 <ruleDescriptor.actions.decorate>
 def <ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
@@ -303,44 +391,58 @@ def <ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
     <ruleLabelDefs()>
     <ruleDescriptor.actions.init>
     <@preamble()>
+    <@body><ruleBody()><@end>
+    <@postamble()>
+    return <ruleReturnValue()>
+
+# $ANTLR end "<ruleName>"
+>>
+
+ruleBody() ::= <<
+<if(memoize)>
+<if(backtracking)>
+success = False<\n>
+<endif>
+<endif>
+try:
     try:
-        try:
-            <ruleMemoization(name=ruleName)>
-            <block>
-            <ruleCleanUp()>
-            <(ruleDescriptor.actions.after):execAction()>
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
 
+<if(memoize)>
+<if(backtracking)>
+        success = True<\n>
+<endif>
+<endif>
 <if(exceptions)>
-        <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
 <else>
 <if(!emptyRule)>
 <if(actions.(actionScope).rulecatch)>
-        <actions.(actionScope).rulecatch>
+    <actions.(actionScope).rulecatch>
 <else>
-        except RecognitionException, re:
-            self.reportError(re)
-            self.recover(self.input, re)
+    except RecognitionException, re:
+        self.reportError(re)
+        self.recover(self.input, re)
+        <@setErrorReturnValue()>
 
 <endif>
 <else>
-        finally:
-            pass
+    finally:
+        pass
 
 <endif>
 <endif>
-    finally:
+finally:
 <if(trace)>
-        self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+    self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
 <endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-        pass
-
-    <@postamble()>
-    return <ruleReturnValue()>
-
-# $ANTLR end <ruleName>
+    <memoize()>
+    <ruleScopeCleanUp()>
+    <finally>
+    pass
 >>
 
 catch(decl,action) ::= <<
@@ -374,10 +476,12 @@ ruleScopeCleanUp() ::= <<
 >>
 
 ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
     :{<it.label.text> = None}; separator="\n"
 >
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,
+  ruleDescriptor.wildcardTreeListLabels]
     :{list_<it.label.text> = None}; separator="\n"
 >
 <[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
@@ -394,7 +498,6 @@ lexerRuleLabelDefs() ::= <<
 >
 <ruleDescriptor.charLabels:{<it.label.text> = None}; separator="\n">
 <[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
   ruleDescriptor.ruleListLabels]
     :{list_<it.label.text> = None}; separator="\n"
 >
@@ -423,8 +526,8 @@ retval.stop = self.input.LT(-1)<\n>
 memoize() ::= <<
 <if(memoize)>
 <if(backtracking)>
-if self.backtracking > 0:
-    self.memoize(self.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex)
+if self._state.backtracking > 0:
+    self.memoize(self.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex, success)
 
 <endif>
 <endif>
@@ -434,13 +537,18 @@ if self.backtracking > 0:
  *  fragment rules.
  */
 lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-# $ANTLR start <ruleName>
+# $ANTLR start "<ruleName>"
 def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
 <if(trace)>
     self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
 <endif>
     <ruleScopeSetUp()>
     <ruleDeclarations()>
+<if(memoize)>
+<if(backtracking)>
+    success = False<\n>
+<endif>
+<endif>
     try:
 <if(nakedBlock)>
         <ruleMemoization(name=ruleName)>
@@ -448,15 +556,23 @@ def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
         <ruleDescriptor.actions.init>
         <block><\n>
 <else>
-        self.type = <ruleName>
+        _type = <ruleName>
+        _channel = DEFAULT_CHANNEL
 
         <ruleMemoization(name=ruleName)>
         <lexerRuleLabelDefs()>
         <ruleDescriptor.actions.init>
         <block>
         <ruleCleanUp()>
+        self._state.type = _type
+        self._state.channel = _channel
         <(ruleDescriptor.actions.after):execAction()>
 <endif>
+<if(memoize)>
+<if(backtracking)>
+        success = True<\n>
+<endif>
+<endif>
 
     finally:
 <if(trace)>
@@ -466,7 +582,7 @@ def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>):
         <memoize()>
         pass
 
-# $ANTLR end <ruleName>
+# $ANTLR end "<ruleName>"
 
 
 >>
@@ -488,8 +604,12 @@ block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,max
 # <fileName>:<description>
 alt<decisionNumber> = <maxAlt>
 <decls>
+<@body><blockBody()><@end>
+>>
+
+blockBody() ::= <<
 <@predecision()>
-<decision>
+<@decision><decision><@end>
 <@postdecision()>
 <@prebranch()>
 <alts:altSwitchCase(); separator="\nel">
@@ -502,7 +622,7 @@ ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK
 alt<decisionNumber> = <maxAlt>
 <decls>
 <@predecision()>
-<decision>
+<@decision><decision><@end>
 <@postdecision()>
 <alts:altSwitchCase(); separator="\nel">
 >>
@@ -530,10 +650,17 @@ positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decision
 cnt<decisionNumber> = 0
 <decls>
 <@preloop()>
+<@loopBody>
+<positiveClosureBlockLoop()>
+<@end>
+<@postloop()>
+>>
+
+positiveClosureBlockLoop() ::= <<
 while True: #loop<decisionNumber>
     alt<decisionNumber> = <maxAlt>
     <@predecision()>
-    <decision>
+    <@decisionBody><decision><@end>
     <@postdecision()>
     <alts:altSwitchCase(); separator="\nel">
     else:
@@ -546,8 +673,6 @@ while True: #loop<decisionNumber>
         raise eee
 
     cnt<decisionNumber> += 1
-
-<@postloop()>
 >>
 
 positiveClosureBlockSingleAlt ::= positiveClosureBlock
@@ -557,16 +682,21 @@ closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,m
 # <fileName>:<description>
 <decls>
 <@preloop()>
+<@loopBody>
+<closureBlockLoop()>
+<@end>
+<@postloop()>
+>>
+
+closureBlockLoop() ::= <<
 while True: #loop<decisionNumber>
     alt<decisionNumber> = <maxAlt>
     <@predecision()>
-    <decision>
+    <@decisionBody><decision><@end>
     <@postdecision()>
     <alts:altSwitchCase(); separator="\nel">
     else:
         break #loop<decisionNumber>
-
-<@postloop()>
 >>
 
 closureBlockSingleAlt ::= closureBlock
@@ -589,13 +719,20 @@ if alt<decisionNumber> == <i>:
 >>
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
+alt(elements,altNum,description,autoAST,outerAlt, treeLevel,rew) ::= <<
 # <fileName>:<description>
+pass <! so empty alternatives are a valid block !>
 <@declarations()>
 <elements:element()>
+<rew>
 <@cleanup()>
 >>
 
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
 // E L E M E N T S
 
 /** Dump the elements one per line */
@@ -605,24 +742,20 @@ element() ::= <<
 >>
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-<if(label)>
-<label> = self.input.LT(1)<\n>
-<endif>
-self.match(self.input, <token>, self.FOLLOW_<token>_in_<ruleName><elementIndex>)
-<checkRuleBacktrackFailure()>
+tokenRef(token,label,elementIndex,hetero) ::= <<
+<if(label)><label>=<endif>self.match(self.input, <token>, self.FOLLOW_<token>_in_<ruleName><elementIndex>)
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::= <<
 <tokenRef(...)>
-<listLabel(...)>
+<listLabel(elem=label,...)>
 >>
 
 listLabel(label, elem) ::= <<
 if list_<label> is None:
     list_<label> = []
-list_<label>.append(<label>)<\n>
+list_<label>.append(<elem>)<\n>
 >>
 
 /** match a character */
@@ -631,7 +764,6 @@ charRef(char,label) ::= <<
 <label> = self.input.LA(1)<\n>
 <endif>
 self.match(<char>)
-<checkRuleBacktrackFailure()>
 >>
 
 /** match a character range */
@@ -640,7 +772,6 @@ charRangeRef(a,b,label) ::= <<
 <label> = self.input.LA(1)<\n>
 <endif>
 self.matchRange(<a>, <b>)
-<checkRuleBacktrackFailure()>
 >>
 
 /** For now, sets are interval tests and must be tested inline */
@@ -649,13 +780,10 @@ matchSet(s,label,elementIndex,postmatchCode="") ::= <<
 <label> = self.input.LT(1)<\n>
 <endif>
 if <s>:
-    self.input.consume();
+    self.input.consume()
     <postmatchCode>
 <if(!LEXER)>
-    self.errorRecovery = False<\n>
-<endif>
-<if(backtracking)>
-    self.failed = False<\n>
+    self._state.errorRecovery = False<\n>
 <endif>
 
 else:
@@ -663,19 +791,24 @@ else:
     mse = MismatchedSetException(None, self.input)
     <@mismatchedSetException()>
 <if(LEXER)>
-    self.recover(mse)<\n>
+    self.recover(mse)
+    raise mse
 <else>
+    raise mse
+    <! use following code to make it recover inline; remove throw mse;
     self.recoverFromMismatchedSet(
         self.input, mse, self.FOLLOW_set_in_<ruleName><elementIndex>
-        )<\n>
+        )
+    !>
 <endif>
-    raise mse
 <\n>
 >>
 
+matchRuleBlockSet ::= matchSet
+
 matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
 <matchSet(...)>
-<listLabel(...)>
+<listLabel(elem=label,...)>
 >>
 
 /** Match a string literal */
@@ -683,11 +816,9 @@ lexerStringRef(string,label) ::= <<
 <if(label)>
 <label>Start = self.getCharIndex()
 self.match(<string>)
-<checkRuleBacktrackFailure()>
 <label> = CommonToken(input=self.input, type=INVALID_TOKEN_TYPE, channel=DEFAULT_CHANNEL, start=<label>Start, stop=self.getCharIndex()-1)
 <else>
 self.match(<string>)
-<checkRuleBacktrackFailure()><\n>
 <endif>
 >>
 
@@ -696,12 +827,11 @@ wildcard(label,elementIndex) ::= <<
 <label> = self.input.LT(1)<\n>
 <endif>
 self.matchAny(self.input)
-<checkRuleBacktrackFailure()>
 >>
 
 wildcardAndListLabel(label,elementIndex) ::= <<
 <wildcard(...)>
-<listLabel(...)>
+<listLabel(elem=label,...)>
 >>
 
 /** Match . wildcard in lexer */
@@ -710,40 +840,40 @@ wildcardChar(label, elementIndex) ::= <<
 <label> = self.input.LA(1)<\n>
 <endif>
 self.matchAny()
-<checkRuleBacktrackFailure()>
 >>
 
 wildcardCharListLabel(label, elementIndex) ::= <<
 <wildcardChar(...)>
-<listLabel(...)>
+<listLabel(elem=label,...)>
 >>
 
 /** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
+ *  and a return value or values. The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
  */
-ruleRef(rule,label,elementIndex,args) ::= <<
-self.following.append(self.FOLLOW_<rule>_in_<ruleName><elementIndex>)
-<if(label)>
-<label> = self.<rule>(<args; separator=", ">)<\n>
-<else>
-self.<rule>(<args; separator=", ">)<\n>
-<endif>
-self.following.pop()
-<checkRuleBacktrackFailure()>
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+self._state.following.append(self.FOLLOW_<rule.name>_in_<ruleName><elementIndex>)
+<if(label)><label> = <endif>self.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">)<\n>
+self._state.following.pop()
 >>
 
 /** ids+=rule */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
-<listLabel(...)>
+<listLabel(elem=label,...)>
 >>
 
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex) ::= <<
+/** A lexer rule reference 
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
 <if(label)>
 <label>Start<elementIndex> = self.getCharIndex()
-self.m<rule>(<args; separator=", ">)
-<checkRuleBacktrackFailure()>
+self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
 <label> = CommonToken(
     input=self.input, 
     type=INVALID_TOKEN_TYPE,
@@ -752,13 +882,12 @@ self.m<rule>(<args; separator=", ">)
     stop=self.getCharIndex()-1
     )
 <else>
-self.m<rule>(<args; separator=", ">)
-<checkRuleBacktrackFailure()>
+self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
 <endif>
 >>
 
 /** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::= <<
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
 >>
@@ -768,32 +897,27 @@ lexerMatchEOF(label,elementIndex) ::= <<
 <if(label)>
 <label>Start<elementIndex> = self.getCharIndex()
 self.match(EOF)
-<checkRuleBacktrackFailure()>
 <label> = CommonToken(input=self.input, type=EOF, channel=DEFAULT_CHANNEL, start=<label>Start<elementIndex>, stop=self.getCharIndex()-1)
 <else>
 self.match(EOF)
-<checkRuleBacktrackFailure()>
 <endif>
 >>
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::= <<
+tree(root, actionsAfterRoot, children, nullableChildList, 
+     enclosingTreeLevel, treeLevel) ::= <<
 <root:element()>
 <actionsAfterRoot:element()>
 <if(nullableChildList)>
 if self.input.LA(1) == DOWN:
     self.match(self.input, DOWN, None)
-    <checkRuleBacktrackFailure()>
     <children:element()>
     self.match(self.input, UP, None)
-    <checkRuleBacktrackFailure()>
 
 <else>
 self.match(self.input, DOWN, None)
-<checkRuleBacktrackFailure()>
 <children:element()>
 self.match(self.input, UP, None)
-<checkRuleBacktrackFailure()>
 <endif>
 >>
 
@@ -962,8 +1086,12 @@ DFA<dfa.decisionNumber>_transition = [
 
 # class definition for DFA #<dfa.decisionNumber>
 
-<if(dfa.specialStateSTs)>
 class DFA<dfa.decisionNumber>(DFA):
+    pass
+
+    <@errorMethod()>
+
+<if(dfa.specialStateSTs)>
     def specialStateTransition(self_, s, input):
         # convince pylint that my self_ magic is ok ;)
         # pylint: disable-msg=E0213
@@ -979,15 +1107,13 @@ if s == <i0>: <! compressed special state numbers 0..n-1 !>
     <state>}; separator="\nel">
 
 <if(backtracking)>
-        if self.backtracking >0:
-            self.failed = True
-            return -1<\n>
+        if self._state.backtracking >0:
+            raise BacktrackingFailed
+
 <endif>
         nvae = NoViableAltException(self_.getDescription(), <dfa.decisionNumber>, _s, input)
         self_.error(nvae)
         raise nvae<\n>
-<else>
-DFA<dfa.decisionNumber> = DFA<\n>
 <endif>
 
 >>
@@ -1042,13 +1168,13 @@ se:
 
 // D F A  E X P R E S S I O N S
 
-andPredicates(left,right) ::= "(<left> and <right>)"
+andPredicates(left,right) ::= "((<left>) and (<right>))"
 
 orPredicates(operands) ::= "(<first(operands)><rest(operands):{o |  or <o>}>)"
 
 notPredicate(pred) ::= "not (<evalPredicate(...)>)"
 
-evalPredicate(pred,description) ::= "<pred>"
+evalPredicate(pred,description) ::= "(<pred>)"
 
 evalSynPredicate(pred,description) ::= "self.<pred>()"
 
@@ -1099,27 +1225,32 @@ self.<scope.name>_stack = []<\n>
 <endif>
 >>
 
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
 /** Define a rule label including default value */
 ruleLabelDef(label) ::= <<
 <label.label.text> = None<\n>
 >>
 
+returnStructName() ::= "<it.name>_return"
+
 /** Define a return struct for a rule if the code needs to access its
  *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
  *  subgroups to stick in members.
  */
 returnScope(scope) ::= <<
 <if(ruleDescriptor.hasMultipleReturnValues)>
-class <ruleDescriptor.name>_return(object):
+class <ruleDescriptor:returnStructName()>(<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope):
     def __init__(self):
-        self.start = None
-        self.stop = None
-        <if(TREE_PARSER)>
-        self.tree = None
-        <endif>
+        super(<grammar.recognizerName>.<ruleDescriptor:returnStructName()>, self).__init__()
 
         <scope.attributes:{self.<it.decl> = None}; separator="\n">
-        <@ruleReturnMembers()>
+        <@ruleReturnInit()>
+
+
+    <@ruleReturnMembers()>
 
 <endif>
 >>
@@ -1143,6 +1274,19 @@ self.<scope>_stack[-1].<attr.name>
 <endif>
 >>
 
+/* not applying patch because of bug in action parser!
+
+<if(negIndex)>
+((len(self.<scope>_stack) - <negIndex> - 1) >= 0 and [self.<scope>_stack[-<negIndex>].<attr.name>] or [None])[0]
+<else>
+<if(index)>
+((<index> \< len(self.<scope>_stack)) and [self.<scope>_stack[<index>].<attr.name>] or [None])[0]
+<else>
+((len(self.<scope>_stack) > 0) and [self.<scope>_stack[-1].<attr.name>] or [None])[0]
+<endif>
+<endif>
+
+*/
 
 scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
 <if(negIndex)>
@@ -1167,7 +1311,7 @@ isolatedDynamicScopeRef(scope) ::= "self.<scope>_stack"
 /** reference an attribute of rule; might only have single return value */
 ruleLabelRef(referencedRule,scope,attr) ::= <<
 <if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
+((<scope> is not None) and [<scope>.<attr.name>] or [None])[0]
 <else>
 <scope>
 <endif>
@@ -1212,49 +1356,51 @@ ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
 ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
 ruleLabelPropertyRef_text(scope,attr) ::= <<
 <if(TREE_PARSER)>
-self.input.getTokenStream().toString(
+((<scope> is not None) and [self.input.getTokenStream().toString(
     self.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
     self.input.getTreeAdaptor().getTokenStopIndex(<scope>.start)
-    )
+    )] or [None])[0]
 <else>
-self.input.toString(<scope>.start,<scope>.stop)
+((<scope> is not None) and [self.input.toString(<scope>.start,<scope>.stop)] or [None])[0]
 <endif>
 >>
-ruleLabelPropertyRef_st(scope,attr) ::= "<!FIXME(201:ST)!><scope>.st"
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> is not None) and [<scope>.st] or [None])[0]"
 
 /** Isolated $RULE ref ok in lexer as it's a Token */
 lexerRuleLabel(label) ::= "<label>"
 
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.type"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.line"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.charPositionInLine"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.channel"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.index"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.text"
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "((<scope> is not None) and [<scope>.type] or [0])[0]"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "((<scope> is not None) and [<scope>.line] or [0])[0]"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "((<scope> is not None) and [<scope>.charPositionInLine] or [0])[0]"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "((<scope> is not None) and [<scope>.channel] or [0])[0]"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "((<scope> is not None) and [<scope>.index] or [0])[0]"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "((<scope> is not None) and [<scope>.text] or [None])[0]"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "((<scope> is not None) and [int(<scope>.text)] or [0])[0]"
 
 // Somebody may ref $template or $tree or $stop within a rule:
 rulePropertyRef_start(scope,attr) ::= "retval.start"
 rulePropertyRef_stop(scope,attr) ::= "retval.stop" //mmm... or input.LT(-1)??
 rulePropertyRef_tree(scope,attr) ::= "retval.tree"
 rulePropertyRef_text(scope,attr) ::= "self.input.toString(retval.start, self.input.LT(-1))"
-rulePropertyRef_st(scope,attr) ::= "<!FIXME(203:ST)!>retval.st"
+rulePropertyRef_st(scope,attr) ::= "retval.st"
 
 lexerRulePropertyRef_text(scope,attr) ::= "self.text"
-lexerRulePropertyRef_type(scope,attr) ::= "self.type"
-lexerRulePropertyRef_line(scope,attr) ::= "self.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "self.tokenStartCharPositionInLine"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "self._state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "self._state.tokenStartCharPositionInLine"
 lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "self.channel"
-lexerRulePropertyRef_start(scope,attr) ::= "self.tokenStartCharIndex"
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "self._state.tokenStartCharIndex"
 lexerRulePropertyRef_stop(scope,attr) ::= "(self.getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
 
 // setting $st and $tree is allowed in local rule. everything else
 // is flagged as error
 ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "<!FIXME(205:ST)!>retval.st =<expr>"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>"
 
 
-/** How to execute an action */
+/** How to execute an action (only when not backtracking) */
 execAction(action) ::= <<
 <if(backtracking)>
 <if(actions.(actionScope).synpredgate)>
@@ -1262,7 +1408,7 @@ if <actions.(actionScope).synpredgate>:
     <action>
 
 <else>
-if self.backtracking == 0:
+if <actions.(actionScope).synpredgate>:
     <action>
 
 <endif>
@@ -1273,6 +1419,10 @@ if self.backtracking == 0:
 <endif>
 >>
 
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+
 // M I S C (properties, etc...)
 
 codeFileExtension() ::= ".py"
diff --git a/src/org/antlr/codegen/templates/Java/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg
similarity index 58%
rename from src/org/antlr/codegen/templates/Java/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg
index dbe70eb..0a9391c 100644
--- a/src/org/antlr/codegen/templates/Java/ST.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg
@@ -32,93 +32,110 @@ group ST;
 
 @outputFile.imports() ::= <<
 <@super.imports()>
-import org.antlr.stringtemplate.*;
-import org.antlr.stringtemplate.language.*;
-import java.util.HashMap;
+import stringtemplate3
 >>
 
 /** Add this to each rule's return value struct */
+ at returnScope.ruleReturnInit() ::= <<
+self.st = None
+>>
+
 @returnScope.ruleReturnMembers() ::= <<
-public StringTemplate st;
-public Object getTemplate() { return st; }
-public String toString() { return st==null?null:st.toString(); }
+def getTemplate(self):
+    return self.st
+
+def toString(self):
+    if self.st is not None:
+        return self.st.toString()
+    return None
+__str__ = toString
+
+>>
+
+ at genericParser.init() ::= <<
+<@super.init()>
+self.templateLib = stringtemplate3.StringTemplateGroup(
+    '<name>Templates', lexer='angle-bracket'
+    )
+
 >>
 
 @genericParser.members() ::= <<
 <@super.members()>
-protected StringTemplateGroup templateLib =
-  new StringTemplateGroup("<name>Templates", AngleBracketTemplateLexer.class);
-
-public void setTemplateLib(StringTemplateGroup templateLib) {
-  this.templateLib = templateLib;
-}
-public StringTemplateGroup getTemplateLib() {
-  return templateLib;
-}
-/** allows convenient multi-value initialization:
- *  "new STAttrMap().put(...).put(...)"
- */
-public static class STAttrMap extends HashMap {
-  public STAttrMap put(String attrName, Object value) {
-    super.put(attrName, value);
-    return this;
-  }
-  public STAttrMap put(String attrName, int value) {
-    super.put(attrName, new Integer(value));
-    return this;
-  }
-}
+def setTemplateLib(self, templateLib):
+    self.templateLib = templateLib
+
+def getTemplateLib(self):
+    return self.templateLib
+
 >>
 
 /** x+=rule when output=template */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
 <ruleRef(...)>
-<listLabel(elem=label+".getTemplate()",...)>
+<listLabel(elem=label+".st",...)>
 >>
 
 rewriteTemplate(alts) ::= <<
-
-// TEMPLATE REWRITE
+# TEMPLATE REWRITE
 <if(backtracking)>
-if ( backtracking==0 ) {
-  <alts:rewriteTemplateAlt(); separator="else ">
-  <if(rewrite)><replaceTextInLine()><endif>
-}
+if <actions.(actionScope).synpredgate>:
+    <first(alts):rewriteTemplateAltFirst()>
+    <rest(alts):{el<rewriteTemplateAlt()>}>
+    <if(rewriteMode)><replaceTextInLine()><endif>
+
 <else>
-<alts:rewriteTemplateAlt(); separator="else ">
-<if(rewrite)><replaceTextInLine()><endif>
+<first(alts):rewriteTemplateAltFirst()>
+<rest(alts):{el<rewriteTemplateAlt()>}>
+<if(rewriteMode)><replaceTextInLine()><endif>
 <endif>
 >>
 
 replaceTextInLine() ::= <<
 <if(TREE_PARSER)>
-((TokenRewriteStream)input.getTokenStream()).replace(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start),
-  retval.st);
+self.input.getTokenStream().replace(
+    self.input.getTreeAdaptor().getTokenStartIndex(retval.start),
+    self.input.getTreeAdaptor().getTokenStopIndex(retval.start),
+    retval.st
+    )
+<else>
+self.input.replace(
+    retval.start.getTokenIndex(),
+    self.input.LT(-1).getTokenIndex(),
+    retval.st
+    )
+<endif>
+>>
+
+rewriteTemplateAltFirst() ::= <<
+<if(it.pred)>
+if <it.pred>:
+    # <it.description>
+    retval.st = <it.alt>
+<\n>
 <else>
-((TokenRewriteStream)input).replace(
-  ((Token)retval.start).getTokenIndex(),
-  input.LT(-1).getTokenIndex(),
-  retval.st);
+# <it.description>
+retval.st = <it.alt>
+<\n>
 <endif>
 >>
 
 rewriteTemplateAlt() ::= <<
-// <it.description>
 <if(it.pred)>
-if (<it.pred>) {
-    retval.st = <it.alt>;
-}<\n>
+if <it.pred>:
+    # <it.description>
+    retval.st = <it.alt>
+<\n>
 <else>
-{
-    retval.st = <it.alt>;
-}<\n>
+se:
+    # <it.description>
+    retval.st = <it.alt>
+<\n>
 <endif>
 >>
 
 rewriteEmptyTemplate(alts) ::= <<
-null;
+None
 >>
 
 /** Invoke a template with a set of attribute name/value pairs.
@@ -128,23 +145,24 @@ null;
  *  template.
  */
 rewriteExternalTemplate(name,args) ::= <<
-templateLib.getInstanceOf("<name>"<if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
-  <endif>)
+self.templateLib.getInstanceOf("<name>"<if(args)>,
+    attributes={<args:{a | "<a.name>": <a.value>}; separator=", ">}<endif>)
 >>
 
 /** expr is a string expression that says what template to load */
 rewriteIndirectTemplate(expr,args) ::= <<
-templateLib.getInstanceOf(<expr><if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
-  <endif>)
+self.templateLib.getInstanceOf(<expr><if(args)>,
+    attributes={<args:{a | "<a.name>": <a.value>}; separator=", ">}<endif>)
 >>
 
 /** Invoke an inline template with a set of attribute name/value pairs */
 rewriteInlineTemplate(args, template) ::= <<
-new StringTemplate(templateLib, "<template>"<if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
-  <endif>)
+stringtemplate3.StringTemplate(
+    "<template>",
+    group=self.templateLib<if(args)>,
+    attributes={<args:{a | "<a.name>": <a.value>}; separator=", ">}
+    <endif>
+    )
 >>
 
 /** plain -> {foo} action */
@@ -154,10 +172,10 @@ rewriteAction(action) ::= <<
 
 /** An action has %st.attrName=expr; or %{st}.attrName=expr; */
 actionSetAttribute(st,attrName,expr) ::= <<
-(<st>).setAttribute("<attrName>",<expr>);
+(<st>)["<attrName>"] = <expr>
 >>
 
 /** Translate %{stringExpr} */
 actionStringConstructor(stringExpr) ::= <<
-new StringTemplate(templateLib,<stringExpr>)
+stringtemplate3.StringTemplate(<stringExpr>, group=self.templateLib)
 >>
diff --git a/src/org/antlr/codegen/templates/Ruby/Ruby.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
similarity index 97%
rename from src/org/antlr/codegen/templates/Ruby/Ruby.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
index b116b05..62c2620 100644
--- a/src/org/antlr/codegen/templates/Ruby/Ruby.stg
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
@@ -32,9 +32,8 @@ group Ruby implements ANTLRCore;
  *  and cyclic DFAs plus support code.
  */
 outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-		   docComment, recognizer,
-		   name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewrite, profile,
+           docComment, recognizer, name, tokens, tokenNames, rules,
+           cyclicDFAs, bitsets, buildTemplate, buildAST, rewriteMode, profile,
 	   backtracking, synpreds, memoize, numRules,
 	   fileName, ANTLRVersion, generatedTimestamp, trace,
 	   scopes, superClass, literals) ::=
@@ -51,7 +50,8 @@ outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
  *
  * labelType is not used for Ruby (no explicit type declarations)
  */
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType, filterMode) ::=
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType, filterMode,
+      superClass) ::=
 <<
 class <name>
     require 'stringio'
@@ -543,7 +543,7 @@ optionalBlock ::= block
 optionalBlockSingleAlt ::= block
 
 /** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt)::=
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::=
 <<
 # <description>
 <elements: element(); separator="\n">
@@ -552,7 +552,7 @@ alt(elements,altNum,description,autoAST,outerAlt)::=
 // E L E M E N T S
 
 /** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex)::=
+tokenRef(token,label,elementIndex,hetero) ::=
 <<
 <if(label)>
 _<label> = @input.look_ahead(1)<\n>
@@ -561,7 +561,7 @@ match(:<token>)
 >>
 
 /** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex)::=
+tokenRefAndListLabel(token,label,elementIndex,hetero) ::=
 <<
 <tokenRef(...)>
 <listLabel(elem=label, ...)>
@@ -654,7 +654,7 @@ wildcardCharListLabel(label, elementIndex)::=
 /** Match a rule reference by invoking it possibly with arguments
  *  and a return value or values.
  */
-ruleRef(rule,label,elementIndex,args)::=
+ruleRef(rule,label,elementIndex,args,scope) ::=
 <<
 <if(label)>
 _<label> = <rule>(<args; separator=", ">)<\n>
@@ -664,7 +664,7 @@ _<label> = <rule>(<args; separator=", ">)<\n>
 >>
 
 /** ids+=ID */
-ruleRefAndListLabel(rule,label,elementIndex,args)::=
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::=
 <<
 <ruleRef(...)>
 <listLabel(elem=label,...)>
@@ -679,7 +679,7 @@ B: .;
 TODO: Should we use a real token type instead of :invalid? How do we get it?
  
 */
-lexerRuleRef(rule,label,args,elementIndex)::=
+lexerRuleRef(rule,label,args,elementIndex,scope)::=
 <<
 <if(label)>
 _<label>_start_<elementIndex> = @input.index
@@ -696,7 +696,7 @@ match_<rule>(<args; separator=", ">)
 >>
 
 
-lexerRuleRefAndListLabel(rule,label,args,elementIndex) ::=
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::=
 <<
 <lexerRuleRef(...)>
 <listLabel(elem=label,...)>
@@ -712,7 +712,8 @@ match(:EOF)
 >>
 
 /** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList) ::=
+tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel,
+     treeLevel) ::=
 <<
 	raise "tree not implemented"
 >>
@@ -1216,7 +1217,7 @@ codeFileExtension()::=".rb"
 true()::= "true"
 false()::= "false"
 
-
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
 
 ///////////// --------------------------- private templates --------------------------------
 
diff --git a/src/org/antlr/tool/templates/depend.stg b/tool/src/main/resources/org/antlr/tool/templates/depend.stg
similarity index 100%
rename from src/org/antlr/tool/templates/depend.stg
rename to tool/src/main/resources/org/antlr/tool/templates/depend.stg
diff --git a/src/org/antlr/tool/templates/dot/edge.st b/tool/src/main/resources/org/antlr/tool/templates/dot/action-edge.st
similarity index 100%
copy from src/org/antlr/tool/templates/dot/edge.st
copy to tool/src/main/resources/org/antlr/tool/templates/dot/action-edge.st
diff --git a/src/org/antlr/tool/templates/dot/decision-rank.st b/tool/src/main/resources/org/antlr/tool/templates/dot/decision-rank.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/decision-rank.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/decision-rank.st
diff --git a/src/org/antlr/tool/templates/dot/dfa.st b/tool/src/main/resources/org/antlr/tool/templates/dot/dfa.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/dfa.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/dfa.st
diff --git a/src/org/antlr/tool/templates/dot/edge.st b/tool/src/main/resources/org/antlr/tool/templates/dot/edge.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/edge.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/edge.st
diff --git a/src/org/antlr/tool/templates/dot/epsilon-edge.st b/tool/src/main/resources/org/antlr/tool/templates/dot/epsilon-edge.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/epsilon-edge.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/epsilon-edge.st
diff --git a/src/org/antlr/tool/templates/dot/nfa.st b/tool/src/main/resources/org/antlr/tool/templates/dot/nfa.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/nfa.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/nfa.st
diff --git a/src/org/antlr/tool/templates/dot/state.st b/tool/src/main/resources/org/antlr/tool/templates/dot/state.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/state.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/state.st
diff --git a/src/org/antlr/tool/templates/dot/stopstate.st b/tool/src/main/resources/org/antlr/tool/templates/dot/stopstate.st
similarity index 100%
rename from src/org/antlr/tool/templates/dot/stopstate.st
rename to tool/src/main/resources/org/antlr/tool/templates/dot/stopstate.st
diff --git a/src/org/antlr/tool/templates/messages/formats/antlr.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/formats/antlr.stg
similarity index 100%
rename from src/org/antlr/tool/templates/messages/formats/antlr.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/formats/antlr.stg
diff --git a/src/org/antlr/tool/templates/messages/formats/gnu.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/formats/gnu.stg
similarity index 100%
rename from src/org/antlr/tool/templates/messages/formats/gnu.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/formats/gnu.stg
diff --git a/src/org/antlr/tool/templates/messages/formats/vs2005.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/formats/vs2005.stg
similarity index 100%
rename from src/org/antlr/tool/templates/messages/formats/vs2005.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/formats/vs2005.stg
diff --git a/src/org/antlr/tool/templates/messages/languages/en.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
similarity index 74%
rename from src/org/antlr/tool/templates/messages/languages/en.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
index a02df50..b3ed9e5 100644
--- a/src/org/antlr/tool/templates/messages/languages/en.stg
+++ b/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
@@ -45,9 +45,10 @@ ERROR_READING_TOKENS_FILE(arg,exception,stackTrace) ::= <<
 problem reading token vocabulary file <arg>: <exception>
 <stackTrace; separator="\n">
 >>
-DIR_NOT_FOUND(arg) ::= "directory not found: <arg>"
-OUTPUT_DIR_IS_FILE(arg) ::= "output directory is a file: <arg>"
-CANNOT_OPEN_FILE(arg) ::= "cannot find or open file: <arg>"
+DIR_NOT_FOUND(arg,exception,stackTrace) ::= "directory not found: <arg>"
+OUTPUT_DIR_IS_FILE(arg,exception,stackTrace) ::= "output directory is a file: <arg>"
+CANNOT_OPEN_FILE(arg,exception,stackTrace) ::= "cannot find or open file: <arg><if(exception)>; reason: <exception><endif>"
+CIRCULAR_DEPENDENCY() ::= "your grammars contain a circular dependency and cannot be sorted into a valid build order."
 
 INTERNAL_ERROR(arg,arg2,exception,stackTrace) ::= <<
 internal error: <arg> <arg2><if(exception)>: <exception><endif>
@@ -180,8 +181,8 @@ UNKNOWN_DYNAMIC_SCOPE(arg) ::=
   "unknown dynamic scope: <arg>"
 UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE(arg,arg2) ::=
   "unknown dynamically-scoped attribute for scope <arg>: <arg2>"
-AMBIGUOUS_RULE_SCOPE(arg) ::=
-  "reference $<arg> is ambiguous since rule <arg> is referenced in the production and rule <arg> also has a dynamic scope"
+RULE_REF_AMBIG_WITH_RULE_IN_ALT(arg) ::=
+  "reference $<arg> is ambiguous; rule <arg> is enclosing rule and referenced in the production (assuming enclosing rule)"
 ISOLATED_RULE_ATTRIBUTE(arg) ::=
   "reference to locally-defined rule scope attribute without rule name: <arg>"
 INVALID_ACTION_SCOPE(arg,arg2) ::=
@@ -197,7 +198,9 @@ MISSING_ATTRIBUTE_NAME() ::=
 ARG_INIT_VALUES_ILLEGAL(arg) ::=
   "rule parameters may not have init values: <arg>"
 REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION(arg) ::=
-  "rule <arg> uses rewrite syntax or operator with no output option or lexer rule uses !"
+  "<if(arg)>rule <arg> uses <endif>rewrite syntax or operator with no output option; setting output=AST"
+AST_OP_WITH_NON_AST_OUTPUT_OPTION(arg) ::=
+  "AST operator with non-AST output option: <arg>"
 NO_RULES(arg) ::= "grammar file <arg> has no rules"
 MISSING_AST_TYPE_IN_TREE_GRAMMAR(arg) ::=
   "tree grammar <arg> has no ASTLabelType option"
@@ -205,32 +208,52 @@ REWRITE_FOR_MULTI_ELEMENT_ALT(arg) ::=
   "with rewrite=true, alt <arg> not simple node or obvious tree element; text attribute for rule not guaranteed to be correct"
 RULE_INVALID_SET(arg) ::= 
   "Cannot complement rule <arg>; not a simple set or element"
+HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
+  "alts with rewrites can't use heterogeneous types left of ->"
+NO_SUCH_GRAMMAR_SCOPE(arg,arg2) ::=
+  "reference to undefined grammar in rule reference: <arg>.<arg2>"
+NO_SUCH_RULE_IN_SCOPE(arg,arg2) ::=
+  "rule <arg2> is not defined in grammar <arg>"
+TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
+  "cannot alias <arg>; string already assigned to <arg2>"
+TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
+  "cannot alias <arg>; token name already assigned to <arg2>"
+TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
+  "tokenVocab option ignored in imported grammar <arg>"
+INVALID_IMPORT(arg,arg2) ::=
+  "<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
+IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
+  "no lexer rules contributed to <arg> from imported grammar <arg2>"
+IMPORT_NAME_CLASH(arg,arg2) ::=
+  "combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
+AST_OP_IN_ALT_WITH_REWRITE(arg,arg2) ::=
+  "rule <arg> alt <arg2> uses rewrite syntax and also an AST operator"
+WILDCARD_AS_ROOT(arg) ::= "Wildcard invalid as root; wildcard can itself be a tree"
+CONFLICTING_OPTION_IN_TREE_FILTER(arg,arg2) ::= "option <arg>=<arg2> conflicts with tree grammar filter mode"
 
 // GRAMMAR WARNINGS
 
-GRAMMAR_NONDETERMINISM(input,conflictingAlts,paths,disabled) ::=
+GRAMMAR_NONDETERMINISM(input,conflictingAlts,paths,disabled,hasPredicateBlockedByAction) ::=
 <<
 <if(paths)>
 Decision can match input such as "<input>" using multiple alternatives:
 <paths:{  alt <it.alt> via NFA path <it.states; separator=","><\n>}>
 <else>
-Decision can match input such as "<input>" using multiple alternatives: <conflictingAlts; separator=", "><\n>
-<endif>
-<if(disabled)>
-As a result, alternative(s) <disabled; separator=","> were disabled for that input
+Decision can match input such as "<input>" using multiple alternatives: <conflictingAlts; separator=", ">
 <endif>
+<if(disabled)><\n>As a result, alternative(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
 >>
 
-DANGLING_STATE(danglingAlts) ::= <<
-the decision cannot distinguish between alternative(s) <danglingAlts; separator=","> for at least one input sequence
+DANGLING_STATE(danglingAlts,input) ::= <<
+the decision cannot distinguish between alternative(s) <danglingAlts; separator=","> for input such as "<input>"
 >>
 
 UNREACHABLE_ALTS(alts) ::= <<
-The following alternatives are unreachable: <alts; separator=","><\n>
+The following alternatives can never be matched: <alts; separator=","><\n>
 >>
 
-INSUFFICIENT_PREDICATES(alts) ::= <<
-The following alternatives are insufficiently covered with predicates: <alts; separator=","><\n>
+INSUFFICIENT_PREDICATES(upon,altToLocations,hasPredicateBlockedByAction) ::= <<
+Input such as "<upon>" is insufficiently covered with predicates at locations: <altToLocations.keys:{alt|alt <alt>: <altToLocations.(alt):{loc| line <loc.line>:<loc.column> at <loc.text>}; separator=", ">}; separator=", "><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
 >>
 
 DUPLICATE_SET_ENTRY(arg) ::=
@@ -241,7 +264,7 @@ ANTLR could not analyze this decision in rule <enclosingRule>; often this is bec
 >>
 
 RECURSION_OVERLOW(alt,input,targetRules,callSiteStates) ::= <<
-Alternative <alt>: after matching input such as <input> decision cannot predict what comes next due to recursion overflow <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule>};separator=", ">}; separator=" and ">
+Alternative <alt>: after matching input such as <input> decision cannot predict what comes next due to recursion overflow <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule.name>};separator=", ">}; separator=" and ">
 >>
 
 LEFT_RECURSION(targetRules,alt,callSiteStates) ::= <<
@@ -249,10 +272,10 @@ Alternative <alt> discovers infinite left-recursion <targetRules,callSiteStates:
 >>
 
 UNREACHABLE_TOKENS(tokens) ::= <<
-The following token definitions are unreachable: <tokens; separator=",">
+The following token definitions can never be matched because prior tokens match the same input: <tokens; separator=",">
 >>
 
-TOKEN_NONDETERMINISM(input,conflictingTokens,paths,disabled) ::=
+TOKEN_NONDETERMINISM(input,conflictingTokens,paths,disabled,hasPredicateBlockedByAction) ::=
 <<
 <if(paths)>
 Decision can match input such as "<input>" using multiple alternatives:
@@ -260,13 +283,11 @@ Decision can match input such as "<input>" using multiple alternatives:
 <else>
 Multiple token rules can match input such as "<input>": <conflictingTokens; separator=", "><\n>
 <endif>
-<if(disabled)>
-As a result, tokens(s) <disabled; separator=","> were disabled for that input
-<endif>
+<if(disabled)><\n>As a result, token(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
 >>
 
 LEFT_RECURSION_CYCLES(listOfCycles) ::= <<
-The following sets of rules are mutually left-recursive <listOfCycles:{c| [<c:{r|<r>}; separator=", ">]}; separator=" and ">
+The following sets of rules are mutually left-recursive <listOfCycles:{c| [<c:{r|<r.name>}; separator=", ">]}; separator=" and ">
 >>
 
 NONREGULAR_DECISION(ruleName,alts) ::= <<
diff --git a/tool/src/test/java/org/antlr/test/BaseTest.java b/tool/src/test/java/org/antlr/test/BaseTest.java
new file mode 100644
index 0000000..4c42b2a
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/BaseTest.java
@@ -0,0 +1,890 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Message;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.antlr.tool.ANTLRErrorListener;
+
+
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Assert;
+import static org.junit.Assert.*;
+
+import java.io.*;
+import java.util.*;
+
+
+public abstract class BaseTest {
+
+	public static final String jikes = null;//"/usr/bin/jikes";
+	public static final String pathSep = System.getProperty("path.separator");
+    
+   /**
+    * When running from Maven, the junit tests are run via the surefire plugin. It sets the
+    * classpath for the test environment into the following property. We need to pick this up
+    * for the junit tests that are going to generate and try to run code.
+    */
+    public static final String SUREFIRE_CLASSPATH = System.getProperty("surefire.test.class.path", "");
+
+    /**
+     * Build up the full classpath we need, including the surefire path (if present)
+     */
+    public static final String CLASSPATH = System.getProperty("java.class.path") + (SUREFIRE_CLASSPATH.equals("") ? "" : pathSep + SUREFIRE_CLASSPATH); 
+    
+	public String tmpdir = null;
+
+    /** reset during setUp and set to true if we find a problem */  
+    protected boolean lastTestFailed = false;
+
+	/** If error during parser execution, store stderr here; can't return
+     *  stdout and stderr.  This doesn't trap errors from running antlr.
+     */
+	protected String stderrDuringParse;
+
+    @Before
+	public void setUp() throws Exception {
+        lastTestFailed = false; // hope for the best, but set to true in asserts that fail
+        // new output dir for each test
+        tmpdir = new File(System.getProperty("java.io.tmpdir"), "antlr-"+getClass().getName()+"-"+System.currentTimeMillis()).getAbsolutePath();
+        ErrorManager.resetErrorState();
+        // force reset of static caches
+        new StringTemplateGroup("") {
+            {
+                StringTemplateGroup.nameToGroupMap = Collections.synchronizedMap(new HashMap());
+                StringTemplateGroup.nameToInterfaceMap = Collections.synchronizedMap(new HashMap());
+            }
+        };
+        StringTemplate.resetTemplateCounter();
+        StringTemplate.defaultGroup = new StringTemplateGroup("defaultGroup", ".");
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // remove tmpdir if no error.
+        if ( !lastTestFailed ) eraseTempDir();
+
+    }
+
+    protected Tool newTool(String[] args) {
+		Tool tool = new Tool(args);
+		tool.setOutputDirectory(tmpdir);
+		return tool;
+	}
+
+	protected Tool newTool() {
+		Tool tool = new Tool();
+		tool.setOutputDirectory(tmpdir);
+		return tool;
+	}
+
+	protected boolean compile(String fileName) {
+		String compiler = "javac";
+		String classpathOption = "-classpath";
+
+		if (jikes!=null) {
+			compiler = jikes;
+			classpathOption = "-bootclasspath";
+		}
+
+		String[] args = new String[] {
+					compiler, "-d", tmpdir,
+					classpathOption, tmpdir+pathSep+CLASSPATH,
+					tmpdir+"/"+fileName
+		};
+		String cmdLine = compiler+" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+fileName;
+		//System.out.println("compile: "+cmdLine);
+		File outputDir = new File(tmpdir);
+		try {
+			Process process =
+				Runtime.getRuntime().exec(args, null, outputDir);
+			StreamVacuum stdout = new StreamVacuum(process.getInputStream());
+			StreamVacuum stderr = new StreamVacuum(process.getErrorStream());
+			stdout.start();
+			stderr.start();
+			process.waitFor();
+            stdout.join();
+            stderr.join();
+			if ( stdout.toString().length()>0 ) {
+				System.err.println("compile stdout from: "+cmdLine);
+				System.err.println(stdout);
+			}
+			if ( stderr.toString().length()>0 ) {
+				System.err.println("compile stderr from: "+cmdLine);
+				System.err.println(stderr);
+			}
+			int ret = process.exitValue();
+			return ret==0;
+		}
+		catch (Exception e) {
+			System.err.println("can't exec compilation");
+			e.printStackTrace(System.err);
+			return false;
+		}
+	}
+
+	/** Return true if all is ok, no errors */
+	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) {
+		boolean allIsWell = true;
+		mkdir(tmpdir);
+		writeFile(tmpdir, fileName, grammarStr);
+		try {
+			final List options = new ArrayList();
+			if ( debug ) {
+				options.add("-debug");
+			}
+			options.add("-o");
+			options.add(tmpdir);
+			options.add("-lib");
+			options.add(tmpdir);
+			options.add(new File(tmpdir,grammarFileName).toString());
+			final String[] optionsA = new String[options.size()];
+			options.toArray(optionsA);
+			/*
+			final ErrorQueue equeue = new ErrorQueue();
+			ErrorManager.setErrorListener(equeue);
+			*/
+			Tool antlr = newTool(optionsA);
+			antlr.process();
+			ANTLRErrorListener listener = ErrorManager.getErrorListener();
+			if ( listener instanceof ErrorQueue ) {
+				ErrorQueue equeue = (ErrorQueue)listener;
+				if ( equeue.errors.size()>0 ) {
+					allIsWell = false;
+					System.err.println("antlr reports errors from "+options);
+					for (int i = 0; i < equeue.errors.size(); i++) {
+						Message msg = (Message) equeue.errors.get(i);
+						System.err.println(msg);
+					}
+                    System.out.println("!!!\ngrammar:");
+                    System.out.println(grammarStr);
+                    System.out.println("###");
+                }
+			}
+		}
+		catch (Exception e) {
+			allIsWell = false;
+			System.err.println("problems building grammar: "+e);
+			e.printStackTrace(System.err);
+		}
+		return allIsWell;
+	}
+
+	protected String execLexer(String grammarFileName,
+							   String grammarStr,
+							   String lexerName,
+							   String input,
+							   boolean debug)
+	{
+		rawGenerateAndBuildRecognizer(grammarFileName,
+									  grammarStr,
+									  null,
+									  lexerName,
+									  debug);
+		writeFile(tmpdir, "input", input);
+		return rawExecRecognizer(null,
+								 null,
+								 lexerName,
+								 null,
+								 null,
+								 false,
+								 false,
+								 false,
+								 debug);
+	}
+
+	protected String execParser(String grammarFileName,
+								String grammarStr,
+								String parserName,
+								String lexerName,
+								String startRuleName,
+								String input, boolean debug)
+	{
+		rawGenerateAndBuildRecognizer(grammarFileName,
+									  grammarStr,
+									  parserName,
+									  lexerName,
+									  debug);
+		writeFile(tmpdir, "input", input);
+		boolean parserBuildsTrees =
+			grammarStr.indexOf("output=AST")>=0 ||
+			grammarStr.indexOf("output = AST")>=0;
+		boolean parserBuildsTemplate =
+			grammarStr.indexOf("output=template")>=0 ||
+			grammarStr.indexOf("output = template")>=0;
+		return rawExecRecognizer(parserName,
+								 null,
+								 lexerName,
+								 startRuleName,
+								 null,
+								 parserBuildsTrees,
+								 parserBuildsTemplate,
+								 false,
+								 debug);
+	}
+
+	protected String execTreeParser(String parserGrammarFileName,
+									String parserGrammarStr,
+									String parserName,
+									String treeParserGrammarFileName,
+									String treeParserGrammarStr,
+									String treeParserName,
+									String lexerName,
+									String parserStartRuleName,
+									String treeParserStartRuleName,
+									String input)
+	{
+		return execTreeParser(parserGrammarFileName,
+							  parserGrammarStr,
+							  parserName,
+							  treeParserGrammarFileName,
+							  treeParserGrammarStr,
+							  treeParserName,
+							  lexerName,
+							  parserStartRuleName,
+							  treeParserStartRuleName,
+							  input,
+							  false);
+	}
+
+	protected String execTreeParser(String parserGrammarFileName,
+									String parserGrammarStr,
+									String parserName,
+									String treeParserGrammarFileName,
+									String treeParserGrammarStr,
+									String treeParserName,
+									String lexerName,
+									String parserStartRuleName,
+									String treeParserStartRuleName,
+									String input,
+									boolean debug)
+	{
+		// build the parser
+		rawGenerateAndBuildRecognizer(parserGrammarFileName,
+									  parserGrammarStr,
+									  parserName,
+									  lexerName,
+									  debug);
+
+		// build the tree parser
+		rawGenerateAndBuildRecognizer(treeParserGrammarFileName,
+									  treeParserGrammarStr,
+									  treeParserName,
+									  lexerName,
+									  debug);
+
+		writeFile(tmpdir, "input", input);
+
+		boolean parserBuildsTrees =
+			parserGrammarStr.indexOf("output=AST")>=0 ||
+			parserGrammarStr.indexOf("output = AST")>=0;
+		boolean treeParserBuildsTrees =
+			treeParserGrammarStr.indexOf("output=AST")>=0 ||
+			treeParserGrammarStr.indexOf("output = AST")>=0;
+		boolean parserBuildsTemplate =
+			parserGrammarStr.indexOf("output=template")>=0 ||
+			parserGrammarStr.indexOf("output = template")>=0;
+
+		return rawExecRecognizer(parserName,
+								 treeParserName,
+								 lexerName,
+								 parserStartRuleName,
+								 treeParserStartRuleName,
+								 parserBuildsTrees,
+								 parserBuildsTemplate,
+								 treeParserBuildsTrees,
+								 debug);
+	}
+
+	/** Return true if all is well */
+	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
+													String grammarStr,
+													String parserName,
+													String lexerName,
+													boolean debug)
+	{
+		boolean allIsWell =
+			antlr(grammarFileName, grammarFileName, grammarStr, debug);
+		if ( lexerName!=null ) {
+			boolean ok;
+			if ( parserName!=null ) {
+				ok = compile(parserName+".java");
+				if ( !ok ) { allIsWell = false; }
+			}
+			ok = compile(lexerName+".java");
+			if ( !ok ) { allIsWell = false; }
+		}
+		else {
+			boolean ok = compile(parserName+".java");
+			if ( !ok ) { allIsWell = false; }
+		}
+		return allIsWell;
+	}
+
+	protected String rawExecRecognizer(String parserName,
+									   String treeParserName,
+									   String lexerName,
+									   String parserStartRuleName,
+									   String treeParserStartRuleName,
+									   boolean parserBuildsTrees,
+									   boolean parserBuildsTemplate,
+									   boolean treeParserBuildsTrees,
+									   boolean debug)
+	{
+        this.stderrDuringParse = null;
+		if ( treeParserBuildsTrees && parserBuildsTrees ) {
+			writeTreeAndTreeTestFile(parserName,
+									 treeParserName,
+									 lexerName,
+									 parserStartRuleName,
+									 treeParserStartRuleName,
+									 debug);
+		}
+		else if ( parserBuildsTrees ) {
+			writeTreeTestFile(parserName,
+							  treeParserName,
+							  lexerName,
+							  parserStartRuleName,
+							  treeParserStartRuleName,
+							  debug);
+		}
+		else if ( parserBuildsTemplate ) {
+			writeTemplateTestFile(parserName,
+								  lexerName,
+								  parserStartRuleName,
+								  debug);
+		}
+		else if ( parserName==null ) {
+			writeLexerTestFile(lexerName, debug);
+		}
+		else {
+			writeTestFile(parserName,
+						  lexerName,
+						  parserStartRuleName,
+						  debug);
+		}
+
+		compile("Test.java");
+		try {
+			String[] args = new String[] {
+				"java", "-classpath", tmpdir+pathSep+CLASSPATH,
+				"Test", new File(tmpdir, "input").getAbsolutePath()
+			};
+			//String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath();
+			//System.out.println("execParser: "+cmdLine);
+			Process process =
+				Runtime.getRuntime().exec(args, null, new File(tmpdir));
+			StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream());
+			StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream());
+			stdoutVacuum.start();
+			stderrVacuum.start();
+			process.waitFor();
+			stdoutVacuum.join();
+			stderrVacuum.join();
+			String output = null;
+			output = stdoutVacuum.toString();
+			if ( stderrVacuum.toString().length()>0 ) {
+				this.stderrDuringParse = stderrVacuum.toString();
+				//System.err.println("exec stderrVacuum: "+ stderrVacuum);
+			}
+			return output;
+		}
+		catch (Exception e) {
+			System.err.println("can't exec recognizer");
+			e.printStackTrace(System.err);
+		}
+		return null;
+	}
+
+	protected void checkGrammarSemanticsError(ErrorQueue equeue,
+											  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+				System.out.println(equeue.infos);
+				System.out.println(equeue.warnings);
+				System.out.println(equeue.errors);
+				assertTrue("number of errors mismatch", n, equeue.errors.size());
+						   */
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = (Message)equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		if ( equeue.size()!=1 ) {
+			System.err.println(equeue);
+		}
+	}
+
+	protected void checkGrammarSemanticsWarning(ErrorQueue equeue,
+												GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.warnings.size(); i++) {
+			Message m = (Message)equeue.warnings.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+	}
+    
+    protected void checkError(ErrorQueue equeue,
+                              Message expectedMessage)
+        throws Exception
+    {
+        //System.out.println("errors="+equeue);
+        Message foundMsg = null;
+        for (int i = 0; i < equeue.errors.size(); i++) {
+            Message m = (Message)equeue.errors.get(i);
+            if (m.msgID==expectedMessage.msgID ) {
+                foundMsg = m;
+            }
+        }
+        assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
+        assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
+        assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
+        /*
+        assertTrue("error is not a GrammarSemanticsMessage",
+                   foundMsg instanceof GrammarSemanticsMessage);
+         */
+        assertEquals(expectedMessage.arg, foundMsg.arg);
+        assertEquals(expectedMessage.arg2, foundMsg.arg2);
+        ErrorManager.resetErrorState(); // wack errors for next test
+    }
+
+    public static class StreamVacuum implements Runnable {
+		StringBuffer buf = new StringBuffer();
+		BufferedReader in;
+		Thread sucker;
+		public StreamVacuum(InputStream in) {
+			this.in = new BufferedReader( new InputStreamReader(in) );
+		}
+		public void start() {
+			sucker = new Thread(this);
+			sucker.start();
+		}
+		public void run() {
+			try {
+				String line = in.readLine();
+				while (line!=null) {
+					buf.append(line);
+					buf.append('\n');
+					line = in.readLine();
+				}
+			}
+			catch (IOException ioe) {
+				System.err.println("can't read output from process");
+			}
+		}
+		/** wait for the thread to finish */
+		public void join() throws InterruptedException {
+			sucker.join();
+		}
+		public String toString() {
+			return buf.toString();
+		}
+	}
+
+	protected void writeFile(String dir, String fileName, String content) {
+		try {
+			File f = new File(dir, fileName);
+			FileWriter w = new FileWriter(f);
+			BufferedWriter bw = new BufferedWriter(w);
+			bw.write(content);
+			bw.close();
+			w.close();
+		}
+		catch (IOException ioe) {
+			System.err.println("can't write file");
+			ioe.printStackTrace(System.err);
+		}
+	}
+
+	protected void mkdir(String dir) {
+		File f = new File(dir);
+		f.mkdirs();
+	}
+
+	protected void writeTestFile(String parserName,
+								 String lexerName,
+								 String parserStartRuleName,
+								 boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        $createParser$\n"+
+			"        parser.$parserStartRuleName$();\n" +
+			"    }\n" +
+			"}"
+			);
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	protected void writeLexerTestFile(String lexerName, boolean debug) {
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        System.out.println(tokens);\n" +
+			"    }\n" +
+			"}"
+			);
+		outputFileST.setAttribute("lexerName", lexerName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	protected void writeTreeTestFile(String parserName,
+									 String treeParserName,
+									 String lexerName,
+									 String parserStartRuleName,
+									 String treeParserStartRuleName,
+									 boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
+			"        $createParser$\n"+
+			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
+			"        $if(!treeParserStartRuleName)$\n" +
+			"        if ( r.tree!=null ) {\n" +
+			"            System.out.println(((Tree)r.tree).toStringTree());\n" +
+			"            ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
+			"		 }\n" +
+			"        $else$\n" +
+			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
+			"        nodes.setTokenStream(tokens);\n" +
+			"        $treeParserName$ walker = new $treeParserName$(nodes);\n" +
+			"        walker.$treeParserStartRuleName$();\n" +
+			"        $endif$\n" +
+			"    }\n" +
+			"}"
+			);
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("treeParserName", treeParserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		outputFileST.setAttribute("treeParserStartRuleName", treeParserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	/** Parser creates trees and so does the tree parser */
+	protected void writeTreeAndTreeTestFile(String parserName,
+											String treeParserName,
+											String lexerName,
+											String parserStartRuleName,
+											String treeParserStartRuleName,
+											boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
+			"        $createParser$\n"+
+			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
+			"        ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
+			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
+			"        nodes.setTokenStream(tokens);\n" +
+			"        $treeParserName$ walker = new $treeParserName$(nodes);\n" +
+			"        $treeParserName$.$treeParserStartRuleName$_return r2 = walker.$treeParserStartRuleName$();\n" +
+			"		 CommonTree rt = ((CommonTree)r2.tree);\n" +
+			"		 if ( rt!=null ) System.out.println(((CommonTree)r2.tree).toStringTree());\n" +
+			"    }\n" +
+			"}"
+			);
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("treeParserName", treeParserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		outputFileST.setAttribute("treeParserStartRuleName", treeParserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+	protected void writeTemplateTestFile(String parserName,
+										 String lexerName,
+										 String parserStartRuleName,
+										 boolean debug)
+	{
+		StringTemplate outputFileST = new StringTemplate(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.stringtemplate.*;\n" +
+			"import org.antlr.stringtemplate.language.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"import java.io.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    static String templates =\n" +
+			"    		\"group test;\"+" +
+			"    		\"foo(x,y) ::= \\\"<x> <y>\\\"\";\n"+
+			"    static StringTemplateGroup group ="+
+			"    		new StringTemplateGroup(new StringReader(templates)," +
+			"					AngleBracketTemplateLexer.class);"+
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        $lexerName$ lex = new $lexerName$(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        $createParser$\n"+
+			"		 parser.setTemplateLib(group);\n"+
+			"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
+			"        if ( r.st!=null )\n" +
+			"            System.out.print(r.st.toString());\n" +
+			"	 	 else\n" +
+			"            System.out.print(\"\");\n" +
+			"    }\n" +
+			"}"
+			);
+		StringTemplate createParserST =
+			new StringTemplate(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new StringTemplate(
+				"        $parserName$ parser = new $parserName$(tokens);\n");
+		}
+		outputFileST.setAttribute("createParser", createParserST);
+		outputFileST.setAttribute("parserName", parserName);
+		outputFileST.setAttribute("lexerName", lexerName);
+		outputFileST.setAttribute("parserStartRuleName", parserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.toString());
+	}
+
+    protected void eraseFiles(final String filesEndingWith) {
+        File tmpdirF = new File(tmpdir);
+        String[] files = tmpdirF.list();
+        for(int i = 0; files!=null && i < files.length; i++) {
+            if ( files[i].endsWith(filesEndingWith) ) {
+                new File(tmpdir+"/"+files[i]).delete();
+            }
+        }
+    }
+
+    protected void eraseFiles() {
+        File tmpdirF = new File(tmpdir);
+        String[] files = tmpdirF.list();
+        for(int i = 0; files!=null && i < files.length; i++) {
+            new File(tmpdir+"/"+files[i]).delete();
+        }
+    }
+
+    protected void eraseTempDir() {
+        File tmpdirF = new File(tmpdir);
+        if ( tmpdirF.exists() ) {
+            eraseFiles();
+            tmpdirF.delete();
+        }
+    }
+
+	public String getFirstLineOfException() {
+		if ( this.stderrDuringParse ==null ) {
+			return null;
+		}
+		String[] lines = this.stderrDuringParse.split("\n");
+		String prefix="Exception in thread \"main\" ";
+		return lines[0].substring(prefix.length(),lines[0].length());
+	}
+
+	public List realElements(List elements) {
+		List n = new ArrayList();
+		for (int i = Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE - 1; i < elements.size(); i++) {
+			Object o = (Object) elements.get(i);
+			if ( o!=null ) {
+				n.add(o);
+			}
+		}
+		return n;
+	}
+
+	public List<String> realElements(Map<String, Integer> elements) {
+		List n = new ArrayList();
+		Iterator iterator = elements.keySet().iterator();
+		while (iterator.hasNext()) {
+			String tokenID = (String) iterator.next();
+			if ( elements.get(tokenID) >= Label.MIN_TOKEN_TYPE ) {
+				n.add(tokenID+"="+elements.get(tokenID));
+			}
+		}
+		Collections.sort(n);
+		return n;
+	}
+
+    public String sortLinesInString(String s) {
+        String lines[] = s.split("\n");
+        Arrays.sort(lines);
+        List<String> linesL = Arrays.asList(lines);
+        StringBuffer buf = new StringBuffer();
+        for (String l : linesL) {
+            buf.append(l);
+            buf.append('\n');
+        }
+        return buf.toString();
+    }
+    
+    /**
+     * When looking at a result set that consists of a Map/HashTable
+     * we cannot rely on the output order, as the hashing algorithm or other aspects
+     * of the implementation may be different on different JDKs or platforms. Hence
+     * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a
+     * bit of a hack, but guarantees that we get the same order on all systems. We assume that
+     * the keys are strings.
+     * 
+     * @param m The Map that contains keys we wish to return in sorted order
+     * @return A string that represents all the keys in sorted order.
+     */
+    public String sortMapToString(Map m) {
+        
+        System.out.println("Map toString looks like: " + m.toString());
+        // Pass in crap, and get nothing back
+        //
+        if  (m == null) {
+            return null;
+        }
+        
+        // Sort the keys in the Map
+        //
+        TreeMap nset = new TreeMap(m);
+        
+        System.out.println("Tree map looks like: " + nset.toString());
+        return nset.toString();
+    }
+
+    // override to track errors
+
+    public void assertEquals(String msg, Object a, Object b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertEquals(Object a, Object b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertEquals(String msg, long a, long b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertEquals(long a, long b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
+
+    public void assertTrue(String msg, boolean b) { try {Assert.assertTrue(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertTrue(boolean b) { try {Assert.assertTrue(b);} catch (Error e) {lastTestFailed=true; throw e;} }
+
+    public void assertFalse(String msg, boolean b) { try {Assert.assertFalse(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertFalse(boolean b) { try {Assert.assertFalse(b);} catch (Error e) {lastTestFailed=true; throw e;} }
+
+    public void assertNotNull(String msg, Object p) { try {Assert.assertNotNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertNotNull(Object p) { try {Assert.assertNotNull(p);} catch (Error e) {lastTestFailed=true; throw e;} }
+
+    public void assertNull(String msg, Object p) { try {Assert.assertNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} }
+    public void assertNull(Object p) { try {Assert.assertNull(p);} catch (Error e) {lastTestFailed=true; throw e;} }
+}
diff --git a/src/org/antlr/test/DebugTestAutoAST.java b/tool/src/test/java/org/antlr/test/DebugTestAutoAST.java
similarity index 100%
rename from src/org/antlr/test/DebugTestAutoAST.java
rename to tool/src/test/java/org/antlr/test/DebugTestAutoAST.java
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java
similarity index 81%
copy from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
copy to tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java
index 815b4e6..e83de00 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,8 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.test;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
-	}
+public class DebugTestCompositeGrammars extends TestCompositeGrammars {
+	public DebugTestCompositeGrammars() {debug=true;}
 }
diff --git a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java
similarity index 81%
rename from runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
rename to tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java
index 815b4e6..f4f1287 100644
--- a/runtime/Java/src/org/antlr/runtime/tree/RewriteEmptyStreamException.java
+++ b/tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,11 +25,9 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime.tree;
+package org.antlr.test;
 
-/** Ref to ID or expr but no tokens in ID stream or subtrees in expr stream */
-public class RewriteEmptyStreamException extends RewriteCardinalityException {
-	public RewriteEmptyStreamException(String elementDescription) {
-		super(elementDescription);
-	}
+public class DebugTestRewriteAST extends TestRewriteAST {
+	public DebugTestRewriteAST() {debug=true;}
 }
+
diff --git a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java b/tool/src/test/java/org/antlr/test/ErrorQueue.java
similarity index 62%
rename from runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
rename to tool/src/test/java/org/antlr/test/ErrorQueue.java
index 97a7d34..f6dcc94 100644
--- a/runtime/Java/src/org/antlr/runtime/MismatchedTokenException.java
+++ b/tool/src/test/java/org/antlr/test/ErrorQueue.java
@@ -1,6 +1,6 @@
 /*
  [The "BSD licence"]
- Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2005-2008 Terence Parr
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -25,20 +25,49 @@
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-package org.antlr.runtime;
+package org.antlr.test;
 
-public class MismatchedTokenException extends RecognitionException {
-	public int expecting;
+import org.antlr.tool.ANTLRErrorListener;
+import org.antlr.tool.Message;
+import org.antlr.tool.ToolMessage;
 
-	public MismatchedTokenException() {
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import java.util.List;
+import java.util.LinkedList;
+
+public class ErrorQueue implements ANTLRErrorListener {
+	List infos = new LinkedList();
+	List errors = new LinkedList();
+	List warnings = new LinkedList();
+
+	public void info(String msg) {
+		infos.add(msg);
+	}
+
+	public void error(Message msg) {
+		errors.add(msg);
 	}
 
-	public MismatchedTokenException(int expecting, IntStream input) {
-		super(input);
-		this.expecting = expecting;
+	public void warning(Message msg) {
+		warnings.add(msg);
+	}
+
+	public void error(ToolMessage msg) {
+		errors.add(msg);
+	}
+
+	public int size() {
+		return infos.size() + errors.size() + warnings.size();
 	}
 
 	public String toString() {
-		return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
+		return "infos: "+infos+
+			"errors: "+errors+
+			"warnings: "+warnings;
 	}
 }
+
diff --git a/src/org/antlr/test/TestASTConstruction.java b/tool/src/test/java/org/antlr/test/TestASTConstruction.java
similarity index 83%
rename from src/org/antlr/test/TestASTConstruction.java
rename to tool/src/test/java/org/antlr/test/TestASTConstruction.java
index a321fed..877a793 100644
--- a/src/org/antlr/test/TestASTConstruction.java
+++ b/tool/src/test/java/org/antlr/test/TestASTConstruction.java
@@ -28,6 +28,10 @@
 package org.antlr.test;
 
 import org.antlr.tool.Grammar;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 public class TestASTConstruction extends BaseTest {
 
@@ -35,7 +39,7 @@ public class TestASTConstruction extends BaseTest {
     public TestASTConstruction() {
     }
 
-	public void testA() throws Exception {
+	@Test public void testA() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : A;");
@@ -45,7 +49,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNakeRulePlusInLexer() throws Exception {
+	@Test public void testNakeRulePlusInLexer() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar P;\n"+
 				"A : B+;\n" +
@@ -56,7 +60,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRulePlus() throws Exception {
+	@Test public void testRulePlus() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : (b)+;\n" +
@@ -67,7 +71,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNakedRulePlus() throws Exception {
+	@Test public void testNakedRulePlus() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : b+;\n" +
@@ -78,7 +82,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRuleOptional() throws Exception {
+	@Test public void testRuleOptional() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : (b)?;\n" +
@@ -89,7 +93,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNakedRuleOptional() throws Exception {
+	@Test public void testNakedRuleOptional() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : b?;\n" +
@@ -100,7 +104,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRuleStar() throws Exception {
+	@Test public void testRuleStar() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : (b)*;\n" +
@@ -111,7 +115,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNakedRuleStar() throws Exception {
+	@Test public void testNakedRuleStar() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar P;\n"+
 				"a : b*;\n" +
@@ -122,7 +126,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharStar() throws Exception {
+	@Test public void testCharStar() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : 'a'*;");
@@ -132,7 +136,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharStarInLexer() throws Exception {
+	@Test public void testCharStarInLexer() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar P;\n"+
 				"B : 'b'*;");
@@ -142,7 +146,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testStringStar() throws Exception {
+	@Test public void testStringStar() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : 'while'*;");
@@ -152,7 +156,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testStringStarInLexer() throws Exception {
+	@Test public void testStringStarInLexer() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar P;\n"+
 				"B : 'while'*;");
@@ -162,7 +166,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharPlus() throws Exception {
+	@Test public void testCharPlus() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : 'a'+;");
@@ -172,7 +176,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharPlusInLexer() throws Exception {
+	@Test public void testCharPlusInLexer() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar P;\n"+
 				"B : 'b'+;");
@@ -182,7 +186,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharOptional() throws Exception {
+	@Test public void testCharOptional() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : 'a'?;");
@@ -192,7 +196,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharOptionalInLexer() throws Exception {
+	@Test public void testCharOptionalInLexer() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar P;\n"+
 				"B : 'b'?;");
@@ -202,7 +206,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCharRangePlus() throws Exception {
+	@Test public void testCharRangePlus() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar P;\n"+
 				"ID : 'a'..'z'+;");
@@ -212,7 +216,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testLabel() throws Exception {
+	@Test public void testLabel() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=ID;");
@@ -222,7 +226,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testLabelOfOptional() throws Exception {
+	@Test public void testLabelOfOptional() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=ID?;");
@@ -232,7 +236,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testLabelOfClosure() throws Exception {
+	@Test public void testLabelOfClosure() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=ID*;");
@@ -242,7 +246,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRuleLabel() throws Exception {
+	@Test public void testRuleLabel() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=b;\n" +
@@ -253,7 +257,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSetLabel() throws Exception {
+	@Test public void testSetLabel() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=(A|B);\n");
@@ -263,7 +267,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNotSetLabel() throws Exception {
+	@Test public void testNotSetLabel() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=~(A|B);\n");
@@ -273,7 +277,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNotSetListLabel() throws Exception {
+	@Test public void testNotSetListLabel() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x+=~(A|B);\n");
@@ -283,7 +287,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNotSetListLabelInLoop() throws Exception {
+	@Test public void testNotSetListLabelInLoop() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x+=~(A|B)+;\n");
@@ -293,7 +297,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRuleLabelOfPositiveClosure() throws Exception {
+	@Test public void testRuleLabelOfPositiveClosure() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x=b+;\n" +
@@ -304,7 +308,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testListLabelOfClosure() throws Exception {
+	@Test public void testListLabelOfClosure() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x+=ID*;");
@@ -314,7 +318,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testListLabelOfClosure2() throws Exception {
+	@Test public void testListLabelOfClosure2() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n"+
 				"a : x+='int'*;");
@@ -324,7 +328,7 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRuleListLabelOfPositiveClosure() throws Exception {
+	@Test public void testRuleListLabelOfPositiveClosure() throws Exception {
 		Grammar g = new Grammar(
 				"grammar P;\n" +
 				"options {output=AST;}\n"+
@@ -336,25 +340,25 @@ public class TestASTConstruction extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRootTokenInStarLoop() throws Exception {
+	@Test public void testRootTokenInStarLoop() throws Exception {
 		Grammar g = new Grammar(
 				"grammar Expr;\n" +
 				"options { backtrack=true; }\n" +
 				"a : ('*'^)* ;\n");  // bug: the synpred had nothing in it
 		String expecting =
-			" ( rule synpred1 ARG RET scope ( BLOCK ( ALT '*' <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
-		String found = g.getRule("synpred1").tree.toStringTree();
+			" ( rule synpred1_Expr ARG RET scope ( BLOCK ( ALT '*' <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("synpred1_Expr").tree.toStringTree();
 		assertEquals(expecting, found);
 	}
 
-	public void testActionInStarLoop() throws Exception {
+	@Test public void testActionInStarLoop() throws Exception {
 		Grammar g = new Grammar(
 				"grammar Expr;\n" +
 				"options { backtrack=true; }\n" +
 				"a : ({blort} 'x')* ;\n");  // bug: the synpred had nothing in it
 		String expecting =
-			" ( rule synpred1 ARG RET scope ( BLOCK ( ALT blort 'x' <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
-		String found = g.getRule("synpred1").tree.toStringTree();
+			" ( rule synpred1_Expr ARG RET scope ( BLOCK ( ALT blort 'x' <end-of-alt> ) <end-of-block> ) <end-of-rule> )";
+		String found = g.getRule("synpred1_Expr").tree.toStringTree();
 		assertEquals(expecting, found);
 	}
 
diff --git a/src/org/antlr/test/TestAttributes.java b/tool/src/test/java/org/antlr/test/TestAttributes.java
similarity index 72%
rename from src/org/antlr/test/TestAttributes.java
rename to tool/src/test/java/org/antlr/test/TestAttributes.java
index 8f47c07..d5bee10 100644
--- a/src/org/antlr/test/TestAttributes.java
+++ b/tool/src/test/java/org/antlr/test/TestAttributes.java
@@ -28,16 +28,22 @@
 package org.antlr.test;
 
 import org.antlr.Tool;
+import org.antlr.grammar.v3.ActionTranslator;
 import org.antlr.codegen.CodeGenerator;
-import org.antlr.codegen.ActionTranslatorLexer;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.stringtemplate.StringTemplateGroup;
 import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
 import org.antlr.tool.*;
 
 import java.io.StringReader;
-import java.util.List;
 import java.util.ArrayList;
+import java.util.List;
+import org.antlr.grammar.v2.ANTLRParser;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /** Check the $x, $x.y attributes.  For checking the actual
  *  translation, assume the Java target.  This is still a great test
@@ -49,12 +55,12 @@ public class TestAttributes extends BaseTest {
 	public TestAttributes() {
 	}
 
-	public void testEscapedLessThanInAction() throws Exception {
+	@Test public void testEscapedLessThanInAction() throws Exception {
 		Grammar g = new Grammar();
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		String action = "i<3; '<xmltag>'";
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
 		String expecting = action;
 		String rawTranslation =
@@ -67,21 +73,21 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testEscaped$InAction() throws Exception {
+	@Test public void testEscaped$InAction() throws Exception {
 		String action = "int \\$n; \"\\$in string\\$\"";
 		String expecting = "int $n; \"$in string$\"";
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"@members {"+action+"}\n"+
-				"a[User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
+			"@members {"+action+"}\n"+
+			"a[User u, int i]\n" +
+			"        : {"+action+"}\n" +
+			"        ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),0);
 		String rawTranslation =
@@ -93,7 +99,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testArguments() throws Exception {
+	@Test public void testArguments() throws Exception {
 		String action = "$i; $i.x; $u; $u.x";
 		String expecting = "i; i.x; u; u.x";
 
@@ -101,14 +107,281 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
+			"a[User u, int i]\n" +
+			"        : {"+action+"}\n" +
+			"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testComplicatedArgParsing() throws Exception {
+		String action = "x, (*a).foo(21,33), 3.2+1, '\\n', "+
+						"\"a,oo\\nick\", {bl, \"fdkj\"eck}";
+		String expecting = "x, (*a).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[User u, int i]\n" +
+			"        : A a["+action+"] B\n" +
+			"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =	translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testBracketArgParsing() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[String[\\] ick, int i]\n" +
+			"        : A \n"+
+			"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		Rule r = g.getRule("a");
+		AttributeScope parameters = r.parameterScope;
+		List<Attribute> attrs = parameters.getAttributes();
+		assertEquals("attribute mismatch","String[] ick",attrs.get(0).decl.toString());
+		assertEquals("parameter name mismatch","ick",attrs.get(0).name);
+		assertEquals("declarator mismatch", "String[]", attrs.get(0).type);
+
+		assertEquals("attribute mismatch","int i",attrs.get(1).decl.toString());
+		assertEquals("parameter name mismatch","i",attrs.get(1).name);
+		assertEquals("declarator mismatch", "int", attrs.get(1).type);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testStringArgParsing() throws Exception {
+		String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19";
+		String expecting = "34, '{', \"it's<\", '\"', \"\\\"\", 19";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[User u, int i]\n" +
+			"        : A a["+action+"] B\n" +
+			"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =	translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		List<String> expectArgs = new ArrayList<String>() {
+			{add("34");}
+			{add("'{'");}
+			{add("\"it's<\"");}
+			{add("'\"'");}
+			{add("\"\\\"\"");} // that's "\""
+			{add("19");}
+		};
+		List<String> actualArgs = CodeGenerator.getListOfArgumentsFromAction(action, ',');
+		assertEquals("args mismatch", expectArgs, actualArgs);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testComplicatedSingleArgParsing() throws Exception {
+		String action = "(*a).foo(21,33,\",\")";
+		String expecting = "(*a).foo(21,33,\",\")";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[User u, int i]\n" +
+			"        : A a["+action+"] B\n" +
+			"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =	translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testArgWithLT() throws Exception {
+		String action = "34<50";
+		String expecting = "34<50";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[boolean b]\n" +
+			"        : A a["+action+"] B\n" +
+			"        ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGenericsAsArgumentDefinition() throws Exception {
+		String action = "$foo.get(\"ick\");";
+		String expecting = "foo.get(\"ick\");";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar T;\n"+
+			"a[HashMap<String,String> foo]\n" +
+			"        : {"+action+"}\n" +
+			"        ;";
+		Grammar g = new Grammar(grammar);
+		Rule ra = g.getRule("a");
+		List<Attribute> attrs = ra.parameterScope.getAttributes();
+		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString());
+		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
+		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGenericsAsArgumentDefinition2() throws Exception {
+		String action = "$foo.get(\"ick\"); x=3;";
+		String expecting = "foo.get(\"ick\"); x=3;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar T;\n"+
+			"a[HashMap<String,String> foo, int x, List<String> duh]\n" +
+			"        : {"+action+"}\n" +
+			"        ;";
+		Grammar g = new Grammar(grammar);
+		Rule ra = g.getRule("a");
+		List<Attribute> attrs = ra.parameterScope.getAttributes();
+
+		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString().trim());
+		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
+		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
+
+		assertEquals("attribute mismatch","int x",attrs.get(1).decl.toString().trim());
+		assertEquals("parameter name mismatch","x",attrs.get(1).name);
+		assertEquals("declarator mismatch", "int", attrs.get(1).type);
+
+		assertEquals("attribute mismatch","List<String> duh",attrs.get(2).decl.toString().trim());
+		assertEquals("parameter name mismatch","duh",attrs.get(2).name);
+		assertEquals("declarator mismatch", "List<String>", attrs.get(2).type);
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGenericsAsReturnValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar T;\n"+
+			"a returns [HashMap<String,String> foo] : ;\n";
+		Grammar g = new Grammar(grammar);
+		Rule ra = g.getRule("a");
+		List<Attribute> attrs = ra.returnScope.getAttributes();
+		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString());
+		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
+		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testComplicatedArgParsingWithTranslation() throws Exception {
+		String action = "x, $A.text+\"3242\", (*$A).foo(21,33), 3.2+1, '\\n', "+
+						"\"a,oo\\nick\", {bl, \"fdkj\"eck}";
+		String expecting = "x, (A1!=null?A1.getText():null)+\"3242\", (*A1).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[User u, int i]\n" +
+			"        : A a["+action+"] B\n" +
+			"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -127,9 +400,9 @@ public class TestAttributes extends BaseTest {
 	 convert actions to strings; keep as templates.
 	 June 9, 2006: made action translation leave templates not strings
 	 */
-	public void testRefToReturnValueBeforeRefToPredefinedAttr() throws Exception {
+	@Test public void testRefToReturnValueBeforeRefToPredefinedAttr() throws Exception {
 		String action = "$x.foo";
-		String expecting = "x.foo";
+		String expecting = "(x!=null?x.foo:0)";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -141,7 +414,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -154,13 +427,13 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleLabelBeforeRefToPredefinedAttr() throws Exception {
+	@Test public void testRuleLabelBeforeRefToPredefinedAttr() throws Exception {
 		// As of Mar 2007, I'm removing unused labels.  Unfortunately,
 		// the action is not seen until code gen.  Can't see $x.text
 		// before stripping unused labels.  We really need to translate
 		// actions first so code gen logic can use info.
 		String action = "$x.text";
-		String expecting = "input.toString(x.start,x.stop)";
+		String expecting = "(x!=null?input.toString(x.start,x.stop):null)";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -172,7 +445,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -185,7 +458,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testInvalidArguments() throws Exception {
+	@Test public void testInvalidArguments() throws Exception {
 		String action = "$x";
 		String expecting = action;
 
@@ -193,12 +466,12 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
+			"a[User u, int i]\n" +
+			"        : {"+action+"}\n" +
+			"        ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -216,7 +489,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testReturnValue() throws Exception {
+	@Test public void testReturnValue() throws Exception {
 		String action = "$x.i";
 		String expecting = "x";
 
@@ -224,16 +497,16 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a returns [int i]\n" +
-				"        : 'a'\n" +
-				"        ;\n" +
-				"b : x=a {"+action+"} ;\n");
+			"a returns [int i]\n" +
+			"        : 'a'\n" +
+			"        ;\n" +
+			"b : x=a {"+action+"} ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "b",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -247,7 +520,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testReturnValueWithNumber() throws Exception {
+	@Test public void testReturnValueWithNumber() throws Exception {
 		String action = "$x.i1";
 		String expecting = "x";
 
@@ -255,16 +528,16 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a returns [int i1]\n" +
-				"        : 'a'\n" +
-				"        ;\n" +
-				"b : x=a {"+action+"} ;\n");
+			"a returns [int i1]\n" +
+			"        : 'a'\n" +
+			"        ;\n" +
+			"b : x=a {"+action+"} ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "b",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -278,7 +551,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testReturnValues() throws Exception {
+	@Test public void testReturnValues() throws Exception {
 		String action = "$i; $i.x; $u; $u.x";
 		String expecting = "retval.i; retval.i.x; retval.u; retval.u.x";
 
@@ -286,14 +559,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a returns [User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
+			"a returns [User u, int i]\n" +
+			"        : {"+action+"}\n" +
+			"        ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -307,7 +580,7 @@ public class TestAttributes extends BaseTest {
 	}
 
 	/* regression test for ANTLR-46 */
-	public void testReturnWithMultipleRuleRefs() throws Exception {
+	@Test public void testReturnWithMultipleRuleRefs() throws Exception {
 		String action1 = "$obj = $rule2.obj;";
 		String action2 = "$obj = $rule3.obj;";
 		String expecting1 = "obj = rule21;";
@@ -335,7 +608,7 @@ public class TestAttributes extends BaseTest {
 		String action = action1;
 		String expecting = expecting1;
 		do {
-			ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"rule1",
+			ActionTranslator translator = new ActionTranslator(generator,"rule1",
 																		 new antlr.CommonToken(ANTLRParser.ACTION,action),i+1);
 			String rawTranslation =
 					translator.translate();
@@ -350,7 +623,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testInvalidReturnValues() throws Exception {
+	@Test public void testInvalidReturnValues() throws Exception {
 		String action = "$x";
 		String expecting = action;
 
@@ -358,12 +631,12 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a returns [User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
+			"a returns [User u, int i]\n" +
+			"        : {"+action+"}\n" +
+			"        ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -380,25 +653,23 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testTokenLabels() throws Exception {
+	@Test public void testTokenLabels() throws Exception {
 		String action = "$id; $f; $id.text; $id.getText(); $id.dork " +
 						"$id.type; $id.line; $id.pos; " +
 						"$id.channel; $id.index;";
-		String expecting = "id; f; id.getText(); id.getText(); id.dork " +
-						   "id.getType(); id.getLine(); id.getCharPositionInLine(); " +
-						   "id.getChannel(); id.getTokenIndex();";
+		String expecting = "id; f; (id!=null?id.getText():null); id.getText(); id.dork (id!=null?id.getType():0); (id!=null?id.getLine():0); (id!=null?id.getCharPositionInLine():0); (id!=null?id.getChannel():0); (id!=null?id.getTokenIndex():0);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a : id=ID f=FLOAT {"+action+"}\n" +
-				"  ;");
+			"a : id=ID f=FLOAT {"+action+"}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -411,19 +682,21 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleLabels() throws Exception {
-		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.stop;";
-		String expecting = "r.x; ((Token)r.start); ((Token)r.stop); ((Object)r.tree); r.x; ((Token)r.stop);";
+	@Test public void testRuleLabels() throws Exception {
+		String action = "$r.x; $r.start;\n $r.stop;\n $r.tree; $a.x; $a.stop;";
+		String expecting = "(r!=null?r.x:0); (r!=null?((Token)r.start):null);\n" +
+						   "             (r!=null?((Token)r.stop):null);\n" +
+						   "             (r!=null?((Object)r.tree):null); (r!=null?r.x:0); (r!=null?((Token)r.stop):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;");
+			"a returns [int x]\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a {###"+action+"!!!}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -437,20 +710,36 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleLabelsWithSpecialToken() throws Exception {
+	@Test public void testAmbiguRuleRef() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A a {$a.text} | B ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		// error(132): <string>:2:9: reference $a is ambiguous; rule a is enclosing rule and referenced in the production
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+	}
+
+	@Test public void testRuleLabelsWithSpecialToken() throws Exception {
 		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.stop;";
-		String expecting = "r.x; ((MYTOKEN)r.start); ((MYTOKEN)r.stop); ((Object)r.tree); r.x; ((MYTOKEN)r.stop);";
+		String expecting = "(r!=null?r.x:0); (r!=null?((MYTOKEN)r.start):null); (r!=null?((MYTOKEN)r.stop):null); (r!=null?((Object)r.tree):null); (r!=null?r.x:0); (r!=null?((MYTOKEN)r.stop):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"options {TokenLabelType=MYTOKEN;}\n"+
-				"a returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;");
+			"options {TokenLabelType=MYTOKEN;}\n"+
+			"a returns [int x]\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a {###"+action+"!!!}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -465,18 +754,18 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testForwardRefRuleLabels() throws Exception {
+	@Test public void testForwardRefRuleLabels() throws Exception {
 		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.tree;";
-		String expecting = "r.x; ((Token)r.start); ((Token)r.stop); ((Object)r.tree); r.x; ((Object)r.tree);";
+		String expecting = "(r!=null?r.x:0); (r!=null?((Token)r.start):null); (r!=null?((Token)r.stop):null); (r!=null?((Object)r.tree):null); (r!=null?r.x:0); (r!=null?((Object)r.tree):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;\n" +
-				"a returns [int x]\n" +
-				"  : ;\n");
+			"b : r=a {###"+action+"!!!}\n" +
+			"  ;\n" +
+			"a returns [int x]\n" +
+			"  : ;\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -491,7 +780,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testInvalidRuleLabelAccessesParameter() throws Exception {
+	@Test public void testInvalidRuleLabelAccessesParameter() throws Exception {
 		String action = "$r.z";
 		String expecting = action;
 
@@ -499,14 +788,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a[int z] returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
+			"a[int z] returns [int x]\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a[3] {"+action+"}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -524,7 +813,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testInvalidRuleLabelAccessesScopeAttribute() throws Exception {
+	@Test public void testInvalidRuleLabelAccessesScopeAttribute() throws Exception {
 		String action = "$r.n";
 		String expecting = action;
 
@@ -532,15 +821,15 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a\n" +
-				"scope { int n; }\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
+			"a\n" +
+			"scope { int n; }\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a[3] {"+action+"}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -558,7 +847,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testInvalidRuleAttribute() throws Exception {
+	@Test public void testInvalidRuleAttribute() throws Exception {
 		String action = "$r.blort";
 		String expecting = action;
 
@@ -566,14 +855,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a[int z] returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
+			"a[int z] returns [int x]\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a[3] {"+action+"}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -591,7 +880,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testMissingRuleAttribute() throws Exception {
+	@Test public void testMissingRuleAttribute() throws Exception {
 		String action = "$r";
 		String expecting = action;
 
@@ -599,14 +888,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a[int z] returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
+			"a[int z] returns [int x]\n" +
+			"  :\n" +
+			"  ;\n"+
+			"b : r=a[3] {"+action+"}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -619,7 +908,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testMissingUnlabeledRuleAttribute() throws Exception {
+	@Test public void testMissingUnlabeledRuleAttribute() throws Exception {
 		String action = "$a";
 		String expecting = action;
 
@@ -627,13 +916,13 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a returns [int x]:\n" +
-				"  ;\n"+
-				"b : a {"+action+"}\n" +
-				"  ;");
+			"a returns [int x]:\n" +
+			"  ;\n"+
+			"b : a {"+action+"}\n" +
+			"  ;");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -645,7 +934,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testNonDynamicAttributeOutsideRule() throws Exception {
+	@Test public void testNonDynamicAttributeOutsideRule() throws Exception {
 		String action = "public void foo() { $x; }";
 		String expecting = action;
 
@@ -653,11 +942,11 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"@members {'+action+'}\n" +
-				"a : ;\n");
+			"@members {'+action+'}\n" +
+			"a : ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 null,
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
 		String rawTranslation =
@@ -675,7 +964,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testNonDynamicAttributeOutsideRule2() throws Exception {
+	@Test public void testNonDynamicAttributeOutsideRule2() throws Exception {
 		String action = "public void foo() { $x.y; }";
 		String expecting = action;
 
@@ -683,11 +972,11 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"@members {'+action+'}\n" +
-				"a : ;\n");
+			"@members {'+action+'}\n" +
+			"a : ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 null,
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
 		String rawTranslation =
@@ -708,26 +997,26 @@ public class TestAttributes extends BaseTest {
 
 	// D Y N A M I C A L L Y  S C O P E D  A T T R I B U T E S
 
-	public void testBasicGlobalScope() throws Exception {
+	@Test public void testBasicGlobalScope() throws Exception {
 		String action = "$Symbols::names.add($id.text);";
-		String expecting = "((Symbols_scope)Symbols_stack.peek()).names.add(id.getText());";
+		String expecting = "((Symbols_scope)Symbols_stack.peek()).names.add((id!=null?id.getText():null));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
+			"scope Symbols {\n" +
+			"  int n;\n" +
+			"  List names;\n" +
+			"}\n" +
+			"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+			"  ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -740,7 +1029,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testUnknownGlobalScope() throws Exception {
+	@Test public void testUnknownGlobalScope() throws Exception {
 		String action = "$Symbols::names.add($id.text);";
 
 		ErrorQueue equeue = new ErrorQueue();
@@ -754,7 +1043,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 
 		assertEquals("unexpected errors: "+equeue, 2, equeue.errors.size());
@@ -766,27 +1055,27 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testIndexedGlobalScope() throws Exception {
+	@Test public void testIndexedGlobalScope() throws Exception {
 		String action = "$Symbols[-1]::names.add($id.text);";
 		String expecting =
-			"((Symbols_scope)Symbols_stack.elementAt(Symbols_stack.size()-1-1)).names.add(id.getText());";
+			"((Symbols_scope)Symbols_stack.elementAt(Symbols_stack.size()-1-1)).names.add((id!=null?id.getText():null));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
+			"scope Symbols {\n" +
+			"  int n;\n" +
+			"  List names;\n" +
+			"}\n" +
+			"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+			"  ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -799,73 +1088,69 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void test0IndexedGlobalScope() throws Exception {
+	@Test public void test0IndexedGlobalScope() throws Exception {
 		String action = "$Symbols[0]::names.add($id.text);";
 		String expecting =
-			"((Symbols_scope)Symbols_stack.elementAt(0)).names.add(id.getText());";
+			"((Symbols_scope)Symbols_stack.elementAt(0)).names.add((id!=null?id.getText():null));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
+			"scope Symbols {\n" +
+			"  int n;\n" +
+			"  List names;\n" +
+			"}\n" +
+			"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+			"  ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
 		StringTemplateGroup templates =
 			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
-		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
-		String found = actionST.toString();
-		assertEquals(expecting, found);
+		assertEquals(expecting, rawTranslation);
 
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testAbsoluteIndexedGlobalScope() throws Exception {
+	@Test public void testAbsoluteIndexedGlobalScope() throws Exception {
 		String action = "$Symbols[3]::names.add($id.text);";
 		String expecting =
-			"((Symbols_scope)Symbols_stack.elementAt(3)).names.add(id.getText());";
+			"((Symbols_scope)Symbols_stack.elementAt(3)).names.add((id!=null?id.getText():null));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
+			"scope Symbols {\n" +
+			"  int n;\n" +
+			"  List names;\n" +
+			"}\n" +
+			"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+			"  ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
 		StringTemplateGroup templates =
 			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
-		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
-		String found = actionST.toString();
-		assertEquals(expecting, found);
+		assertEquals(expecting, rawTranslation);
 
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testScopeAndAttributeWithUnderscore() throws Exception {
+	@Test public void testScopeAndAttributeWithUnderscore() throws Exception {
 		String action = "$foo_bar::a_b;";
 		String expecting = "((foo_bar_scope)foo_bar_stack.peek()).a_b;";
 
@@ -873,17 +1158,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope foo_bar {\n" +
-				"  int a_b;\n" +
-				"}\n" +
-				"a scope foo_bar; : (ID {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
+			"scope foo_bar {\n" +
+			"  int a_b;\n" +
+			"}\n" +
+			"a scope foo_bar; : (ID {"+action+"} )+\n" +
+			"  ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -897,7 +1182,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testSharedGlobalScope() throws Exception {
+	@Test public void testSharedGlobalScope() throws Exception {
 		String action = "$Symbols::x;";
 		String expecting = "((Symbols_scope)Symbols_stack.peek()).x;";
 
@@ -905,21 +1190,21 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  String x;\n" +
-				"}\n" +
-				"a\n"+
-				"scope { int y; }\n"+
-				"scope Symbols;\n" +
-				" : b {"+action+"}\n" +
-				" ;\n" +
-				"b : ID {$Symbols::x=$ID.text} ;\n" +
-				"ID : 'a';\n");
+			"scope Symbols {\n" +
+			"  String x;\n" +
+			"}\n" +
+			"a\n"+
+			"scope { int y; }\n"+
+			"scope Symbols;\n" +
+			" : b {"+action+"}\n" +
+			" ;\n" +
+			"b : ID {$Symbols::x=$ID.text} ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -932,7 +1217,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testGlobalScopeOutsideRule() throws Exception {
+	@Test public void testGlobalScopeOutsideRule() throws Exception {
 		String action = "public void foo() {$Symbols::names.add('foo');}";
 		String expecting = "public void foo() {((Symbols_scope)Symbols_stack.peek()).names.add('foo');}";
 
@@ -940,18 +1225,18 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"@members {'+action+'}\n" +
-				"a : \n" +
-				"  ;\n");
+			"scope Symbols {\n" +
+			"  int n;\n" +
+			"  List names;\n" +
+			"}\n" +
+			"@members {'+action+'}\n" +
+			"a : \n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -964,7 +1249,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleScopeOutsideRule() throws Exception {
+	@Test public void testRuleScopeOutsideRule() throws Exception {
 		String action = "public void foo() {$a::name;}";
 		String expecting = "public void foo() {((a_scope)a_stack.peek()).name;}";
 
@@ -972,16 +1257,16 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"@members {"+action+"}\n" +
-				"a\n" +
-				"scope { int name; }\n" +
-				"  : {foo();}\n" +
-				"  ;\n");
+			"@members {"+action+"}\n" +
+			"a\n" +
+			"scope { String name; }\n" +
+			"  : {foo();}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 null,
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),0);
 		String rawTranslation =
@@ -995,7 +1280,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testBasicRuleScope() throws Exception {
+	@Test public void testBasicRuleScope() throws Exception {
 		String action = "$a::n;";
 		String expecting = "((a_scope)a_stack.peek()).n;";
 
@@ -1003,16 +1288,16 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : {"+action+"}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1025,7 +1310,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testUnqualifiedRuleScopeAccessInsideRule() throws Exception {
+	@Test public void testUnqualifiedRuleScopeAccessInsideRule() throws Exception {
 		String action = "$n;";
 		String expecting = action;
 
@@ -1033,11 +1318,11 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : {"+action+"}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
@@ -1052,7 +1337,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testIsolatedDynamicRuleScopeRef() throws Exception {
+	@Test public void testIsolatedDynamicRuleScopeRef() throws Exception {
 		String action = "$a;"; // refers to stack not top of stack
 		String expecting = "a_stack;";
 
@@ -1060,17 +1345,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : b ;\n" +
-				"b : {"+action+"}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : b ;\n" +
+			"b : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1083,7 +1368,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testDynamicRuleScopeRefInSubrule() throws Exception {
+	@Test public void testDynamicRuleScopeRefInSubrule() throws Exception {
 		String action = "$a::n;";
 		String expecting = "((a_scope)a_stack.peek()).n;";
 
@@ -1091,17 +1376,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : b ;\n" +
-				"b : {"+action+"}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  float n;\n" +
+			"} : b ;\n" +
+			"b : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1114,7 +1399,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testIsolatedGlobalScopeRef() throws Exception {
+	@Test public void testIsolatedGlobalScopeRef() throws Exception {
 		String action = "$Symbols;";
 		String expecting = "Symbols_stack;";
 
@@ -1122,21 +1407,21 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  String x;\n" +
-				"}\n" +
-				"a\n"+
-				"scope { int y; }\n"+
-				"scope Symbols;\n" +
-				" : b {"+action+"}\n" +
-				" ;\n" +
-				"b : ID {$Symbols::x=$ID.text} ;\n" +
-				"ID : 'a';\n");
+			"scope Symbols {\n" +
+			"  String x;\n" +
+			"}\n" +
+			"a\n"+
+			"scope { int y; }\n"+
+			"scope Symbols;\n" +
+			" : b {"+action+"}\n" +
+			" ;\n" +
+			"b : ID {$Symbols::x=$ID.text} ;\n" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1149,7 +1434,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleScopeFromAnotherRule() throws Exception {
+	@Test public void testRuleScopeFromAnotherRule() throws Exception {
 		String action = "$a::n;"; // must be qualified
 		String expecting = "((a_scope)a_stack.peek()).n;";
 
@@ -1157,18 +1442,18 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : b\n" +
-				"  ;\n" +
-				"b : {"+action+"}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  boolean n;\n" +
+			"} : b\n" +
+			"  ;\n" +
+			"b : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1181,7 +1466,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception {
+	@Test public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception {
 		String action = "$a.i;";
 		String expecting = "i;";
 
@@ -1189,13 +1474,13 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a[int i]: {"+action+"}\n" +
-				"  ;\n");
+			"a[int i]: {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1208,7 +1493,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
+	@Test public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
 		String action = "$a.i;";
 		String expecting = "retval.i;";
 
@@ -1216,13 +1501,13 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a returns [int i, int j]: {"+action+"}\n" +
-				"  ;\n");
+			"a returns [int i, int j]: {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1235,7 +1520,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
+	@Test public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
 		String action = "$a.i = 1;";
 		String expecting = "retval.i = 1;";
 
@@ -1249,7 +1534,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1262,7 +1547,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testIsolatedRefToCurrentRule() throws Exception {
+	@Test public void testIsolatedRefToCurrentRule() throws Exception {
 		String action = "$a;";
 		String expecting = "";
 
@@ -1270,8 +1555,8 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : 'a' {"+action+"}\n" +
-				"  ;\n");
+			"a : 'a' {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
@@ -1286,16 +1571,16 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testIsolatedRefToRule() throws Exception {
+	@Test public void testIsolatedRefToRule() throws Exception {
 		String action = "$x;";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : x=b {"+action+"}\n" +
-				"  ;\n" +
-				"b : 'b' ;\n");
+			"a : x=b {"+action+"}\n" +
+			"  ;\n" +
+			"b : 'b' ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
@@ -1309,7 +1594,7 @@ public class TestAttributes extends BaseTest {
 	}
 
 	/*  I think these have to be errors $a.x makes no sense.
-	public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception {
+	@Test public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception {
 			String action = "$a.x;";
 			String expecting = "x;";
 
@@ -1323,7 +1608,7 @@ public class TestAttributes extends BaseTest {
 			CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 			g.setCodeGenerator(generator);
 			generator.genRecognizer(); // forces load of templates
-			ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+			ActionTranslator translator = new ActionTranslator(generator,"a",
 															   new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 			String rawTranslation =
 				translator.translate();
@@ -1336,7 +1621,7 @@ public class TestAttributes extends BaseTest {
 			assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 		}
 
-	public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception {
+	@Test public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception {
 		String action = "$a.x;"; // must be qualified
 		String expecting = "list_x;";
 
@@ -1350,7 +1635,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 														   new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1363,7 +1648,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 */
-	public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception {
+	@Test public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception {
 		String action = "$a.st;"; // can be qualified
 		String expecting = "retval.st;";
 
@@ -1371,14 +1656,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
-				"options {output=template;}\n"+
-				"a : (A->{$A.text}) {"+action+"}\n" +
-				"  ;\n");
+			"options {output=template;}\n"+
+			"a : (A->{$A.text}) {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1391,20 +1676,20 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleRefWhenRuleHasScope() throws Exception {
+	@Test public void testRuleRefWhenRuleHasScope() throws Exception {
 		String action = "$b.start;";
-		String expecting = "((Token)b1.start);";
+		String expecting = "(b1!=null?((Token)b1.start):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n" +
-				"a : b {###"+action+"!!!} ;\n" +
-				"b\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : 'b' \n" +
-				"  ;\n");
+			"a : b {###"+action+"!!!} ;\n" +
+			"b\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : 'b' \n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
@@ -1418,7 +1703,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception {
+	@Test public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception {
 		String action = "$b::n;";
 		String expecting = "((b_scope)b_stack.peek()).n;";
 
@@ -1426,17 +1711,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n" +
-				"s : b ;\n"+
-				"b\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
-				"  ;\n");
+			"s : b ;\n"+
+			"b\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator, "b",
+		ActionTranslator translator = new ActionTranslator(generator, "b",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1449,7 +1734,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRefToTemplateAttributeForCurrentRule() throws Exception {
+	@Test public void testRefToTemplateAttributeForCurrentRule() throws Exception {
 		String action = "$st=null;";
 		String expecting = "retval.st =null;";
 
@@ -1457,14 +1742,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
-				"options {output=template;}\n"+
-				"a : {"+action+"}\n" +
-				"  ;\n");
+			"options {output=template;}\n"+
+			"a : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1477,7 +1762,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRefToTextAttributeForCurrentRule() throws Exception {
+	@Test public void testRefToTextAttributeForCurrentRule() throws Exception {
 		String action = "$text";
 		String expecting = "input.toString(retval.start,input.LT(-1))";
 
@@ -1485,14 +1770,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
-				"options {output=template;}\n"+
-				"a : {"+action+"}\n" +
-				"  ;\n");
+			"options {output=template;}\n"+
+			"a : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1505,7 +1790,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRefToStartAttributeForCurrentRule() throws Exception {
+	@Test public void testRefToStartAttributeForCurrentRule() throws Exception {
 		String action = "$start;";
 		String expecting = "((Token)retval.start);";
 
@@ -1513,13 +1798,13 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
-				"a : {###"+action+"!!!}\n" +
-				"  ;\n");
+			"a : {###"+action+"!!!}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		StringTemplate codeST = generator.getRecognizerST();
 		String code = codeST.toString();
@@ -1529,26 +1814,26 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testTokenLabelFromMultipleAlts() throws Exception {
+	@Test public void testTokenLabelFromMultipleAlts() throws Exception {
 		String action = "$ID.text;"; // must be qualified
 		String action2 = "$INT.text;"; // must be qualified
-		String expecting = "ID1.getText();";
-		String expecting2 = "INT2.getText();";
+		String expecting = "(ID1!=null?ID1.getText():null);";
+		String expecting2 = "(INT2!=null?INT2.getText():null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ID {"+action+"}\n" +
-				"  | INT {"+action2+"}\n" +
-				"  ;\n" +
-				"ID : 'a';\n" +
-				"INT : '0';\n");
+			"a : ID {"+action+"}\n" +
+			"  | INT {"+action2+"}\n" +
+			"  ;\n" +
+			"ID : 'a';\n" +
+			"INT : '0';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1559,7 +1844,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		translator = new ActionTranslatorLexer(generator,
+		translator = new ActionTranslator(generator,
 											   "a",
 											   new antlr.CommonToken(ANTLRParser.ACTION,action2),2);
 		rawTranslation =
@@ -1574,26 +1859,26 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleLabelFromMultipleAlts() throws Exception {
+	@Test public void testRuleLabelFromMultipleAlts() throws Exception {
 		String action = "$b.text;"; // must be qualified
 		String action2 = "$c.text;"; // must be qualified
-		String expecting = "input.toString(b1.start,b1.stop);";
-		String expecting2 = "input.toString(c2.start,c2.stop);";
+		String expecting = "(b1!=null?input.toString(b1.start,b1.stop):null);";
+		String expecting2 = "(c2!=null?input.toString(c2.start,c2.stop):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : b {"+action+"}\n" +
-				"  | c {"+action2+"}\n" +
-				"  ;\n" +
-				"b : 'a';\n" +
-				"c : '0';\n");
+			"a : b {"+action+"}\n" +
+			"  | c {"+action2+"}\n" +
+			"  ;\n" +
+			"b : 'a';\n" +
+			"c : '0';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -1604,7 +1889,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		translator = new ActionTranslatorLexer(generator,
+		translator = new ActionTranslator(generator,
 											   "a",
 											   new antlr.CommonToken(ANTLRParser.ACTION,action2),2);
 		rawTranslation =
@@ -1619,7 +1904,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testUnknownDynamicAttribute() throws Exception {
+	@Test public void testUnknownDynamicAttribute() throws Exception {
 		String action = "$a::x";
 		String expecting = action;
 
@@ -1627,17 +1912,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : {"+action+"}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : {"+action+"}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -1656,7 +1941,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testUnknownGlobalDynamicAttribute() throws Exception {
+	@Test public void testUnknownGlobalDynamicAttribute() throws Exception {
 		String action = "$Symbols::x";
 		String expecting = action;
 
@@ -1664,17 +1949,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"}\n" +
-				"a : {'+action+'}\n" +
-				"  ;\n");
+			"scope Symbols {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : {'+action+'}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -1693,7 +1978,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testUnqualifiedRuleScopeAttribute() throws Exception {
+	@Test public void testUnqualifiedRuleScopeAttribute() throws Exception {
 		String action = "$n;"; // must be qualified
 		String expecting = "$n;";
 
@@ -1701,17 +1986,17 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : b\n" +
-				"  ;\n" +
-				"b : {'+action+'}\n" +
-				"  ;\n");
+			"a\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : b\n" +
+			"  ;\n" +
+			"b : {'+action+'}\n" +
+			"  ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "b",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -1730,14 +2015,14 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testRuleAndTokenLabelTypeMismatch() throws Exception {
+	@Test public void testRuleAndTokenLabelTypeMismatch() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : id='foo' id=b\n" +
-				"  ;\n" +
-				"b : ;\n");
+			"a : id='foo' id=b\n" +
+			"  ;\n" +
+			"b : ;\n");
 		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
 		Object expectedArg = "id";
 		Object expectedArg2 = "rule!=token";
@@ -1746,14 +2031,14 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testListAndTokenLabelTypeMismatch() throws Exception {
+	@Test public void testListAndTokenLabelTypeMismatch() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ids+='a' ids='b'\n" +
-				"  ;\n" +
-				"b : ;\n");
+			"a : ids+='a' ids='b'\n" +
+			"  ;\n" +
+			"b : ;\n");
 		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
 		Object expectedArg = "ids";
 		Object expectedArg2 = "token!=token-list";
@@ -1762,15 +2047,15 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testListAndRuleLabelTypeMismatch() throws Exception {
+	@Test public void testListAndRuleLabelTypeMismatch() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n" +
-				"options {output=AST;}\n"+
-				"a : bs+=b bs=b\n" +
-				"  ;\n" +
-				"b : 'b';\n");
+			"options {output=AST;}\n"+
+			"a : bs+=b bs=b\n" +
+			"  ;\n" +
+			"b : 'b';\n");
 		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
 		Object expectedArg = "bs";
 		Object expectedArg2 = "rule!=rule-list";
@@ -1779,15 +2064,15 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testArgReturnValueMismatch() throws Exception {
+	@Test public void testArgReturnValueMismatch() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a[int i] returns [int x, int i]\n" +
-				"  : \n" +
-				"  ;\n" +
-				"b : ;\n");
+			"a[int i] returns [int x, int i]\n" +
+			"  : \n" +
+			"  ;\n" +
+			"b : ;\n");
 		int expectedMsgID = ErrorManager.MSG_ARG_RETVAL_CONFLICT;
 		Object expectedArg = "i";
 		Object expectedArg2 = "a";
@@ -1796,7 +2081,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testSimplePlusEqualLabel() throws Exception {
+	@Test public void testSimplePlusEqualLabel() throws Exception {
 		String action = "$ids.size();"; // must be qualified
 		String expecting = "list_ids.size();";
 
@@ -1804,13 +2089,13 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-				"a : ids+=ID ( COMMA ids+=ID {"+action+"})* ;\n");
+			"a : ids+=ID ( COMMA ids+=ID {"+action+"})* ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -1824,7 +2109,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testPlusEqualStringLabel() throws Exception {
+	@Test public void testPlusEqualStringLabel() throws Exception {
 		String action = "$ids.size();"; // must be qualified
 		String expecting = "list_ids.size();";
 
@@ -1832,14 +2117,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ids+='if' ( ',' ids+=ID {"+action+"})* ;" +
-				"ID : 'a';\n");
+			"a : ids+='if' ( ',' ids+=ID {"+action+"})* ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -1853,7 +2138,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testPlusEqualSetLabel() throws Exception {
+	@Test public void testPlusEqualSetLabel() throws Exception {
 		String action = "$ids.size();"; // must be qualified
 		String expecting = "list_ids.size();";
 
@@ -1861,14 +2146,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ids+=('a'|'b') ( ',' ids+=ID {"+action+"})* ;" +
-				"ID : 'a';\n");
+			"a : ids+=('a'|'b') ( ',' ids+=ID {"+action+"})* ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -1882,7 +2167,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testPlusEqualWildcardLabel() throws Exception {
+	@Test public void testPlusEqualWildcardLabel() throws Exception {
 		String action = "$ids.size();"; // must be qualified
 		String expecting = "list_ids.size();";
 
@@ -1890,12 +2175,12 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ids+=. ( ',' ids+=ID {"+action+"})* ;" +
-				"ID : 'a';\n");
+			"a : ids+=. ( ',' ids+=ID {"+action+"})* ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		g.setCodeGenerator(generator);
@@ -1911,22 +2196,22 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testImplicitTokenLabel() throws Exception {
+	@Test public void testImplicitTokenLabel() throws Exception {
 		String action = "$ID; $ID.text; $ID.getText()";
-		String expecting = "ID1; ID1.getText(); ID1.getText()";
+		String expecting = "ID1; (ID1!=null?ID1.getText():null); ID1.getText()";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ID {"+action+"} ;" +
-				"ID : 'a';\n");
+			"a : ID {"+action+"} ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		g.setCodeGenerator(generator);
@@ -1942,16 +2227,16 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testImplicitRuleLabel() throws Exception {
+	@Test public void testImplicitRuleLabel() throws Exception {
 		String action = "$r.start;";
-		String expecting = "((Token)r1.start);";
+		String expecting = "(r1!=null?((Token)r1.start):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : r {###"+action+"!!!} ;" +
-				"r : 'a';\n");
+			"a : r {###"+action+"!!!} ;" +
+			"r : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -1966,16 +2251,16 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception {
+	@Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception {
 		String action = "$r.start;";
-		String expecting = "((Token)x.start);";
+		String expecting = "(x!=null?((Token)x.start):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : x=r {###"+action+"!!!} ;" +
-				"r : 'a';\n");
+			"a : x=r {###"+action+"!!!} ;" +
+			"r : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -1990,17 +2275,17 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception {
+	@Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception {
 		String action = "$r.start;";
-		String expecting = "((Token)x.start);";
+		String expecting = "(x!=null?((Token)x.start):null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"options {output=AST;}\n" +
-				"a : x+=r {###"+action+"!!!} ;" +
-				"r : 'a';\n");
+			"options {output=AST;}\n" +
+			"a : x+=r {###"+action+"!!!} ;" +
+			"r : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2015,23 +2300,23 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception {
+	@Test public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception {
 		String action = "$ID.text;";
-		String expecting = "x.getText();";
+		String expecting = "(x!=null?x.getText():null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : x=ID {"+action+"} ;" +
-				"ID : 'a';\n");
+			"a : x=ID {"+action+"} ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer();
 
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -2044,23 +2329,23 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception {
+	@Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception {
 		String action = "$ID.text;";
-		String expecting = "x.getText();";
+		String expecting = "(x!=null?x.getText():null);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : x+=ID {"+action+"} ;" +
-				"ID : 'a';\n");
+			"a : x+=ID {"+action+"} ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer();
 
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -2073,7 +2358,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRuleLabelWithoutOutputOption() throws Exception {
+	@Test public void testRuleLabelWithoutOutputOption() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -2096,7 +2381,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testRuleLabelOnTwoDifferentRulesAST() throws Exception {
+	@Test public void testRuleLabelOnTwoDifferentRulesAST() throws Exception {
 		String grammar =
 			"grammar T;\n"+
 			"options {output=AST;}\n"+
@@ -2106,11 +2391,11 @@ public class TestAttributes extends BaseTest {
 			"WS : (' '|'\n') {skip();};\n";
 		String expecting = "[a, b]\na b\n";
 		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "a b", false);
+								  "s", "a b", false);
 		assertEquals(expecting, found);
 	}
 
-	public void testRuleLabelOnTwoDifferentRulesTemplate() throws Exception {
+	@Test public void testRuleLabelOnTwoDifferentRulesTemplate() throws Exception {
 		String grammar =
 			"grammar T;\n"+
 			"options {output=template;}\n"+
@@ -2120,17 +2405,17 @@ public class TestAttributes extends BaseTest {
 			"WS : (' '|'\n') {skip();};\n";
 		String expecting = "[hi, mom]\n";
 		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "a b", false);
+								  "s", "a b", false);
 		assertEquals(expecting, found);
 	}
 
-	public void testMissingArgs() throws Exception {
+	@Test public void testMissingArgs() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : r ;" +
-				"r[int i] : 'a';\n");
+			"a : r ;" +
+			"r[int i] : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2145,13 +2430,13 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testArgsWhenNoneDefined() throws Exception {
+	@Test public void testArgsWhenNoneDefined() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : r[32,34] ;" +
-				"r : 'a';\n");
+			"a : r[32,34] ;" +
+			"r : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2166,7 +2451,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testReturnInitValue() throws Exception {
+	@Test public void testReturnInitValue() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -2185,7 +2470,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testMultipleReturnInitValue() throws Exception {
+	@Test public void testMultipleReturnInitValue() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -2204,7 +2489,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("String s=new String(\"foo\")", parameters.get(2).toString());
 	}
 
-	public void testCStyleReturnInitValue() throws Exception {
+	@Test public void testCStyleReturnInitValue() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -2223,13 +2508,13 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testArgsWithInitValues() throws Exception {
+	@Test public void testArgsWithInitValues() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : r[32,34] ;" +
-				"r[int x, int y=3] : 'a';\n");
+			"a : r[32,34] ;" +
+			"r[int x, int y=3] : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2244,13 +2529,13 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testArgsOnToken() throws Exception {
+	@Test public void testArgsOnToken() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ID[32,34] ;" +
-				"ID : 'a';\n");
+			"a : ID[32,34] ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2265,13 +2550,13 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testArgsOnTokenInLexer() throws Exception {
+	@Test public void testArgsOnTokenInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : 'z' ID[32,34] ;" +
-				"ID : 'a';\n");
+			"R : 'z' ID[32,34] ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2286,21 +2571,21 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testLabelOnRuleRefInLexer() throws Exception {
+	@Test public void testLabelOnRuleRefInLexer() throws Exception {
 		String action = "$i.text";
-		String expecting = "i.getText()";
+		String expecting = "(i!=null?i.getText():null)";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : 'z' i=ID {"+action+"};" +
-				"fragment ID : 'a';\n");
+			"R : 'z' i=ID {"+action+"};" +
+			"fragment ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "R",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2315,21 +2600,21 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRefToRuleRefInLexer() throws Exception {
+	@Test public void testRefToRuleRefInLexer() throws Exception {
 		String action = "$ID.text";
-		String expecting = "ID1.getText()";
+		String expecting = "(ID1!=null?ID1.getText():null)";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : 'z' ID {"+action+"};" +
-				"ID : 'a';\n");
+			"R : 'z' ID {"+action+"};" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "R",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2344,21 +2629,21 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testRefToRuleRefInLexerNoAttribute() throws Exception {
+	@Test public void testRefToRuleRefInLexerNoAttribute() throws Exception {
 		String action = "$ID";
 		String expecting = "ID1";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : 'z' ID {"+action+"};" +
-				"ID : 'a';\n");
+			"R : 'z' ID {"+action+"};" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "R",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2373,12 +2658,12 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testCharLabelInLexer() throws Exception {
+	@Test public void testCharLabelInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : x='z' ;\n");
+			"R : x='z' ;\n");
 
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2388,12 +2673,12 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testCharListLabelInLexer() throws Exception {
+	@Test public void testCharListLabelInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : x+='z' ;\n");
+			"R : x+='z' ;\n");
 
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2402,12 +2687,12 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testWildcardCharLabelInLexer() throws Exception {
+	@Test public void testWildcardCharLabelInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : x=. ;\n");
+			"R : x=. ;\n");
 
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2416,12 +2701,12 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testWildcardCharListLabelInLexer() throws Exception {
+	@Test public void testWildcardCharListLabelInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : x+=. ;\n");
+			"R : x+=. ;\n");
 
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2430,13 +2715,13 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testMissingArgsInLexer() throws Exception {
+	@Test public void testMissingArgsInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"A : R ;" +
-				"R[int i] : 'a';\n");
+			"A : R ;" +
+			"R[int i] : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2452,20 +2737,20 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testLexerRulePropertyRefs() throws Exception {
+	@Test public void testLexerRulePropertyRefs() throws Exception {
 		String action = "$text $type $line $pos $channel $index $start $stop";
-		String expecting = "getText() _type tokenStartLine tokenStartCharPositionInLine channel -1 tokenStartCharIndex (getCharIndex()-1)";
+		String expecting = "getText() _type state.tokenStartLine state.tokenStartCharPositionInLine _channel -1 state.tokenStartCharIndex (getCharIndex()-1)";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : 'r' {"+action+"};\n");
+			"R : 'r' {"+action+"};\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "R",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2480,21 +2765,21 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testLexerLabelRefs() throws Exception {
+	@Test public void testLexerLabelRefs() throws Exception {
 		String action = "$a $b.text $c $d.text";
-		String expecting = "a b.getText() c d.getText()";
+		String expecting = "a (b!=null?b.getText():null) c (d!=null?d.getText():null)";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : a='c' b='hi' c=. d=DUH {"+action+"};\n" +
-				"DUH : 'd' ;\n");
+			"R : a='c' b='hi' c=. d=DUH {"+action+"};\n" +
+			"DUH : 'd' ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "R",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2509,20 +2794,20 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testSettingLexerRulePropertyRefs() throws Exception {
+	@Test public void testSettingLexerRulePropertyRefs() throws Exception {
 		String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index";
-		String expecting = "getText() _type=1 tokenStartLine=1 tokenStartCharPositionInLine=1 channel=1 -1";
+		String expecting = "getText() _type=1 state.tokenStartLine=1 state.tokenStartCharPositionInLine=1 _channel=1 -1";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
-				"R : 'r' {"+action+"};\n");
+			"R : 'r' {"+action+"};\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "R",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2537,21 +2822,23 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testArgsOnTokenInLexerRuleOfCombined() throws Exception {
+	@Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : R;\n" +
-				"R : 'z' ID[32] ;\n" +
-				"ID : 'a';\n");
+			"a : R;\n" +
+			"R : 'z' ID[32] ;\n" +
+			"ID : 'a';\n");
 
 		String lexerGrammarStr = g.getLexerGrammar();
 		StringReader sr = new StringReader(lexerGrammarStr);
 		Grammar lexerGrammar = new Grammar();
 		lexerGrammar.setFileName("<internally-generated-lexer>");
 		lexerGrammar.importTokenVocabulary(g);
-		lexerGrammar.setGrammarContent(sr);
+		lexerGrammar.parseAndBuildAST(sr);
+		lexerGrammar.defineGrammarSymbols();
+		lexerGrammar.checkNameSpaceAndActions();
 		sr.close();
 
 		Tool antlr = newTool();
@@ -2568,21 +2855,23 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception {
+	@Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : R;\n" +
-				"R : 'z' ID ;\n" +
-				"ID[int i] : 'a';\n");
+			"a : R;\n" +
+			"R : 'z' ID ;\n" +
+			"ID[int i] : 'a';\n");
 
 		String lexerGrammarStr = g.getLexerGrammar();
 		StringReader sr = new StringReader(lexerGrammarStr);
 		Grammar lexerGrammar = new Grammar();
 		lexerGrammar.setFileName("<internally-generated-lexer>");
 		lexerGrammar.importTokenVocabulary(g);
-		lexerGrammar.setGrammarContent(sr);
+		lexerGrammar.parseAndBuildAST(sr);
+		lexerGrammar.defineGrammarSymbols();
+		lexerGrammar.checkNameSpaceAndActions();
 		sr.close();
 
 		Tool antlr = newTool();
@@ -2601,7 +2890,7 @@ public class TestAttributes extends BaseTest {
 
 	// T R E E S
 
-	public void testTokenLabelTreeProperty() throws Exception {
+	@Test public void testTokenLabelTreeProperty() throws Exception {
 		String action = "$id.tree;";
 		String expecting = "id_tree;";
 
@@ -2609,14 +2898,14 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : id=ID {"+action+"} ;\n" +
-				"ID : 'a';\n");
+			"a : id=ID {"+action+"} ;\n" +
+			"ID : 'a';\n");
 
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 									  "a",
 									  new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		g.setCodeGenerator(generator);
@@ -2632,7 +2921,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testTokenRefTreeProperty() throws Exception {
+	@Test public void testTokenRefTreeProperty() throws Exception {
 		String action = "$ID.tree;";
 		String expecting = "ID1_tree;";
 
@@ -2640,15 +2929,15 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ID {"+action+"} ;" +
-				"ID : 'a';\n");
+			"a : ID {"+action+"} ;" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer();
 
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,"a",
+		ActionTranslator translator = new ActionTranslator(generator,"a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
 			translator.translate();
@@ -2659,7 +2948,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testAmbiguousTokenRef() throws Exception {
+	@Test public void testAmbiguousTokenRef() throws Exception {
 		String action = "$ID;";
 		String expecting = "";
 
@@ -2667,8 +2956,8 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ID ID {"+action+"};" +
-				"ID : 'a';\n");
+			"a : ID ID {"+action+"};" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2682,7 +2971,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testAmbiguousTokenRefWithProp() throws Exception {
+	@Test public void testAmbiguousTokenRefWithProp() throws Exception {
 		String action = "$ID.text;";
 		String expecting = "";
 
@@ -2690,8 +2979,8 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar t;\n"+
-				"a : ID ID {"+action+"};" +
-				"ID : 'a';\n");
+			"a : ID ID {"+action+"};" +
+			"ID : 'a';\n");
 		Tool antlr = newTool();
 		antlr.setOutputDirectory(null); // write to /dev/null
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
@@ -2705,7 +2994,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testRuleRefWithDynamicScope() throws Exception {
+	@Test public void testRuleRefWithDynamicScope() throws Exception {
 		String action = "$field::x = $field.st;";
 		String expecting = "((field_scope)field_stack.peek()).x = retval.st;";
 
@@ -2713,15 +3002,15 @@ public class TestAttributes extends BaseTest {
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 			"grammar a;\n" +
-				"field\n" +
-				"scope { StringTemplate x; }\n" +
-				"    :   'y' {"+action+"}\n" +
-				"    ;\n");
+			"field\n" +
+			"scope { StringTemplate x; }\n" +
+			"    :   'y' {"+action+"}\n" +
+			"    ;\n");
 		Tool antlr = newTool();
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "field",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2735,7 +3024,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testAssignToOwnRulenameAttr() throws Exception {
+	@Test public void testAssignToOwnRulenameAttr() throws Exception {
 		String action = "$rule.tree = null;";
 		String expecting = "retval.tree = null;";
 		ErrorQueue equeue = new ErrorQueue();
@@ -2749,7 +3038,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2763,7 +3052,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testAssignToOwnParamAttr() throws Exception {
+	@Test public void testAssignToOwnParamAttr() throws Exception {
 		String action = "$rule.i = 42; $i = 23;";
 		String expecting = "i = 42; i = 23;";
 		ErrorQueue equeue = new ErrorQueue();
@@ -2777,7 +3066,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2791,7 +3080,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	public void testIllegalAssignToOwnRulenameAttr() throws Exception {
+	@Test public void testIllegalAssignToOwnRulenameAttr() throws Exception {
 		String action = "$rule.stop = 0;";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -2804,7 +3093,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2818,7 +3107,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testIllegalAssignToLocalAttr() throws Exception {
+	@Test public void testIllegalAssignToLocalAttr() throws Exception {
 		String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;";
 		String expecting = "retval.tree = null; retval.st = null;   ";
 		ErrorQueue equeue = new ErrorQueue();
@@ -2832,7 +3121,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2858,7 +3147,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testIllegalAssignRuleRefAttr() throws Exception {
+	@Test public void testIllegalAssignRuleRefAttr() throws Exception {
 		String action = "$other.tree = null;";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -2874,7 +3163,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2888,7 +3177,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testIllegalAssignTokenRefAttr() throws Exception {
+	@Test public void testIllegalAssignTokenRefAttr() throws Exception {
 		String action = "$ID.text = \"test\";";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -2903,7 +3192,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2917,7 +3206,7 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testAssignToTreeNodeAttribute() throws Exception {
+	@Test public void testAssignToTreeNodeAttribute() throws Exception {
 		String action = "$tree.scope = localScope;";
 		String expecting = "(()retval.tree).scope = localScope;";
 		ErrorQueue equeue = new ErrorQueue();
@@ -2938,7 +3227,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "rule",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2951,9 +3240,9 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testDoNotTranslateAttributeCompare() throws Exception {
+	@Test public void testDoNotTranslateAttributeCompare() throws Exception {
 		String action = "$a.line == $b.line";
-		String expecting = "a.getLine() == b.getLine()";
+		String expecting = "(a!=null?a.getLine():0) == (b!=null?b.getLine():0)";
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -2967,7 +3256,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer();
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "RULE",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -2980,7 +3269,7 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testDoNotTranslateScopeAttributeCompare() throws Exception {
+	@Test public void testDoNotTranslateScopeAttributeCompare() throws Exception {
 		String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
 		String expecting = "if (((rule_scope)rule_stack.peek()).foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
 		ErrorQueue equeue = new ErrorQueue();
@@ -3002,7 +3291,7 @@ public class TestAttributes extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer();
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "twoIDs",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -3026,9 +3315,9 @@ public class TestAttributes extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTreeRuleStopAttributeIsInvalid() throws Exception {
+	@Test public void testTreeRuleStopAttributeIsInvalid() throws Exception {
 		String action = "$r.x; $r.start; $r.stop";
-		String expecting = "r.x; ((CommonTree)r.start); $r.stop";
+		String expecting = "(r!=null?r.x:0); (r!=null?((CommonTree)r.start):null); $r.stop";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -3059,11 +3348,11 @@ public class TestAttributes extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testRefToTextAttributeForCurrentTreeRule() throws Exception {
+	@Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception {
 		String action = "$text";
 		String expecting = "input.getTokenStream().toString(\n" +
-			"              input.getTreeAdaptor().getTokenStartIndex(retval.start),\n" +
-			"              input.getTreeAdaptor().getTokenStopIndex(retval.start))";
+						   "              input.getTreeAdaptor().getTokenStartIndex(retval.start),\n" +
+						   "              input.getTreeAdaptor().getTokenStopIndex(retval.start))";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -3086,6 +3375,36 @@ public class TestAttributes extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
+	@Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception {
+		String action = "int x = $b::n;";
+		String expecting = "int x = ((b_scope)b_stack.peek()).n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"s : b ;\n"+
+			"b\n" +
+			"scope {\n" +
+			"  int n;\n" +
+			"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
+			"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		StringTemplateGroup templates =
+			new StringTemplateGroup(".", AngleBracketTemplateLexer.class);
+		StringTemplate actionST = new StringTemplate(templates, rawTranslation);
+		String found = actionST.toString();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
 
 	// S U P P O R T
 
@@ -3106,7 +3425,7 @@ public class TestAttributes extends BaseTest {
 			}
 		}
 		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size() > 0);
-		assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
+		assertNotNull("couldn't find expected error: "+expectedMessage.msgID+" in "+equeue, foundMsg);
 		assertTrue("error is not a GrammarSemanticsMessage",
 				   foundMsg instanceof GrammarSemanticsMessage);
 		assertEquals(expectedMessage.arg, foundMsg.arg);
diff --git a/src/org/antlr/test/TestAutoAST.java b/tool/src/test/java/org/antlr/test/TestAutoAST.java
similarity index 55%
rename from src/org/antlr/test/TestAutoAST.java
rename to tool/src/test/java/org/antlr/test/TestAutoAST.java
index 46dbcc3..fc39ce1 100644
--- a/src/org/antlr/test/TestAutoAST.java
+++ b/tool/src/test/java/org/antlr/test/TestAutoAST.java
@@ -27,10 +27,14 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 package org.antlr.test;
 
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestAutoAST extends BaseTest {
 	protected boolean debug = false;
 
-	public void testTokenList() throws Exception {
+	@Test public void testTokenList() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"options {output=AST;}\n" +
@@ -43,7 +47,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("abc 34\n", found);
 	}
 
-	public void testTokenListInSingleAltBlock() throws Exception {
+	@Test public void testTokenListInSingleAltBlock() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"options {output=AST;}\n" +
@@ -56,7 +60,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("abc 34\n", found);
 	}
 
-	public void testSimpleRootAtOuterLevel() throws Exception {
+	@Test public void testSimpleRootAtOuterLevel() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"options {output=AST;}\n" +
@@ -69,7 +73,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(abc 34)\n", found);
 	}
 
-	public void testSimpleRootAtOuterLevelReverse() throws Exception {
+	@Test public void testSimpleRootAtOuterLevelReverse() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -82,7 +86,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(abc 34)\n", found);
 	}
 
-	public void testBang() throws Exception {
+	@Test public void testBang() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -95,7 +99,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("abc 4532\n", found);
 	}
 
-	public void testOptionalThenRoot() throws Exception {
+	@Test public void testOptionalThenRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -108,7 +112,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(b a 1)\n", found);
 	}
 
-	public void testLabeledStringRoot() throws Exception {
+	@Test public void testLabeledStringRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -121,7 +125,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(void foo ;)\n", found);
 	}
 
-	public void testWildcard() throws Exception {
+	@Test public void testWildcard() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -134,7 +138,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(void foo ;)\n", found);
 	}
 
-	public void testWildcardRoot() throws Exception {
+	@Test public void testWildcardRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -147,7 +151,46 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(foo void ;)\n", found);
 	}
 
-	public void testRootRoot() throws Exception {
+	@Test public void testWildcardRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void' x=.^ ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(foo void ;)\n", found);
+	}
+
+    @Test public void testWildcardRootWithListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : v='void' x=.^ ';' ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                                  "a", "void foo;", debug);
+        assertEquals("(foo void ;)\n", found);
+    }
+
+    @Test public void testWildcardBangWithListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : v='void' x=.! ';' ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                                  "a", "void foo;", debug);
+        assertEquals("void ;\n", found);
+    }
+
+	@Test public void testRootRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -160,7 +203,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(34 a c)\n", found);
 	}
 
-	public void testRootRoot2() throws Exception {
+	@Test public void testRootRoot2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -173,7 +216,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(c (34 a))\n", found);
 	}
 
-	public void testRootThenRootInLoop() throws Exception {
+	@Test public void testRootThenRootInLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -186,7 +229,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(* (* (a 34) b 9) c)\n", found);
 	}
 
-	public void testNestedSubrule() throws Exception {
+	@Test public void testNestedSubrule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -199,7 +242,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("void a b ;\n", found);
 	}
 
-	public void testInvokeRule() throws Exception {
+	@Test public void testInvokeRule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -213,7 +256,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("int a\n", found);
 	}
 
-	public void testInvokeRuleAsRoot() throws Exception {
+	@Test public void testInvokeRuleAsRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -227,7 +270,35 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(int a)\n", found);
 	}
 
-	public void testRuleRootInLoop() throws Exception {
+	@Test public void testInvokeRuleAsRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : x=type^ ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("(int a)\n", found);
+	}
+
+	@Test public void testInvokeRuleAsRootWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : x+=type^ ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("(int a)\n", found);
+	}
+
+	@Test public void testRuleRootInLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -240,7 +311,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(+ (+ (+ a b) c) d)\n", found);
 	}
 
-	public void testRuleInvocationRuleRootInLoop() throws Exception {
+	@Test public void testRuleInvocationRuleRootInLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -254,7 +325,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(- (+ (+ a b) c) d)\n", found);
 	}
 
-	public void testTailRecursion() throws Exception {
+	@Test public void testTailRecursion() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -269,7 +340,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(exp 3 (exp 4 5))\n", found);
 	}
 
-	public void testSet() throws Exception {
+	@Test public void testSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -282,7 +353,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testSetRoot() throws Exception {
+	@Test public void testSetRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -295,7 +366,24 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(+ abc)\n", found);
 	}
 
-	public void testSetAsRuleRootInLoop() throws Exception {
+	@Ignore
+    // TODO: FAILS until I rebuild the antlr.g in v3
+    //
+    public void testSetRootWithLabel() throws Exception {
+		
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=('+' | '-')^ ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "+abc", debug);
+		assertEquals("(+ abc)\n", found);
+	}
+
+	@Test public void testSetAsRuleRootInLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -308,7 +396,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(- (+ a b) c)\n", found);
 	}
 
-	public void testNotSet() throws Exception {
+	@Test public void testNotSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -321,7 +409,59 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("34 + 2\n", found);
 	}
 
-	public void testNotSetRoot() throws Exception {
+	@Test public void testNotSetWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=~ID '+' INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34+2", debug);
+		assertEquals("34 + 2\n", found);
+	}
+
+	@Test public void testNotSetWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=~ID '+' INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34+2", debug);
+		assertEquals("34 + 2\n", found);
+	}
+
+	@Test public void testNotSetRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~'+'^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 55", debug);
+		assertEquals("(34 55)\n", found);
+	}
+
+	@Test public void testNotSetRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~'+'^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 55", debug);
+		assertEquals("(34 55)\n", found);
+	}
+
+	@Test public void testNotSetRootWithListLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -334,7 +474,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(34 55)\n", found);
 	}
 
-	public void testNotSetRuleRootInLoop() throws Exception {
+	@Test public void testNotSetRuleRootInLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -348,7 +488,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("(+ (+ 3 4) 5)\n", found);
 	}
 
-	public void testTokenLabelReuse() throws Exception {
+	@Test public void testTokenLabelReuse() throws Exception {
 		// check for compilation problem due to multiple defines
 		String grammar =
 			"grammar T;\n" +
@@ -362,7 +502,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("2nd id=b;a b\n", found);
 	}
 
-	public void testTokenLabelReuse2() throws Exception {
+	@Test public void testTokenLabelReuse2() throws Exception {
 		// check for compilation problem due to multiple defines
 		String grammar =
 			"grammar T;\n" +
@@ -376,7 +516,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("2nd id=b;(b a)\n", found);
 	}
 
-	public void testTokenListLabelReuse() throws Exception {
+	@Test public void testTokenListLabelReuse() throws Exception {
 		// check for compilation problem due to multiple defines
 		// make sure ids has both ID tokens
 		String grammar =
@@ -392,7 +532,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTokenListLabelReuse2() throws Exception {
+	@Test public void testTokenListLabelReuse2() throws Exception {
 		// check for compilation problem due to multiple defines
 		// make sure ids has both ID tokens
 		String grammar =
@@ -408,7 +548,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTokenListLabelRuleRoot() throws Exception {
+	@Test public void testTokenListLabelRuleRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -421,7 +561,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("a\n", found);
 	}
 
-	public void testTokenListLabelBang() throws Exception {
+	@Test public void testTokenListLabelBang() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -431,16 +571,16 @@ public class TestAutoAST extends BaseTest {
 			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
 		String found = execParser("T.g", grammar, "TParser", "TLexer",
 								  "a", "a", debug);
-		assertEquals("nil\n", found);
+		assertEquals("", found);
 	}
 
-	public void testRuleListLabel() throws Exception {
+	@Test public void testRuleListLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
 			"a : x+=b x+=b {" +
-				"Tree t=(Tree)$x.get(1);" +
-				"System.out.print(\"2nd x=\"+t.toStringTree()+';');} ;\n" +
+			"Tree t=(Tree)$x.get(1);" +
+			"System.out.print(\"2nd x=\"+t.toStringTree()+';');} ;\n" +
 			"b : ID;\n" +
 			"ID : 'a'..'z'+ ;\n" +
 			"INT : '0'..'9'+;\n" +
@@ -450,7 +590,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("2nd x=b;a b\n", found);
 	}
 
-	public void testRuleListLabelRuleRoot() throws Exception {
+	@Test public void testRuleListLabelRuleRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -465,7 +605,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("x=(b a);(b a)\n", found);
 	}
 
-	public void testRuleListLabelBang() throws Exception {
+	@Test public void testRuleListLabelBang() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -480,12 +620,12 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("1st x=a;b\n", found);
 	}
 
-	public void testComplicatedMelange() throws Exception {
+	@Test public void testComplicatedMelange() throws Exception {
 		// check for compilation problem
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
-			"a : A b=B b=B c+=C c+=C D {$D.text;} ;\n" +
+			"a : A b=B b=B c+=C c+=C D {String s = $D.text;} ;\n" +
 			"A : 'a' ;\n" +
 			"B : 'b' ;\n" +
 			"C : 'c' ;\n" +
@@ -496,7 +636,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("a b b c c d\n", found);
 	}
 
-	public void testReturnValueWithAST() throws Exception {
+	@Test public void testReturnValueWithAST() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"options {output=AST;}\n" +
@@ -510,7 +650,7 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("34\nabc 34\n", found);
 	}
 
-	public void testSetLoop() throws Exception {
+	@Test public void testSetLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options { output=AST; }\n" +
@@ -523,6 +663,148 @@ public class TestAutoAST extends BaseTest {
 		assertEquals("abc 34 d\n", found);
 	}
 
+	@Test public void testExtraTokenInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"decl : type^ ID '='! INT ';'! ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int 34 x=1;", debug);
+		assertEquals("line 1:4 extraneous input '34' expecting ID\n", this.stderrDuringParse);
+		assertEquals("(int x 1)\n", found); // tree gets correct x and 1 tokens
+	}
+
+	@Test public void testMissingIDInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type^ ID '='! INT ';'! ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int =1;", debug);
+		assertEquals("line 1:4 missing ID at '='\n", this.stderrDuringParse);
+		assertEquals("(int <missing ID> 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingSetInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type^ ID '='! INT ';'! ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "x=1;", debug);
+		assertEquals("line 1:0 mismatched input 'x' expecting set null\n", this.stderrDuringParse);
+		assertEquals("(<error: x> x 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" + // follow is EOF
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc", debug);
+		assertEquals("line 0:-1 missing INT at '<EOF>'\n", this.stderrDuringParse);
+		assertEquals("abc <missing INT>\n", found);
+	}
+
+	@Test public void testMissingTokenGivesErrorNodeInInvokedRule() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b ;\n" +
+			"b : ID INT ;\n" + // follow should see EOF
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc", debug);
+		assertEquals("line 0:-1 mismatched input '<EOF>' expecting INT\n", this.stderrDuringParse);
+		assertEquals("<mismatched token: [@-1,0:0='<no text>',<-1>,0:-1], resync=abc>\n", found);
+	}
+
+	@Test public void testExtraTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc ick 34", debug);
+		assertEquals("line 1:4 extraneous input 'ick' expecting INT\n", this.stderrDuringParse);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode2() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		// finds an error at the first token, 34, and re-syncs.
+		// re-synchronizing does not consume a token because 34 follows
+		// ref to rule b (start of c). It then matches 34 in c.
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testNoViableAltGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b | c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"S : '*' ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "*", debug);
+		assertEquals("line 1:0 no viable alternative at input '*'\n", this.stderrDuringParse);
+		assertEquals("<unexpected: [@0,0:0='*',<6>,1:0], resync=*>\n", found);
+	}
+
 
 	// S U P P O R T
 
diff --git a/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java b/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java
new file mode 100644
index 0000000..61ab1ca
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java
@@ -0,0 +1,71 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.tree.*;
+import org.antlr.runtime.CommonToken;
+
+import org.junit.Test;
+
+public class TestBufferedTreeNodeStream extends TestTreeNodeStream {
+    // inherits tests; these methods make it use a new buffer
+
+	public TreeNodeStream newStream(Object t) {
+		return new BufferedTreeNodeStream(t);
+	}
+
+    public String toTokenTypeString(TreeNodeStream stream) {
+        return ((BufferedTreeNodeStream)stream).toTokenTypeString();
+    }
+
+    @Test public void testSeek() throws Exception {
+        // ^(101 ^(102 103 ^(106 107) ) 104 105)
+        // stream has 7 real + 6 nav nodes
+        // Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        Tree r0 = new CommonTree(new CommonToken(101));
+        Tree r1 = new CommonTree(new CommonToken(102));
+        r0.addChild(r1);
+        r1.addChild(new CommonTree(new CommonToken(103)));
+        Tree r2 = new CommonTree(new CommonToken(106));
+        r2.addChild(new CommonTree(new CommonToken(107)));
+        r1.addChild(r2);
+        r0.addChild(new CommonTree(new CommonToken(104)));
+        r0.addChild(new CommonTree(new CommonToken(105)));
+
+        TreeNodeStream stream = newStream(r0);
+        stream.consume(); // consume 101
+        stream.consume(); // consume DN
+        stream.consume(); // consume 102
+        stream.seek(7);   // seek to 107
+        assertEquals(107, ((Tree)stream.LT(1)).getType());
+        stream.consume(); // consume 107
+        stream.consume(); // consume UP
+        stream.consume(); // consume UP
+        assertEquals(104, ((Tree)stream.LT(1)).getType());
+    }    
+}
diff --git a/src/org/antlr/test/TestCharDFAConversion.java b/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
similarity index 73%
rename from src/org/antlr/test/TestCharDFAConversion.java
rename to tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
index 2247096..4260985 100644
--- a/src/org/antlr/test/TestCharDFAConversion.java
+++ b/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
@@ -32,6 +32,11 @@ import org.antlr.analysis.DFAOptimizer;
 import org.antlr.codegen.CodeGenerator;
 import org.antlr.tool.*;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 import java.util.List;
 
 public class TestCharDFAConversion extends BaseTest {
@@ -42,20 +47,20 @@ public class TestCharDFAConversion extends BaseTest {
 
 	// R A N G E S  &  S E T S
 
-	public void testSimpleRangeVersusChar() throws Exception {
+	@Test public void testSimpleRangeVersusChar() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : 'a'..'z' '@' | 'k' '$' ;");
 		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'k'->.s1\n" +
-			".s0-{'a'..'j', 'l'..'z'}->:s3=>1\n" +
-			".s1-'$'->:s2=>2\n" +
-			".s1-'@'->:s3=>1\n";
+			".s0-{'a'..'j', 'l'..'z'}->:s2=>1\n" +
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testRangeWithDisjointSet() throws Exception {
+	@Test public void testRangeWithDisjointSet() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : 'a'..'z' '@'\n" +
@@ -64,49 +69,48 @@ public class TestCharDFAConversion extends BaseTest {
 		g.createLookaheadDFAs();
 		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'}
 		String expecting =
-			".s0-'9'->:s2=>2\n" +
-			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>1\n" +
+			".s0-'9'->:s3=>2\n" +
+			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
 			".s0-{'k', 'p'}->.s1\n" +
-			".s1-'$'->:s2=>2\n" +
-			".s1-'@'->:s3=>1\n";
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testDisjointSetCollidingWithTwoRanges() throws Exception {
+	@Test public void testDisjointSetCollidingWithTwoRanges() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : ('a'..'z'|'0'..'9') '@'\n" +
 			"  | ('k'|'9'|'p') '$'\n" +
 			"  ;\n");
-		g.createLookaheadDFAs();
+		g.createLookaheadDFAs(false);
 		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
 		// into 0..8
 		String expecting =
-			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>1\n" +
+			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
 			".s0-{'9', 'k', 'p'}->.s1\n" +
-			".s1-'$'->:s2=>2\n" +
-			".s1-'@'->:s3=>1\n";
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testDisjointSetCollidingWithTwoRangesCharsFirst() throws Exception {
+	@Test public void testDisjointSetCollidingWithTwoRangesCharsFirst() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : ('k'|'9'|'p') '$'\n" +
 			"  | ('a'..'z'|'0'..'9') '@'\n" +
 			"  ;\n");
-		g.createLookaheadDFAs();
 		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
 		// into 0..8
 		String expecting =
-			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>2\n" +
+			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>2\n" +
 			".s0-{'9', 'k', 'p'}->.s1\n" +
-			".s1-'$'->:s3=>1\n" +
-			".s1-'@'->:s2=>2\n";
+			".s1-'$'->:s2=>1\n" +
+			".s1-'@'->:s3=>2\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testDisjointSetCollidingWithTwoRangesAsSeparateAlts() throws Exception {
+	@Test public void testDisjointSetCollidingWithTwoRangesAsSeparateAlts() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : 'a'..'z' '@'\n" +
@@ -115,7 +119,6 @@ public class TestCharDFAConversion extends BaseTest {
 			"  | 'p' '$'\n" +
 			"  | '0'..'9' '@'\n" +
 			"  ;\n");
-		g.createLookaheadDFAs();
 		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
 		// into 0..8
 		String expecting =
@@ -123,17 +126,17 @@ public class TestCharDFAConversion extends BaseTest {
 			".s0-'9'->.s6\n" +
 			".s0-'k'->.s1\n" +
 			".s0-'p'->.s4\n" +
-			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>1\n" +
-			".s1-'$'->:s2=>2\n" +
-			".s1-'@'->:s3=>1\n" +
+			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n" +
 			".s4-'$'->:s5=>4\n" +
-			".s4-'@'->:s3=>1\n" +
+			".s4-'@'->:s2=>1\n" +
 			".s6-'$'->:s7=>3\n" +
 			".s6-'@'->:s8=>5\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testKeywordVersusID() throws Exception {
+	@Test public void testKeywordVersusID() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"IF : 'if' ;\n" + // choose this over ID
@@ -152,7 +155,7 @@ public class TestCharDFAConversion extends BaseTest {
 		checkDecision(g, 2, expecting, null);
 	}
 
-	public void testIdenticalRules() throws Exception {
+	@Test public void testIdenticalRules() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : 'a' ;\n" +
@@ -168,7 +171,7 @@ public class TestCharDFAConversion extends BaseTest {
 
 		assertEquals("unexpected number of expected problems",
 				    1, equeue.size());
-		Message msg = (Message)equeue.warnings.get(0);
+		Message msg = (Message)equeue.errors.get(0);
 		assertTrue("warning must be an unreachable alt",
 				    msg instanceof GrammarUnreachableAltsMessage);
 		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
@@ -176,7 +179,7 @@ public class TestCharDFAConversion extends BaseTest {
 
 	}
 
-	public void testAdjacentNotCharLoops() throws Exception {
+	@Test public void testAdjacentNotCharLoops() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : (~'r')+ ;\n" +
@@ -184,14 +187,14 @@ public class TestCharDFAConversion extends BaseTest {
 		String expecting =
 			".s0-'r'->:s3=>2\n" +
 			".s0-'s'->:s2=>1\n" +
-			".s0-{'\\u0000'..'q', 't'..'\\uFFFE'}->.s1\n" +
+			".s0-{'\\u0000'..'q', 't'..'\\uFFFF'}->.s1\n" +
 			".s1-'r'->:s3=>2\n" +
 			".s1-<EOT>->:s2=>1\n" +
-			".s1-{'\\u0000'..'q', 't'..'\\uFFFE'}->.s1\n";
+			".s1-{'\\u0000'..'q', 't'..'\\uFFFF'}->.s1\n";
 		checkDecision(g, 3, expecting, null);
 	}
 
-	public void testNonAdjacentNotCharLoops() throws Exception {
+	@Test public void testNonAdjacentNotCharLoops() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : (~'r')+ ;\n" +
@@ -199,20 +202,20 @@ public class TestCharDFAConversion extends BaseTest {
 		String expecting =
 			".s0-'r'->:s3=>2\n" +
 			".s0-'t'->:s2=>1\n" +
-			".s0-{'\\u0000'..'q', 's', 'u'..'\\uFFFE'}->.s1\n" +
+			".s0-{'\\u0000'..'q', 's', 'u'..'\\uFFFF'}->.s1\n" +
 			".s1-'r'->:s3=>2\n" +
 			".s1-<EOT>->:s2=>1\n" +
-			".s1-{'\\u0000'..'q', 's', 'u'..'\\uFFFE'}->.s1\n";
+			".s1-{'\\u0000'..'q', 's', 'u'..'\\uFFFF'}->.s1\n";
 		checkDecision(g, 3, expecting, null);
 	}
 
-	public void testLoopsWithOptimizedOutExitBranches() throws Exception {
+	@Test public void testLoopsWithOptimizedOutExitBranches() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : 'x'* ~'x'+ ;\n");
 		String expecting =
-			".s0-'x'->:s2=>1\n" +
-			".s0-{'\\u0000'..'w', 'y'..'\\uFFFE'}->:s1=>2\n";
+			".s0-'x'->:s1=>1\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s2=>2\n";
 		checkDecision(g, 1, expecting, null);
 
 		// The optimizer yanks out all exit branches from EBNF blocks
@@ -229,39 +232,39 @@ public class TestCharDFAConversion extends BaseTest {
 
 	// N O N G R E E D Y
 
-	public void testNonGreedy() throws Exception {
+	@Test public void testNonGreedy() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"CMT : '/*' ( options {greedy=false;} : . )* '*/' ;");
 		String expecting =
 			".s0-'*'->.s1\n" +
-			".s0-{'\\u0000'..')', '+'..'\\uFFFE'}->:s3=>1\n" +
+			".s0-{'\\u0000'..')', '+'..'\\uFFFF'}->:s3=>1\n" +
 			".s1-'/'->:s2=>2\n" +
-			".s1-{'\\u0000'..'.', '0'..'\\uFFFE'}->:s3=>1\n";
+			".s1-{'\\u0000'..'.', '0'..'\\uFFFF'}->:s3=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonGreedyWildcardStar() throws Exception {
+	@Test public void testNonGreedyWildcardStar() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"SLCMT : '//' ( options {greedy=false;} : . )* '\n' ;");
 		String expecting =
 			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonGreedyByDefaultWildcardStar() throws Exception {
+	@Test public void testNonGreedyByDefaultWildcardStar() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"SLCMT : '//' .* '\n' ;");
 		String expecting =
 			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonGreedyWildcardPlus() throws Exception {
+	@Test public void testNonGreedyWildcardPlus() throws Exception {
 		// same DFA as nongreedy .* but code gen checks number of
 		// iterations at runtime
 		Grammar g = new Grammar(
@@ -269,31 +272,31 @@ public class TestCharDFAConversion extends BaseTest {
 			"SLCMT : '//' ( options {greedy=false;} : . )+ '\n' ;");
 		String expecting =
 			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonGreedyByDefaultWildcardPlus() throws Exception {
+	@Test public void testNonGreedyByDefaultWildcardPlus() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"SLCMT : '//' .+ '\n' ;");
 		String expecting =
 			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonGreedyByDefaultWildcardPlusWithParens() throws Exception {
+	@Test public void testNonGreedyByDefaultWildcardPlusWithParens() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"SLCMT : '//' (.)+ '\n' ;");
 		String expecting =
 			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFE'}->:s2=>1\n";
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonWildcardNonGreedy() throws Exception {
+	@Test public void testNonWildcardNonGreedy() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"DUH : (options {greedy=false;}:'x'|'y')* 'xy' ;");
@@ -305,7 +308,7 @@ public class TestCharDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonWildcardEOTMakesItWorkWithoutNonGreedyOption() throws Exception {
+	@Test public void testNonWildcardEOTMakesItWorkWithoutNonGreedyOption() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"DUH : ('x'|'y')* 'xy' ;");
@@ -319,7 +322,7 @@ public class TestCharDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testAltConflictsWithLoopThenExit() throws Exception {
+	@Test public void testAltConflictsWithLoopThenExit() throws Exception {
 		// \" predicts alt 1, but wildcard then " can predict exit also
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
@@ -328,13 +331,13 @@ public class TestCharDFAConversion extends BaseTest {
 		String expecting =
 			".s0-'\"'->:s1=>3\n" +
 				".s0-'\\\\'->.s2\n" +
-				".s0-{'\\u0000'..'!', '#'..'[', ']'..'\\uFFFE'}->:s4=>2\n" +
+				".s0-{'\\u0000'..'!', '#'..'[', ']'..'\\uFFFF'}->:s4=>2\n" +
 				".s2-'\"'->:s3=>1\n" +
-				".s2-{'\\u0000'..'!', '#'..'\\uFFFE'}->:s4=>2\n";
+				".s2-{'\\u0000'..'!', '#'..'\\uFFFF'}->:s4=>2\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNonGreedyLoopThatNeverLoops() throws Exception {
+	@Test public void testNonGreedyLoopThatNeverLoops() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"DUH : (options {greedy=false;}:'x')+ ;"); // loop never matched
@@ -348,14 +351,14 @@ public class TestCharDFAConversion extends BaseTest {
 
 		assertEquals("unexpected number of expected problems",
 				    1, equeue.size());
-		Message msg = (Message)equeue.warnings.get(0);
+		Message msg = (Message)equeue.errors.get(0);
 		assertTrue("warning must be an unreachable alt",
 				   msg instanceof GrammarUnreachableAltsMessage);
 		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
 		assertEquals("[1]", u.alts.toString());
 	}
 
-	public void testRecursive() throws Exception {
+	@Test public void testRecursive() throws Exception {
 		// this is cool because the 3rd alt includes !(all other possibilities)
 		Grammar g = new Grammar(
 			"lexer grammar duh;\n" +
@@ -371,14 +374,14 @@ public class TestCharDFAConversion extends BaseTest {
 			"ESC     :       '\\\\' . ;");
 		g.createLookaheadDFAs();
 		String expecting =
-			".s0-'\\\\'->:s3=>2\n" +
-			".s0-'{'->:s2=>1\n" +
-			".s0-'}'->:s1=>4\n" +
-			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFE'}->:s4=>3\n";
+			".s0-'\\\\'->:s2=>2\n" +
+			".s0-'{'->:s1=>1\n" +
+			".s0-'}'->:s4=>4\n" +
+			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s3=>3\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testRecursive2() throws Exception {
+	@Test public void testRecursive2() throws Exception {
 		// this is also cool because it resolves \\ to be ESC alt; it's just
 		// less efficient of a DFA
 		Grammar g = new Grammar(
@@ -398,17 +401,17 @@ public class TestCharDFAConversion extends BaseTest {
 			".s0-'\\\\'->.s3\n" +
 			".s0-'{'->:s2=>1\n" +
 			".s0-'}'->:s1=>4\n" +
-			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFE'}->:s5=>3\n" +
+			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s5=>3\n" +
 			".s3-'\\\\'->:s8=>2\n" +
 			".s3-'{'->:s7=>2\n" +
 			".s3-'}'->.s4\n" +
-			".s3-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFE'}->:s6=>2\n" +
-			".s4-'\\u0000'..'\\uFFFE'->:s6=>2\n" +
+			".s3-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s6=>2\n" +
+			".s4-'\\u0000'..'\\uFFFF'->:s6=>2\n" +
 			".s4-<EOT>->:s5=>3\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNotFragmentInLexer() throws Exception {
+	@Test public void testNotFragmentInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar T;\n"+
 			"A : 'a' | ~B {;} ;\n" +
@@ -416,11 +419,11 @@ public class TestCharDFAConversion extends BaseTest {
 		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'a'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'b'..'\\uFFFE'}->:s2=>2\n";
+			".s0-{'\\u0000'..'`', 'b'..'\\uFFFF'}->:s2=>2\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNotSetFragmentInLexer() throws Exception {
+	@Test public void testNotSetFragmentInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar T;\n"+
 			"A : B | ~B {;} ;\n" +
@@ -428,11 +431,11 @@ public class TestCharDFAConversion extends BaseTest {
 		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'a'..'b'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'c'..'\\uFFFE'}->:s2=>2\n";
+			".s0-{'\\u0000'..'`', 'c'..'\\uFFFF'}->:s2=>2\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNotTokenInLexer() throws Exception {
+	@Test public void testNotTokenInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar T;\n"+
 			"A : 'x' ('a' | ~B {;}) ;\n" +
@@ -440,52 +443,49 @@ public class TestCharDFAConversion extends BaseTest {
 		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'a'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'b'..'\\uFFFE'}->:s2=>2\n";
+			".s0-{'\\u0000'..'`', 'b'..'\\uFFFF'}->:s2=>2\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNotComplicatedSetRuleInLexer() throws Exception {
+	@Test public void testNotComplicatedSetRuleInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar T;\n"+
 			"A : B | ~B {;} ;\n" +
 			"fragment B : 'a'|'b'|'c'..'e'|C ;\n" +
 			"fragment C : 'f' ;\n"); // has to seen from B to C
-		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'a'..'f'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'g'..'\\uFFFE'}->:s2=>2\n";
+			".s0-{'\\u0000'..'`', 'g'..'\\uFFFF'}->:s2=>2\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testNotSetWithRuleInLexer() throws Exception {
+	@Test public void testNotSetWithRuleInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar T;\n"+
 			"T : ~('a' | B) | 'a';\n" +
 			"fragment\n" +
 			"B : 'b' ;\n" +
 			"C : ~'x'{;} ;"); // force Tokens to not collapse T|C
-		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'b'->:s3=>2\n" +
 			".s0-'x'->:s2=>1\n" +
-			".s0-{'\\u0000'..'a', 'c'..'w', 'y'..'\\uFFFE'}->.s1\n" +
+			".s0-{'\\u0000'..'a', 'c'..'w', 'y'..'\\uFFFF'}->.s1\n" +
 			".s1-<EOT>->:s2=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testSetCallsRuleWithNot() throws Exception {
+	@Test public void testSetCallsRuleWithNot() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar A;\n" +
 			"T : ~'x' ;\n" +
 			"S : 'x' (T | 'x') ;\n");
-		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'x'->:s2=>2\n" +
-			".s0-{'\\u0000'..'w', 'y'..'\\uFFFE'}->:s1=>1\n";
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s1=>1\n";
 		checkDecision(g, 1, expecting, null);
 	}
 
-	public void testSynPredInLexer() throws Exception {
+	@Test public void testSynPredInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar T;\n"+
 			"LT:  '<' ' '*\n" +
@@ -493,7 +493,6 @@ public class TestCharDFAConversion extends BaseTest {
 			"  ;\n" +
 			"IDENT:    'a'+;\n");
 		// basically, Tokens rule should not do set compression test
-		g.createLookaheadDFAs();
 		String expecting =
 			".s0-'<'->:s1=>1\n" +
 			".s0-'a'->:s2=>2\n";
@@ -506,7 +505,6 @@ public class TestCharDFAConversion extends BaseTest {
 		Grammar g = new Grammar(
 			"grammar T;\n"+
 			"a : A | B;");
-		g.createLookaheadDFAs();
 		String expecting =
 			"\n";
 		checkDecision(g, 1, expecting, null);
@@ -523,8 +521,8 @@ public class TestCharDFAConversion extends BaseTest {
 		if ( g.getCodeGenerator()==null ) {
 			CodeGenerator generator = new CodeGenerator(null, g, "Java");
 			g.setCodeGenerator(generator);
-			g.createNFAs();
-			g.createLookaheadDFAs();
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
 		}
 
 		DFA dfa = g.getLookaheadDFA(decision);
@@ -537,14 +535,15 @@ public class TestCharDFAConversion extends BaseTest {
 
 		// first make sure nondeterministic alts are as expected
 		if ( expectingUnreachableAlts==null ) {
-			if ( nonDetAlts.size()!=0 ) {
+			if ( nonDetAlts!=null && nonDetAlts.size()!=0 ) {
 				System.err.println("nondeterministic alts (should be empty): "+nonDetAlts);
 			}
-			assertEquals("unreachable alts mismatch", 0, nonDetAlts.size());
+			assertEquals("unreachable alts mismatch", 0, nonDetAlts!=null?nonDetAlts.size():0);
 		}
 		else {
 			for (int i=0; i<expectingUnreachableAlts.length; i++) {
-				assertTrue("unreachable alts mismatch", nonDetAlts.contains(new Integer(expectingUnreachableAlts[i])));
+				assertTrue("unreachable alts mismatch",
+						   nonDetAlts!=null?nonDetAlts.contains(new Integer(expectingUnreachableAlts[i])):false);
 			}
 		}
 		assertEquals(expecting, result);
diff --git a/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java b/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java
new file mode 100644
index 0000000..9cbe7e6
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java
@@ -0,0 +1,895 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2007 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.tool.*;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestCompositeGrammars extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testWildcardStillWorks() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar S;\n" +
+			"a : B . C ;\n"; // not qualified ID
+		Grammar g = new Grammar(grammar);
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testDelegatorInvokesDelegateRule() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : B {System.out.println(\"S.a\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : a ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.a\n", found);
+	}
+
+	@Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
+		// must generate something like:
+		// public int a(int x) throws RecognitionException { return gS.a(x); }
+		// in M.
+		String slave =
+			"parser grammar S;\n" +
+			"a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : label=a[3] {System.out.println($label.y);} ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.a1000\n", found);
+	}
+
+	@Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
+		// must generate something like:
+		// public int a(int x) throws RecognitionException { return gS.a(x); }
+		// in M.
+		String slave =
+			"parser grammar S;\n" +
+			"a : B {System.out.print(\"S.a\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : a {System.out.println($a.text);} ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.ab\n", found);
+	}
+
+	@Test public void testDelegatorAccessesDelegateMembers() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"@members {\n" +
+			"  public void foo() {System.out.println(\"foo\");}\n" +
+			"}\n" +
+			"a : B ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +		// uses no rules from the import
+			"import S;\n" +
+			"s : 'b' {gS.foo();} ;\n" + // gS is import pointer
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("foo\n", found);
+	}
+
+	@Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : b {System.out.println(\"S.a\");} ;\n" +
+			"b : B ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a
+		writeFile(tmpdir, "T.g", slave2);
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : a ;\n" +
+			"B : 'b' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.a\n", found);
+	}
+
+	@Test public void testDelegatesSeeSameTokenType() throws Exception {
+		String slave =
+			"parser grammar S;\n" + // A, B, C token type order
+			"tokens { A; B; C; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { C; B; A; }\n" + // reverse order
+			"y : A {System.out.println(\"T.y\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave2);
+		// The lexer will create rules to match letters a, b, c.
+		// The associated token types A, B, C must have the same value
+		// and all import'd parsers.  Since ANTLR regenerates all imports
+		// for use with the delegator M, it can generate the same token type
+		// mapping in each parser:
+		// public static final int C=6;
+		// public static final int EOF=-1;
+		// public static final int B=5;
+		// public static final int WS=7;
+		// public static final int A=4;
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" + // matches AA, which should be "aa"
+			"B : 'b' ;\n" + // another order: B, A, C
+			"A : 'a' ;\n" +
+			"C : 'c' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "aa", debug);
+		assertEquals("S.x\n" +
+					 "T.y\n", found);
+	}
+
+	@Test public void testDelegatesSeeSameTokenType2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" + // A, B, C token type order
+			"tokens { A; B; C; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { C; B; A; }\n" + // reverse order
+			"y : A {System.out.println(\"T.y\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" + // matches AA, which should be "aa"
+			"B : 'b' ;\n" + // another order: B, A, C
+			"A : 'a' ;\n" +
+			"C : 'c' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[A, B, C, WS]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testCombinedImportsCombined() throws Exception {
+		// for now, we don't allow combined to import combined
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"grammar S;\n" + // A, B, C token type order
+			"tokens { A; B; C; }\n" +
+			"x : 'x' INT {System.out.println(\"S.x\");} ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x INT ;\n";
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+		String expectedError = "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M cannot import combined grammar S";
+		assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
+	}
+
+	@Test public void testSameStringTwoNames() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { X='a'; }\n" +
+			"y : X {System.out.println(\"T.y\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=4, WS=6, X=5]";
+		String expectedStringLiteralToTypeMap = "{'a'=4}";
+		String expectedTypeToTokenList = "[A, X, WS]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		Object expectedArg = "X='a'";
+		Object expectedArg2 = "A";
+		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+		String expectedError =
+			"error(158): T.g:2:10: cannot alias X='a'; string already assigned to A";
+		assertEquals(expectedError, equeue.errors.get(0).toString());
+	}
+
+	@Test public void testSameNameTwoStrings() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { A='x'; }\n" +
+			"y : A {System.out.println(\"T.y\");} ;\n";
+		
+		writeFile(tmpdir, "T.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]";
+		String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}";
+		String expectedTypeToTokenList = "[A, WS, T__6]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap));
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		Object expectedArg = "A='x'";
+		Object expectedArg2 = "'a'";
+		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+		String expectedError =
+			"error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'";
+		assertEquals(expectedError, equeue.errors.get(0).toString());
+	}
+
+	@Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"options {tokenVocab=whatever;}\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		Object expectedArg = "S";
+		int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsWarning(equeue, expectedMessage);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size());
+
+		String expectedError =
+			"warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S";
+		assertEquals(expectedError, equeue.warnings.get(0).toString());
+	}
+
+	@Test public void testImportedTokenVocabWorksInRoot() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String tokens =
+			"A=99\n";
+		writeFile(tmpdir, "Test.tokens", tokens);
+
+		String master =
+			"grammar M;\n" +
+			"options {tokenVocab=Test;}\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=99, WS=101]";
+		String expectedStringLiteralToTypeMap = "{'a'=100}";
+		String expectedTypeToTokenList = "[A, 'a', WS]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"options {toke\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		// whole bunch of errors from bad S.g file
+		assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size());
+	}
+
+	@Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			": A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		// whole bunch of errors from bad S.g file
+		assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size());
+	}
+
+	@Test public void testDelegatorRuleOverridesDelegate() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : b {System.out.println(\"S.a\");} ;\n" +
+			"b : B ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"b : 'b'|'c' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "a", "c", debug);
+		assertEquals("S.a\n", found);
+	}
+
+	@Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
+		String slave =
+			"parser grammar JavaDecl;\n" +
+			"type : 'int' ;\n" +
+			"decl : type ID ';'\n" +
+			"     | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" +
+			"     ;\n" +
+			"init : '=' INT ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "JavaDecl.g", slave);
+		String master =
+			"grammar Java;\n" +
+			"import JavaDecl;\n" +
+			"prog : decl ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"\n" +
+			"ID  : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		// for float to work in decl, type must be overridden
+		String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
+								  "prog", "float x = 3;", debug);
+		assertEquals("JavaDecl: floatx=3;\n", found);
+	}
+
+    @Test public void testDelegatorRuleOverridesDelegates() throws Exception {
+        String slave =
+            "parser grammar S;\n" +
+            "a : b {System.out.println(\"S.a\");} ;\n" +
+            "b : B ;\n" ;
+        mkdir(tmpdir);
+        writeFile(tmpdir, "S.g", slave);
+
+        String slave2 =
+            "parser grammar T;\n" +
+            "tokens { A='x'; }\n" +
+            "b : B {System.out.println(\"T.b\");} ;\n";
+        writeFile(tmpdir, "T.g", slave2);
+
+        String master =
+            "grammar M;\n" +
+            "import S, T;\n" +
+            "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
+            "WS : (' '|'\\n') {skip();} ;\n" ;
+        String found = execParser("M.g", master, "MParser", "MLexer",
+                                  "a", "c", debug);
+        assertEquals("M.b\n" +
+                     "S.a\n", found);
+    }
+
+	// LEXER INHERITANCE
+
+	@Test public void testLexerDelegatorInvokesDelegateRule() throws Exception {
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
+			"C : 'c' ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"lexer grammar M;\n" +
+			"import S;\n" +
+			"B : 'b' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execLexer("M.g", master, "M", "abc", debug);
+		assertEquals("S.A\nabc\n", found);
+	}
+
+	@Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception {
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
+			"B : 'b' {System.out.println(\"S.B\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"lexer grammar M;\n" +
+			"import S;\n" +
+			"A : 'a' B {System.out.println(\"M.A\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execLexer("M.g", master, "M", "ab", debug);
+		assertEquals("S.B\n" +
+					 "M.A\n" +
+					 "ab\n", found);
+	}
+
+	@Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception {
+		// M.Tokens has nothing to predict tokens from S.  Should
+		// not include S.Tokens alt in this case?
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"lexer grammar M;\n" +
+			"import S;\n" +
+			"A : 'a' {System.out.println(\"M.A\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "/M.g", master);
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		composite.assignTokenTypes();
+		composite.defineGrammarSymbols();
+		composite.createNFAs();
+		g.createLookaheadDFAs(false);
+
+		// predict only alts from M not S
+		String expectingDFA =
+			".s0-'a'->.s1\n" +
+			".s0-{'\\n', ' '}->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n";
+		org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		assertEquals(expectingDFA, result);
+
+		// must not be a "unreachable alt: Tokens" error
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testInvalidImportMechanism() throws Exception {
+		// M.Tokens has nothing to predict tokens from S.  Should
+		// not include S.Tokens alt in this case?
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"tree grammar M;\n" +
+			"import S;\n" +
+			"a : A ;";
+		writeFile(tmpdir, "/M.g", master);
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+
+		String expectedError =
+			"error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S";
+		assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
+	}
+
+	@Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception {
+		// if this compiles, it means that synpred1_S is defined in S.java
+		// but not MParser.java.  MParser has its own synpred1_M which must
+		// be separate to compile.
+		String slave =
+			"parser grammar S;\n" +
+			"a : 'a' {System.out.println(\"S.a1\");}\n" +
+			"  | 'a' {System.out.println(\"S.a2\");}\n" +
+			"  ;\n" +
+			"b : 'x' | 'y' {;} ;\n"; // preds generated but not need in DFA here
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"options {backtrack=true;}\n" +
+			"import S;\n" +
+			"start : a b ;\n" +
+			"nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "start", "ax", debug);
+		assertEquals("S.a1\n", found);
+	}
+
+	@Test public void testKeywordVSIDGivesNoWarning() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'abc' {System.out.println(\"S.A\");} ;\n" +
+			"ID : 'a'..'z'+ ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : A {System.out.println(\"M.a\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "a", "abc", debug);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());
+
+		assertEquals("S.A\nM.a\n", found);
+	}
+
+	@Test public void testWarningForUndefinedToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'abc' {System.out.println(\"S.A\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : ABC A {System.out.println(\"M.a\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		// A is defined in S but M should still see it and not give warning.
+		// only problem is ABC.
+
+		rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size());
+
+		String expectedError =
+			"warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:3:5: no lexer rule corresponding to token: ABC";
+		assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+",""));
+	}
+
+	/** Make sure that M can import S that imports T. */
+	@Test public void test3LevelImport() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar T;\n" +
+			"a : T ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave);
+		String slave2 =
+			"parser grammar S;\n" + // A, B, C token type order
+			"import T;\n" +
+			"a : S ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : M ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[M=6, S=5, T=4]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[T, S, M]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		boolean ok =
+			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, ok);
+	}
+
+	@Test public void testBigTreeOfImports() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar T;\n" +
+			"x : T ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave);
+		slave =
+			"parser grammar S;\n" +
+			"import T;\n" +
+			"y : S ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		slave =
+			"parser grammar C;\n" +
+			"i : C ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "C.g", slave);
+		slave =
+			"parser grammar B;\n" +
+			"j : B ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "B.g", slave);
+		slave =
+			"parser grammar A;\n" +
+			"import B,C;\n" +
+			"k : A ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "A.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S,A;\n" +
+			"a : M ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[A=8, B=6, C=7, M=9, S=5, T=4]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[T, S, B, C, A, M]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		boolean ok =
+			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, ok);
+	}
+
+	@Test public void testRulesVisibleThroughMultilevelImport() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar T;\n" +
+			"x : T ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave);
+		String slave2 =
+			"parser grammar S;\n" + // A, B, C token type order
+			"import T;\n" +
+			"a : S ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : M x ;\n" ; // x MUST BE VISIBLE TO M
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[M=6, S=5, T=4]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[T, S, M]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+}
\ No newline at end of file
diff --git a/src/org/antlr/test/TestDFAConversion.java b/tool/src/test/java/org/antlr/test/TestDFAConversion.java
similarity index 60%
rename from src/org/antlr/test/TestDFAConversion.java
rename to tool/src/test/java/org/antlr/test/TestDFAConversion.java
index 5f5d1b4..5b315bb 100644
--- a/src/org/antlr/test/TestDFAConversion.java
+++ b/tool/src/test/java/org/antlr/test/TestDFAConversion.java
@@ -34,11 +34,16 @@ import org.antlr.tool.*;
 import org.antlr.Tool;
 import org.antlr.codegen.CodeGenerator;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 import java.util.*;
 
 public class TestDFAConversion extends BaseTest {
 
-	public void testA() throws Exception {
+	@Test public void testA() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A C | B;");
@@ -48,30 +53,30 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAB_or_AC() throws Exception {
+	@Test public void testAB_or_AC() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A B | A C;");
 		String expecting =
 			".s0-A->.s1\n" +
-			".s1-B->:s3=>1\n" +
-			".s1-C->:s2=>2\n";
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAB_or_AC_k2() throws Exception {
+	@Test public void testAB_or_AC_k2() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
 			"options {k=2;}\n"+
 			"a : A B | A C;");
 		String expecting =
 			".s0-A->.s1\n" +
-			".s1-B->:s3=>1\n" +
-			".s1-C->:s2=>2\n";
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAB_or_AC_k1() throws Exception {
+	@Test public void testAB_or_AC_k1() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
 			"options {k=1;}\n"+
@@ -82,30 +87,99 @@ public class TestDFAConversion extends BaseTest {
 		int[] nonDetAlts = new int[] {1,2};
 		String ambigInput = "A" ;
 		int[] danglingAlts = new int[] {2};
-		int numWarnings = 2; // non-LL(1) abort and ambig upon A
+		int numWarnings = 2; // ambig upon A
 		checkDecision(g, 1, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testselfRecurseNonDet() throws Exception {
+	@Test public void testselfRecurseNonDet() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
 			"a : A a X | A a Y;");
-		// nondeterministic from left edge; no stop state
+		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
+		assertNonLLStar(g, altsWithRecursion);
+	}
+
+	@Test public void testRecursionOverflow() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a Y | A A A A A X ;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		List expectedTargetRules = Arrays.asList(new Object[] {"a"});
+		int expectedAlt = 1;
+		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
+	}
+
+	@Test public void testRecursionOverflow2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a Y | A+ X ;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		List expectedTargetRules = Arrays.asList(new Object[] {"a"});
+		int expectedAlt = 1;
+		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
+	}
+
+	@Test public void testRecursionOverflowWithPredOk() throws Exception {
+		// overflows with k=*, but resolves with pred
+		// no warnings/errors
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : (a Y)=> a Y | A A A A A X ;\n" + // force recursion past m=4
+			"a : A a | Q;");
 		String expecting =
 			".s0-A->.s1\n" +
-			".s1-A->:s2=>1\n"; // gets this after failing to do LL(*)
-		int[] unreachableAlts = new int[] {1,2};
-		int[] nonDetAlts = new int[] {1,2};
+			".s0-Q&&{synpred1_t}?->:s11=>1\n" +
+			".s1-A->.s2\n" +
+			".s1-Q&&{synpred1_t}?->:s10=>1\n" +
+			".s2-A->.s3\n" +
+			".s2-Q&&{synpred1_t}?->:s9=>1\n" +
+			".s3-A->.s4\n" +
+			".s3-Q&&{synpred1_t}?->:s8=>1\n" +
+			".s4-A->.s5\n" +
+			".s4-Q&&{synpred1_t}?->:s6=>1\n" +
+			".s5-{synpred1_t}?->:s6=>1\n" +
+			".s5-{true}?->:s7=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
 		String ambigInput = null;
-		int[] danglingAlts = new int[] {1,2};
-		int numWarnings = 2; // non-LL(*) abort and ambig upon A A
+		int[] danglingAlts = null;
+		int numWarnings = 0;
 		checkDecision(g, 1, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testCannotSeePastRecursion() throws Exception {
+	@Test public void testRecursionOverflowWithPredOk2() throws Exception {
+		// must predict Z w/o predicate
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : (a Y)=> a Y | A A A A A X | Z;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-Q&&{synpred1_t}?->:s11=>1\n" +
+			".s0-Z->:s12=>3\n" +
+			".s1-A->.s2\n" +
+			".s1-Q&&{synpred1_t}?->:s10=>1\n" +
+			".s2-A->.s3\n" +
+			".s2-Q&&{synpred1_t}?->:s9=>1\n" +
+			".s3-A->.s4\n" +
+			".s3-Q&&{synpred1_t}?->:s8=>1\n" +
+			".s4-A->.s5\n" +
+			".s4-Q&&{synpred1_t}?->:s6=>1\n" +
+			".s5-{synpred1_t}?->:s6=>1\n" +
+			".s5-{true}?->:s7=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testCannotSeePastRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"x   : y X\n" +
@@ -114,41 +188,109 @@ public class TestDFAConversion extends BaseTest {
 			"y   : L y R\n" +
 			"    | B\n" +
 			"    ;");
+		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
+		assertNonLLStar(g, altsWithRecursion);
+	}
+
+	@Test public void testSynPredResolvesRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (y X)=> y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
 		String expecting =
 			".s0-B->.s4\n" +
 			".s0-L->.s1\n" +
-			".s1-B->.s3\n" +
-			".s1-L->:s2=>1\n";
-		int[] unreachableAlts = new int[] {1,2};
-		int[] nonDetAlts = new int[] {1,2};
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
 		String ambigInput = null;
 		int[] danglingAlts = null;
-		int numWarnings = 2;
+		int numWarnings = 0;
 		checkDecision(g, 1, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testSynPredResolvesRecursion() throws Exception {
+	@Test public void testSemPredResolvesRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
-			"x   : (y X)=> y X\n" +
+			"x   : {p}? y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{p}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSemPredResolvesRecursion2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x\n" +
+			"options {k=1;}\n" +
+			"   : {p}? y X\n" +
 			"    | y Y\n" +
 			"    ;\n" +
 			"y   : L y R\n" +
 			"    | B\n" +
 			"    ;");
 		String expecting =
-			".s0-B->.s7\n" +
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{p}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSemPredResolvesRecursion3() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x\n" +
+			"options {k=2;}\n" + // just makes bigger DFA
+			"   : {p}? y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s6\n" +
 			".s0-L->.s1\n" +
 			".s1-B->.s5\n" +
 			".s1-L->.s2\n" +
-			".s2-{synpred1}?->:s3=>1\n" +
+			".s2-{p}?->:s3=>1\n" +
 			".s2-{true}?->:s4=>2\n" +
-			".s5-R->.s6\n" +
-			".s6-X&&{synpred1}?->:s3=>1\n" +
-			".s6-Y->:s4=>2\n" +
-			".s7-X&&{synpred1}?->:s3=>1\n" +
-			".s7-Y->:s4=>2\n";
+			".s5-{p}?->:s3=>1\n" +
+			".s5-{true}?->:s4=>2\n" +
+			".s6-X->:s3=>1\n" +
+			".s6-Y->:s4=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = null;
 		String ambigInput = null;
@@ -158,7 +300,108 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testSynPredResolvesRecursionInLexer() throws Exception {
+	@Test public void testSynPredResolvesRecursion2() throws Exception {
+		// k=* fails and it retries/succeeds with k=1 silently
+		// because of predicate
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"statement\n" +
+			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
+			"    |     expr\n" +
+			"    ;\n" +
+			"expr:     reference\n" +
+			"    |     INT\n" +
+			"    |     FLOAT\n" +
+			"    ;\n" +
+			"reference\n" +
+			"    :     ID L argument_list R\n" +
+			"    ;\n" +
+			"argument_list\n" +
+			"    :     expr COMMA expr\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT..FLOAT->:s3=>2\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursion3() throws Exception {
+		// No errors with k=1; don't try k=* first
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"statement\n" +
+			"options {k=1;}\n" +
+			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
+			"    |     expr\n" +
+			"    ;\n" +
+			"expr:     reference\n" +
+			"    |     INT\n" +
+			"    |     FLOAT\n" +
+			"    ;\n" +
+			"reference\n" +
+			"    :     ID L argument_list R\n" +
+			"    ;\n" +
+			"argument_list\n" +
+			"    :     expr COMMA expr\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT..FLOAT->:s3=>2\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursion4() throws Exception {
+		// No errors with k=2; don't try k=* first
+		// Should be ok like k=1 'except bigger DFA
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"statement\n" +
+			"options {k=2;}\n" +
+			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
+			"    |     expr\n" +
+			"    ;\n" +
+			"expr:     reference\n" +
+			"    |     INT\n" +
+			"    |     FLOAT\n" +
+			"    ;\n" +
+			"reference\n" +
+			"    :     ID L argument_list R\n" +
+			"    ;\n" +
+			"argument_list\n" +
+			"    :     expr COMMA expr\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT..FLOAT->:s4=>2\n" +
+			".s1-L->.s2\n" +
+			".s2-{synpred1_t}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursionInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A :     (B ';')=> B ';'\n" +
@@ -170,16 +413,11 @@ public class TestDFAConversion extends BaseTest {
 			"  ;\n");
 		String expecting =
 			".s0-'('->.s1\n" +
-			".s0-'x'->.s7\n" +
-			".s1-'('->.s2\n" +
-			".s1-'x'->.s5\n" +
-			".s2-{synpred1}?->:s3=>1\n" +
-			".s2-{true}?->:s4=>2\n" +
-			".s5-')'->.s6\n" +
-			".s6-'.'->:s4=>2\n" +
-			".s6-';'&&{synpred1}?->:s3=>1\n" +
-			".s7-'.'->:s4=>2\n" +
-			".s7-';'&&{synpred1}?->:s3=>1\n";
+			".s0-'x'->.s4\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = null;
 		String ambigInput = null;
@@ -189,7 +427,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testAutoBacktrackResolvesRecursionInLexer() throws Exception {
+	@Test public void testAutoBacktrackResolvesRecursionInLexer() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"options {backtrack=true;}\n"+
@@ -202,16 +440,11 @@ public class TestDFAConversion extends BaseTest {
 			"  ;\n");
 		String expecting =
 			".s0-'('->.s1\n" +
-			".s0-'x'->.s7\n" +
-			".s1-'('->.s2\n" +
-			".s1-'x'->.s5\n" +
-			".s2-{synpred1}?->:s3=>1\n" +
-			".s2-{true}?->:s4=>2\n" +
-			".s5-')'->.s6\n" +
-			".s6-'.'->:s4=>2\n" +
-			".s6-';'->:s3=>1\n" +
-			".s7-'.'->:s4=>2\n" +
-			".s7-';'->:s3=>1\n";
+			".s0-'x'->.s4\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = null;
 		String ambigInput = null;
@@ -221,7 +454,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testAutoBacktrackResolvesRecursion() throws Exception {
+	@Test public void testAutoBacktrackResolvesRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
 			"options {backtrack=true;}\n"+
@@ -232,17 +465,12 @@ public class TestDFAConversion extends BaseTest {
 			"    | B\n" +
 			"    ;");
 		String expecting =
-			".s0-B->.s7\n" +
-				".s0-L->.s1\n" +
-				".s1-B->.s5\n" +
-				".s1-L->.s2\n" +
-				".s2-{synpred1}?->:s3=>1\n" +
-				".s2-{true}?->:s4=>2\n" +
-				".s5-R->.s6\n" +
-				".s6-X->:s3=>1\n" +
-				".s6-Y->:s4=>2\n" +
-				".s7-X->:s3=>1\n" +
-				".s7-Y->:s4=>2\n";
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = null;
 		String ambigInput = null;
@@ -252,7 +480,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testselfRecurseNonDet2() throws Exception {
+	@Test public void testselfRecurseNonDet2() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -271,7 +499,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testIndirectRecursionLoop() throws Exception {
+	@Test public void testIndirectRecursionLoop() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -282,12 +510,12 @@ public class TestDFAConversion extends BaseTest {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
-		Set leftRecursive = g.getLeftRecursiveRules();
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
 		Set expectedRules =
 			new HashSet() {{add("a"); add("b");}};
-		assertEquals(expectedRules, leftRecursive);
+		assertEquals(expectedRules, ruleNames(leftRecursive));
 
-		g.createLookaheadDFAs();
+		g.createLookaheadDFAs(false);
 
 		Message msg = (Message)equeue.warnings.get(0);
 		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
@@ -296,12 +524,11 @@ public class TestDFAConversion extends BaseTest {
 
 		// cycle of [a, b]
 		Collection result = cyclesMsg.cycles;
-		List expecting = new ArrayList();
-		expecting.add(new HashSet() {{add("a"); add("b");}});
-		assertEquals(expecting, result);
+		Set expecting = new HashSet() {{add("a"); add("b");}};
+		assertEquals(expecting, ruleNames2(result));
 	}
 
-	public void testIndirectRecursionLoop2() throws Exception {
+	@Test public void testIndirectRecursionLoop2() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -316,9 +543,9 @@ public class TestDFAConversion extends BaseTest {
 		Set leftRecursive = g.getLeftRecursiveRules();
 		Set expectedRules =
 			new HashSet() {{add("a"); add("b");}};
-		assertEquals(expectedRules, leftRecursive);
+		assertEquals(expectedRules, ruleNames(leftRecursive));
 
-		g.createLookaheadDFAs();
+		g.createLookaheadDFAs(false);
 
 		Message msg = (Message)equeue.warnings.get(0);
 		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
@@ -327,12 +554,11 @@ public class TestDFAConversion extends BaseTest {
 
 		// cycle of [a, b]
 		Collection result = cyclesMsg.cycles;
-		List expecting = new ArrayList();
-		expecting.add(new HashSet() {{add("a"); add("b");}});
-		assertEquals(expecting, result);
+		Set expecting = new HashSet() {{add("a"); add("b");}};
+		assertEquals(expecting, ruleNames2(result));
 	}
 
-	public void testIndirectRecursionLoop3() throws Exception {
+	@Test public void testIndirectRecursionLoop3() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -349,7 +575,7 @@ public class TestDFAConversion extends BaseTest {
 		Set leftRecursive = g.getLeftRecursiveRules();
 		Set expectedRules =
 			new HashSet() {{add("a"); add("b"); add("e"); add("d");}};
-		assertEquals(expectedRules, leftRecursive);
+		assertEquals(expectedRules, ruleNames(leftRecursive));
 
 		Message msg = (Message)equeue.warnings.get(0);
 		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
@@ -358,13 +584,11 @@ public class TestDFAConversion extends BaseTest {
 
 		// cycle of [a, b]
 		Collection result = cyclesMsg.cycles;
-		List expecting = new ArrayList();
-		expecting.add(new HashSet() {{add("a"); add("b");}});
-		expecting.add(new HashSet() {{add("d"); add("e");}});
-		assertEquals(expecting, result);
+		Set expecting = new HashSet() {{add("a"); add("b"); add("d"); add("e");}};
+		assertEquals(expecting, ruleNames2(result));
 	}
 
-	public void testifThenElse() throws Exception {
+	@Test public void testifThenElse() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : IF s (E s)? | B;\n" +
@@ -385,7 +609,7 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 2, expecting, null, null, null, null, 0);
 	}
 
-	public void testifThenElseChecksStackSuffixConflict() throws Exception {
+	@Test public void testifThenElseChecksStackSuffixConflict() throws Exception {
 		// if you don't check stack soon enough, this finds E B not just E
 		// as ambig input
 		Grammar g = new Grammar(
@@ -409,7 +633,8 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testInvokeRule() throws Exception {
+    @Test
+    public void testInvokeRule() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : b A\n" +
@@ -420,13 +645,14 @@ public class TestDFAConversion extends BaseTest {
 			"  ;\n");
 		String expecting =
 			".s0-C->:s4=>3\n" +
-			".s0-X->.s1\n" +
-			".s1-A->:s3=>1\n" +
-			".s1-B->:s2=>2\n";
+            ".s0-X->.s1\n" +
+            ".s1-A->:s2=>1\n" +
+            ".s1-B->:s3=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testDoubleInvokeRuleLeftEdge() throws Exception {
+	@Test
+    public void testDoubleInvokeRuleLeftEdge() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : b X\n" +
@@ -438,20 +664,20 @@ public class TestDFAConversion extends BaseTest {
 			"c : C ;\n");
 		String expecting =
 			".s0-C->.s1\n" +
-			".s1-B->.s4\n" +
-			".s1-X->:s2=>1\n" +
-			".s1-Y->:s3=>2\n" +
-			".s4-X->:s2=>1\n" +
-			".s4-Y->:s3=>2\n";
+            ".s1-B->.s2\n" +
+            ".s1-X->:s4=>1\n" +
+            ".s1-Y->:s3=>2\n" +
+            ".s2-X->:s4=>1\n" +
+            ".s2-Y->:s3=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 		expecting =
 			".s0-C->.s1\n" +
-			".s1-B->:s3=>1\n" +
-			".s1-X..Y->:s2=>2\n";
+            ".s1-B->:s2=>1\n" +
+            ".s1-X..Y->:s3=>2\n";
 		checkDecision(g, 2, expecting, null, null, null, null, 0);
 	}
 
-	public void testimmediateTailRecursion() throws Exception {
+	@Test public void testimmediateTailRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -463,7 +689,8 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAStar_immediateTailRecursion() throws Exception {
+	@Test
+    public void testAStar_immediateTailRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -480,7 +707,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testNoStartRule() throws Exception {
+	@Test public void testNoStartRule() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -498,15 +725,16 @@ public class TestDFAConversion extends BaseTest {
 				   msg instanceof GrammarSemanticsMessage);
 	}
 
-	public void testAStar_immediateTailRecursion2() throws Exception {
+	@Test
+    public void testAStar_immediateTailRecursion2() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
 			"a : A a | A ;");
 		String expecting =
 			".s0-A->.s1\n" +
-			".s1-A->:s3=>1\n" +
-			".s1-EOF->:s2=>2\n";
+            ".s1-A->:s2=>1\n" +
+            ".s1-EOF->:s3=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = null;
 		String ambigInput = null;
@@ -516,17 +744,17 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testimmediateLeftRecursion() throws Exception {
+	@Test public void testimmediateLeftRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
 			"a : a A | B;");
 		Set leftRecursive = g.getLeftRecursiveRules();
 		Set expectedRules = new HashSet() {{add("a");}};
-		assertEquals(expectedRules, leftRecursive);
+		assertEquals(expectedRules, ruleNames(leftRecursive));
 	}
 
-	public void testIndirectLeftRecursion() throws Exception {
+	@Test public void testIndirectLeftRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -535,10 +763,10 @@ public class TestDFAConversion extends BaseTest {
 			"c : a | C ;\n");
 		Set leftRecursive = g.getLeftRecursiveRules();
 		Set expectedRules = new HashSet() {{add("a"); add("b"); add("c");}};
-		assertEquals(expectedRules, leftRecursive);
+		assertEquals(expectedRules, ruleNames(leftRecursive));
 	}
 
-	public void testLeftRecursionInMultipleCycles() throws Exception {
+	@Test public void testLeftRecursionInMultipleCycles() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 				"s : a x ;\n" +
@@ -550,10 +778,10 @@ public class TestDFAConversion extends BaseTest {
 		Set leftRecursive = g.getLeftRecursiveRules();
 		Set expectedRules =
 			new HashSet() {{add("a"); add("b"); add("c"); add("x"); add("y");}};
-		assertEquals(expectedRules, leftRecursive);
+		assertEquals(expectedRules, ruleNames(leftRecursive));
 	}
 
-	public void testCycleInsideRuleDoesNotForceInfiniteRecursion() throws Exception {
+	@Test public void testCycleInsideRuleDoesNotForceInfiniteRecursion() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"s : a ;\n" +
@@ -567,58 +795,90 @@ public class TestDFAConversion extends BaseTest {
 
 	// L O O P S
 
-	public void testAStar() throws Exception {
+	@Test public void testAStar() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ( A )* ;");
 		String expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-EOF->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAorBorCStar() throws Exception {
+	@Test public void testAorBorCStar() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ( A | B | C )* ;");
 		String expecting =
-			".s0-A..C->:s2=>1\n" +
-			".s0-EOF->:s1=>2\n";
+			".s0-A..C->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAPlus() throws Exception {
+	@Test public void testAPlus() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ( A )+ ;");
 		String expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-EOF->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
 	}
 
-	public void testAPlusNonGreedyWhenDeterministic() throws Exception {
+	@Test public void testAPlusNonGreedyWhenDeterministic() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (options {greedy=false;}:A)+ ;\n");
 		// should look the same as A+ since no ambiguity
 		String expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-EOF->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAorBorCPlus() throws Exception {
+	@Test public void testAPlusNonGreedyWhenNonDeterministic() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (options {greedy=false;}:A)+ A+ ;\n");
+		// should look the same as A+ since no ambiguity
+		String expecting =
+			".s0-A->:s1=>2\n"; // always chooses to exit
+		int[] unreachableAlts = new int[] {1};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAPlusGreedyWhenNonDeterministic() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (options {greedy=true;}:A)+ A+ ;\n");
+		// should look the same as A+ since no ambiguity
+		String expecting =
+			".s0-A->:s1=>1\n"; // always chooses to enter loop upon A
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAorBorCPlus() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ( A | B | C )+ ;");
 		String expecting =
-			".s0-A..C->:s2=>1\n" +
-			".s0-EOF->:s1=>2\n";
+			".s0-A..C->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testAOptional() throws Exception {
+	@Test public void testAOptional() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ( A )? B ;");
@@ -628,7 +888,7 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
 	}
 
-	public void testAorBorCOptional() throws Exception {
+	@Test public void testAorBorCOptional() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ( A | B | C )? Z ;");
@@ -640,73 +900,75 @@ public class TestDFAConversion extends BaseTest {
 
 	// A R B I T R A R Y  L O O K A H E A D
 
-	public void testAStarBOrAStarC() throws Exception {
+	@Test
+    public void testAStarBOrAStarC() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (A)* B | (A)* C;");
 		String expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-B->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
 		expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-C->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-C->:s2=>2\n";
 		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
 		expecting =
 			".s0-A->.s1\n" +
-			".s0-B->:s2=>1\n" +
-			".s0-C->:s3=>2\n" +
-			".s1-A->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n";
+            ".s0-B->:s2=>1\n" +
+            ".s0-C->:s3=>2\n" +
+            ".s1-A->.s1\n" +
+            ".s1-B->:s2=>1\n" +
+            ".s1-C->:s3=>2\n";
 		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
 	}
 
-
-	public void testAStarBOrAPlusC() throws Exception {
+	@Test
+    public void testAStarBOrAPlusC() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (A)* B | (A)+ C;");
 		String expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-B->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
 		expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-C->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-C->:s2=>2\n";
 		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
 		expecting =
 			".s0-A->.s1\n" +
-			".s0-B->:s2=>1\n" +
-			".s1-A->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n";
+            ".s0-B->:s2=>1\n" +
+            ".s1-A->.s1\n" +
+            ".s1-B->:s2=>1\n" +
+            ".s1-C->:s3=>2\n";
 		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
 	}
 
 
-	public void testAOrBPlusOrAPlus() throws Exception {
+    @Test
+    public void testAOrBPlusOrAPlus() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (A|B)* X | (A)+ Y;");
 		String expecting =
-			".s0-A..B->:s2=>1\n" +
-			".s0-X->:s1=>2\n";
+			".s0-A..B->:s1=>1\n" +
+			".s0-X->:s2=>2\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback (A|B)*
 		expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-Y->:s1=>2\n";
+			".s0-A->:s1=>1\n" +
+			".s0-Y->:s2=>2\n";
 		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback (A)+
 		expecting =
 			".s0-A->.s1\n" +
-			".s0-B..X->:s2=>1\n" +
-			".s1-A->.s1\n" +
-			".s1-B..X->:s2=>1\n" +
-			".s1-Y->:s3=>2\n";
+            ".s0-B..X->:s2=>1\n" +
+            ".s1-A->.s1\n" +
+            ".s1-B..X->:s2=>1\n" +
+            ".s1-Y->:s3=>2\n";
 		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule
 	}
 
-	public void testLoopbackAndExit() throws Exception {
+	@Test public void testLoopbackAndExit() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (A|B)+ B;");
@@ -718,7 +980,7 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testOptionalAltAndBypass() throws Exception {
+	@Test public void testOptionalAltAndBypass() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (A|B)? B;");
@@ -732,7 +994,7 @@ public class TestDFAConversion extends BaseTest {
 
 	// R E S O L V E  S Y N  C O N F L I C T S
 
-	public void testResolveLL1ByChoosingFirst() throws Exception {
+	@Test public void testResolveLL1ByChoosingFirst() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A C | A C;");
@@ -748,7 +1010,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testResolveLL2ByChoosingFirst() throws Exception {
+	@Test public void testResolveLL2ByChoosingFirst() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A B | A B;");
@@ -764,7 +1026,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testResolveLL2MixAlt() throws Exception {
+	@Test public void testResolveLL2MixAlt() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A B | A C | A B | Z;");
@@ -782,7 +1044,7 @@ public class TestDFAConversion extends BaseTest {
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testIndirectIFThenElseStyleAmbig() throws Exception {
+	@Test public void testIndirectIFThenElseStyleAmbig() throws Exception {
 		// the (c)+ loopback is ambig because it could match "CASE"
 		// by entering the loop or by falling out and ignoring (s)*
 		// back falling back into (cg)* loop which stats over and
@@ -811,7 +1073,7 @@ public class TestDFAConversion extends BaseTest {
 
 	// S E T S
 
-	public void testComplement() throws Exception {
+	@Test public void testComplement() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ~(A | B | C) | C {;} ;\n" +
@@ -822,7 +1084,7 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testComplementToken() throws Exception {
+	@Test public void testComplementToken() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : ~C | C {;} ;\n" +
@@ -833,28 +1095,28 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testComplementChar() throws Exception {
+	@Test public void testComplementChar() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : ~'x' | 'x' {;} ;\n");
 		String expecting =
 			".s0-'x'->:s2=>2\n" +
-			".s0-{'\\u0000'..'w', 'y'..'\\uFFFE'}->:s1=>1\n";
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s1=>1\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testComplementCharSet() throws Exception {
+	@Test public void testComplementCharSet() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : ~(' '|'\t'|'x'|'y') | 'x';\n" + // collapse into single set
 			"B : 'y' ;");
 		String expecting =
 			".s0-'y'->:s2=>2\n" +
-			".s0-{'\\u0000'..'\\b', '\\n'..'\\u001F', '!'..'x', 'z'..'\\uFFFE'}->:s1=>1\n";
+			".s0-{'\\u0000'..'\\b', '\\n'..'\\u001F', '!'..'x', 'z'..'\\uFFFF'}->:s1=>1\n";
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testNoSetCollapseWithActions() throws Exception {
+	@Test public void testNoSetCollapseWithActions() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : (A | B {foo}) | C;");
@@ -864,7 +1126,7 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testRuleAltsSetCollapse() throws Exception {
+	@Test public void testRuleAltsSetCollapse() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A | B | C ;"
@@ -874,7 +1136,7 @@ public class TestDFAConversion extends BaseTest {
 		assertEquals(expecting, g.getGrammarTree().toStringTree());
 	}
 
-	public void testTokensRuleAltsDoNotCollapse() throws Exception {
+	@Test public void testTokensRuleAltsDoNotCollapse() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"A : 'a';" +
@@ -886,7 +1148,7 @@ public class TestDFAConversion extends BaseTest {
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	public void testMultipleSequenceCollision() throws Exception {
+	@Test public void testMultipleSequenceCollision() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
 			"a : (A{;}|B)\n" +
@@ -919,7 +1181,7 @@ As a result, alternative(s) 2 were disabled for that input
 */
 	}
 
-	public void testMultipleAltsSameSequenceCollision() throws Exception {
+	@Test public void testMultipleAltsSameSequenceCollision() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
 			"a : type ID \n" +
@@ -942,7 +1204,7 @@ As a result, alternative(s) 2 were disabled for that input
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testFollowReturnsToLoopReenteringSameRule() throws Exception {
+	@Test public void testFollowReturnsToLoopReenteringSameRule() throws Exception {
 		// D07 can be matched in the (...)? or fall out of esc back into (..)*
 		// loop in sl.  Note that D07 is matched by ~(R|SLASH).  No good
 		// way to write that grammar I guess
@@ -952,9 +1214,9 @@ As a result, alternative(s) 2 were disabled for that input
 			"\n" +
 			"esc : SLASH ( N | D03 (D07)? ) ;");
 		String expecting =
-			".s0-R->:s1=>3\n" +
-			".s0-SLASH->:s2=>1\n" +
-			".s0-{L, N..D07}->:s3=>2\n";
+			".s0-R->:s3=>3\n" +
+			".s0-SLASH->:s1=>1\n" +
+			".s0-{L, N..D07}->:s2=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = new int[] {1,2};
 		String ambigInput = "D07";
@@ -964,7 +1226,7 @@ As a result, alternative(s) 2 were disabled for that input
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testTokenCallsAnotherOnLeftEdge() throws Exception {
+	@Test public void testTokenCallsAnotherOnLeftEdge() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar t;\n"+
 			"F   :   I '.'\n" +
@@ -980,7 +1242,7 @@ As a result, alternative(s) 2 were disabled for that input
 	}
 
 
-	public void testSelfRecursionAmbigAlts() throws Exception {
+	@Test public void testSelfRecursionAmbigAlts() throws Exception {
 		// ambiguous grammar for "L ID R" (alts 1,2 of a)
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
@@ -1007,7 +1269,7 @@ As a result, alternative(s) 2 were disabled for that input
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testIndirectRecursionAmbigAlts() throws Exception {
+	@Test public void testIndirectRecursionAmbigAlts() throws Exception {
 		// ambiguous grammar for "L ID R" (alts 1,2 of a)
 		// This was derived from the java grammar 12/4/2004 when it
 		// was not handling a unaryExpression properly.  I traced it
@@ -1041,7 +1303,7 @@ As a result, alternative(s) 2 were disabled for that input
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testTailRecursionInvokedFromArbitraryLookaheadDecision() throws Exception {
+	@Test public void testTailRecursionInvokedFromArbitraryLookaheadDecision() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : b X\n" +
@@ -1051,20 +1313,11 @@ As a result, alternative(s) 2 were disabled for that input
 			"b : A\n" +
 			"  | A b\n" +
 			"  ;\n");
-		String expecting =
-			".s0-A->.s1\n" +
-				".s1-Y->:s3=>2\n" +
-				".s1-{X, A}->:s2=>1\n";
-		int[] unreachableAlts = new int[] {1,2};
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 2;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
+		assertNonLLStar(g, altsWithRecursion);
 	}
 
-	public void testWildcardStarK1AndNonGreedyByDefaultInParser() throws Exception {
+	@Test public void testWildcardStarK1AndNonGreedyByDefaultInParser() throws Exception {
 		// no error because .* assumes it should finish when it sees R
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
@@ -1082,7 +1335,7 @@ As a result, alternative(s) 2 were disabled for that input
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
-	public void testWildcardPlusK1AndNonGreedyByDefaultInParser() throws Exception {
+	@Test public void testWildcardPlusK1AndNonGreedyByDefaultInParser() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n" +
 			"s : A block EOF ;\n" +
@@ -1099,9 +1352,54 @@ As a result, alternative(s) 2 were disabled for that input
 					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
 	}
 
+	@Test public void testGatedSynPred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (X)=> X\n" +
+			"    | Y\n" +
+			"    ;\n");
+		String expecting =
+			".s0-X&&{synpred1_t}?->:s1=>1\n" + // does not hoist; it gates edges
+			".s0-Y->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+
+		Set<String> preds = g.synPredNamesUsedInDFA;
+		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
+		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
+	}
+
+	@Test public void testHoistedGatedSynPred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (X)=> X\n" +
+			"    | X\n" +
+			"    ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" + // hoists into decision
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+
+		Set<String> preds = g.synPredNamesUsedInDFA;
+		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
+		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
+	}
+
 	// Check state table creation
 
-	public void testCyclicTableCreation() throws Exception {
+	@Test public void testCyclicTableCreation() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : A+ X | A+ Y ;");
@@ -1122,7 +1420,85 @@ As a result, alternative(s) 2 were disabled for that input
 		checkDecision(g, 1, expecting, null, null, null, null, 0);
 	}
 
-	protected void checkDecision(Grammar g,
+	protected void assertNonLLStar(Grammar g, List expectedBadAlts) {
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+		NonRegularDecisionMessage msg = getNonRegularDecisionMessage(equeue.errors);
+		assertTrue("expected fatal non-LL(*) msg", msg!=null);
+		List<Integer> alts = new ArrayList();
+		alts.addAll(msg.altsWithRecursion);
+		Collections.sort(alts);
+		assertEquals(expectedBadAlts,alts);
+	}
+
+	protected void assertRecursionOverflow(Grammar g,
+										   List expectedTargetRules,
+										   int expectedAlt) {
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+		RecursionOverflowMessage msg = getRecursionOverflowMessage(equeue.errors);
+		assertTrue("missing expected recursion overflow msg"+msg, msg!=null);
+		assertEquals("target rules mismatch",
+					 expectedTargetRules.toString(), msg.targetRules.toString());
+		assertEquals("mismatched alt", expectedAlt, msg.alt);
+	}
+
+    @Test
+    public void testWildcardInTreeGrammar() throws Exception {
+        Grammar g = new Grammar(
+            "tree grammar t;\n" +
+            "a : A B | A . ;\n");
+        String expecting =
+            ".s0-A->.s1\n" +
+            ".s1-A->:s3=>2\n" +
+            ".s1-B->:s2=>1\n";
+        int[] unreachableAlts = null;
+        int[] nonDetAlts = new int[] {1,2};
+        String ambigInput = null;
+        int[] danglingAlts = null;
+        int numWarnings = 1;
+        checkDecision(g, 1, expecting, unreachableAlts,
+                      nonDetAlts, ambigInput, danglingAlts, numWarnings);
+    }
+
+    @Test
+    public void testWildcardInTreeGrammar2() throws Exception {
+        Grammar g = new Grammar(
+            "tree grammar t;\n" +
+            "a : ^(A X Y) | ^(A . .) ;\n");
+        String expecting =
+            ".s0-A->.s1\n" +
+            ".s1-DOWN->.s2\n" +
+            ".s2-X->.s3\n" +
+            ".s2-{A, Y}->:s6=>2\n" +
+            ".s3-Y->.s4\n" +
+            ".s3-{DOWN, A..X}->:s6=>2\n" +
+            ".s4-DOWN->:s6=>2\n" +
+            ".s4-UP->:s5=>1\n";
+        int[] unreachableAlts = null;
+        int[] nonDetAlts = new int[] {1,2};
+        String ambigInput = null;
+        int[] danglingAlts = null;
+        int numWarnings = 1;
+        checkDecision(g, 1, expecting, unreachableAlts,
+                      nonDetAlts, ambigInput, danglingAlts, numWarnings);
+    }
+
+    protected void checkDecision(Grammar g,
 								 int decision,
 								 String expecting,
 								 int[] expectingUnreachableAlts,
@@ -1138,8 +1514,8 @@ As a result, alternative(s) 2 were disabled for that input
 
 		// mimic actions of org.antlr.Tool first time for grammar g
 		if ( g.getNumberOfDecisions()==0 ) {
-			g.createNFAs();
-			g.createLookaheadDFAs();
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
 		}
 		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
 		g.setCodeGenerator(generator);
@@ -1167,7 +1543,8 @@ As a result, alternative(s) 2 were disabled for that input
 			assertEquals("unreachable alts mismatch", s, s2);
 		}
 		else {
-			assertEquals("number of unreachable alts", 0, unreachableAlts.size());
+			assertEquals("number of unreachable alts", 0,
+						 unreachableAlts!=null?unreachableAlts.size():0);
 		}
 
 		// check conflicting input
@@ -1230,6 +1607,16 @@ As a result, alternative(s) 2 were disabled for that input
 		return null;
 	}
 
+	protected NonRegularDecisionMessage getNonRegularDecisionMessage(List errors) {
+		for (int i = 0; i < errors.size(); i++) {
+			Message m = (Message) errors.get(i);
+			if ( m instanceof NonRegularDecisionMessage ) {
+				return (NonRegularDecisionMessage)m;
+			}
+		}
+		return null;
+	}
+
 	protected RecursionOverflowMessage getRecursionOverflowMessage(List warnings) {
 		for (int i = 0; i < warnings.size(); i++) {
 			Message m = (Message) warnings.get(i);
@@ -1272,4 +1659,19 @@ As a result, alternative(s) 2 were disabled for that input
 		return buf.toString();
 	}
 
+	protected Set<String> ruleNames(Set<Rule> rules) {
+		Set<String> x = new HashSet<String>();
+		for (Rule r : rules) {
+			x.add(r.name);
+		}
+		return x;
+	}
+
+	protected Set<String> ruleNames2(Collection<HashSet> rules) {
+		Set<String> x = new HashSet<String>();
+		for (HashSet s : rules) {
+			x.addAll(ruleNames(s));
+		}
+		return x;
+	}
 }
diff --git a/src/org/antlr/test/TestDFAMatching.java b/tool/src/test/java/org/antlr/test/TestDFAMatching.java
similarity index 85%
rename from src/org/antlr/test/TestDFAMatching.java
rename to tool/src/test/java/org/antlr/test/TestDFAMatching.java
index b340472..d38f647 100644
--- a/src/org/antlr/test/TestDFAMatching.java
+++ b/tool/src/test/java/org/antlr/test/TestDFAMatching.java
@@ -32,18 +32,23 @@ import org.antlr.analysis.NFA;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.tool.Grammar;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestDFAMatching extends BaseTest {
 
     /** Public default constructor used by TestRig */
     public TestDFAMatching() {
     }
 
-    public void testSimpleAltCharTest() throws Exception {
+    @Test public void testSimpleAltCharTest() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : {;}'a' | 'b' | 'c';");
-		g.createNFAs();
-		g.createLookaheadDFAs();
+		g.buildNFA();
+		g.createLookaheadDFAs(false);
         DFA dfa = g.getLookaheadDFA(1);
         checkPrediction(dfa,"a",1);
         checkPrediction(dfa,"b",2);
@@ -51,12 +56,12 @@ public class TestDFAMatching extends BaseTest {
         checkPrediction(dfa,"d", NFA.INVALID_ALT_NUMBER);
     }
 
-    public void testSets() throws Exception {
+    @Test public void testSets() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : {;}'a'..'z' | ';' | '0'..'9' ;");
-		g.createNFAs();
-        g.createLookaheadDFAs();
+		g.buildNFA();
+        g.createLookaheadDFAs(false);
         DFA dfa = g.getLookaheadDFA(1);
         checkPrediction(dfa,"a",1);
         checkPrediction(dfa,"q",1);
@@ -65,12 +70,12 @@ public class TestDFAMatching extends BaseTest {
         checkPrediction(dfa,"9",3);
     }
 
-    public void testFiniteCommonLeftPrefixes() throws Exception {
+    @Test public void testFiniteCommonLeftPrefixes() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : 'a' 'b' | 'a' 'c' | 'd' 'e' ;");
-		g.createNFAs();
-        g.createLookaheadDFAs();
+		g.buildNFA();
+        g.createLookaheadDFAs(false);
         DFA dfa = g.getLookaheadDFA(1);
         checkPrediction(dfa,"ab",1);
         checkPrediction(dfa,"ac",2);
@@ -78,13 +83,13 @@ public class TestDFAMatching extends BaseTest {
         checkPrediction(dfa,"q", NFA.INVALID_ALT_NUMBER);
     }
 
-    public void testSimpleLoops() throws Exception {
+    @Test public void testSimpleLoops() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : (DIGIT)+ '.' DIGIT | (DIGIT)+ ;\n" +
                 "fragment DIGIT : '0'..'9' ;\n");
-		g.createNFAs();
-        g.createLookaheadDFAs();
+		g.buildNFA();
+        g.createLookaheadDFAs(false);
         DFA dfa = g.getLookaheadDFA(3);
         checkPrediction(dfa,"32",2);
         checkPrediction(dfa,"999.2",1);
diff --git a/tool/src/test/java/org/antlr/test/TestFastQueue.java b/tool/src/test/java/org/antlr/test/TestFastQueue.java
new file mode 100644
index 0000000..da9b68e
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestFastQueue.java
@@ -0,0 +1,131 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+import org.antlr.runtime.misc.FastQueue;
+
+import java.util.NoSuchElementException;
+
+public class TestFastQueue {
+    @Test public void testQueueNoRemove() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.add("c");
+        q.add("d");
+        q.add("e");
+        String expecting = "a b c d e";
+        String found = q.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testQueueThenRemoveAll() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.add("c");
+        q.add("d");
+        q.add("e");
+        StringBuffer buf = new StringBuffer();
+        while ( q.size()>0 ) {
+            String o = q.remove();
+            buf.append(o);
+            if ( q.size()>0 ) buf.append(" ");
+        }
+        assertEquals("queue should be empty", 0, q.size());
+        String expecting = "a b c d e";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testQueueThenRemoveOneByOne() throws Exception {
+        StringBuffer buf = new StringBuffer();
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        buf.append(q.remove());
+        q.add("b");
+        buf.append(q.remove());
+        q.add("c");
+        buf.append(q.remove());
+        q.add("d");
+        buf.append(q.remove());
+        q.add("e");
+        buf.append(q.remove());
+        assertEquals("queue should be empty", 0, q.size());
+        String expecting = "abcde";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    // E r r o r s
+
+    @Test public void testGetFromEmptyQueue() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        String msg = null;
+        try { q.remove(); }
+        catch (NoSuchElementException nsee) {
+            msg = nsee.getMessage();
+        }
+        String expecting = "queue index 0 > size 0";
+        String found = msg;
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testGetFromEmptyQueueAfterSomeAdds() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.remove();
+        q.remove();
+        String msg = null;
+        try { q.remove(); }
+        catch (NoSuchElementException nsee) {
+            msg = nsee.getMessage();
+        }
+        String expecting = "queue index 0 > size 0";
+        String found = msg;
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testGetFromEmptyQueueAfterClear() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.clear();
+        String msg = null;
+        try { q.remove(); }
+        catch (NoSuchElementException nsee) {
+            msg = nsee.getMessage();
+        }
+        String expecting = "queue index 0 > size 0";
+        String found = msg;
+        assertEquals(expecting, found);
+    }
+}
diff --git a/tool/src/test/java/org/antlr/test/TestHeteroAST.java b/tool/src/test/java/org/antlr/test/TestHeteroAST.java
new file mode 100644
index 0000000..097432b
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestHeteroAST.java
@@ -0,0 +1,517 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2007 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/** Test hetero trees in parsers and tree parsers */
+public class TestHeteroAST extends BaseTest {
+	protected boolean debug = false;
+
+	// PARSERS -- AUTO AST
+
+    @Test public void testToken() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "@members {static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "}\n"+
+            "a : ID<V> ;\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "a", debug);
+        assertEquals("a<V>\n", found);
+    }
+
+    @Test public void testTokenWithQualifiedType() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "@members {static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "}\n"+
+            "a : ID<TParser.V> ;\n"+ // TParser.V is qualified name
+            "ID : 'a'..'z'+ ;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "a", debug);
+        assertEquals("a<V>\n", found);
+    }
+
+	@Test public void testTokenWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : x=ID<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testTokenWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : x+=ID<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testTokenRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID<V>^ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testTokenRootWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : x+=ID<V>^ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testString() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin'<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin", debug);
+		assertEquals("begin<V>\n", found);
+	}
+
+	@Test public void testStringRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin'<V>^ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin", debug);
+		assertEquals("begin<V>\n", found);
+	}
+
+	// PARSERS -- REWRITE AST
+
+	@Test public void testRewriteToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ID<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testRewriteTokenWithArgs() throws Exception {
+		// arg to ID<V>[42,19,30] means you're constructing node not associated with ID
+		// so must pass in token manually
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {\n" +
+			"static class V extends CommonTree {\n" +
+			"  public int x,y,z;\n"+
+			"  public V(int ttype, int x, int y, int z) { this.x=x; this.y=y; this.z=z; token=new CommonToken(ttype,\"\"); }\n" +
+			"  public V(int ttype, Token t, int x) { token=t; this.x=x;}\n" +
+			"  public String toString() { return (token!=null?token.getText():\"\")+\"<V>;\"+x+y+z;}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ID<V>[42,19,30] ID<V>[$ID,99] ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("<V>;421930 a<V>;9900\n", found);
+	}
+
+	@Test public void testRewriteTokenRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID INT -> ^(ID<V> INT) ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 2", debug);
+		assertEquals("(a<V> 2)\n", found);
+	}
+
+	@Test public void testRewriteString() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin' -> 'begin'<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin", debug);
+		assertEquals("begin<V>\n", found);
+	}
+
+	@Test public void testRewriteStringRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin' INT -> ^('begin'<V> INT) ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin 2", debug);
+		assertEquals("(begin<V> 2)\n", found);
+	}
+
+    @Test public void testRewriteRuleResults() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "tokens {LIST;}\n" +
+            "@members {\n" +
+            "static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "static class W extends CommonTree {\n" +
+            "  public W(int tokenType, String txt) { super(new CommonToken(tokenType,txt)); }\n" +
+            "  public W(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<W>\";}\n" +
+            "}\n" +
+            "}\n"+
+            "a : id (',' id)* -> ^(LIST<W>[\"LIST\"] id+);\n" +
+            "id : ID -> ID<V>;\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "a,b,c", debug);
+        assertEquals("(LIST<W> a<V> b<V> c<V>)\n", found);
+    }
+
+    @Test public void testCopySemanticsWithHetero() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "@members {\n" +
+            "static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +  // for 'int'<V>
+            "  public V(V node) { super(node); }\n\n" + // for dupNode
+            "  public Tree dupNode() { return new V(this); }\n" + // for dup'ing type
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "}\n" +
+            "a : type ID (',' ID)* ';' -> ^(type ID)+;\n" +
+            "type : 'int'<V> ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "int a, b, c;", debug);
+        assertEquals("(int<V> a) (int<V> b) (int<V> c)\n", found);
+    }
+
+    // TREE PARSERS -- REWRITE AST
+
+	@Test public void testTreeParserRewriteFlatList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"@members {\n" +
+			"static class V extends CommonTree {\n" +
+			"  public V(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"static class W extends CommonTree {\n" +
+			"  public W(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<W>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID INT -> INT<V> ID<W>\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("34<V> abc<W>\n", found);
+	}
+
+	@Test public void testTreeParserRewriteTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"@members {\n" +
+			"static class V extends CommonTree {\n" +
+			"  public V(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"static class W extends CommonTree {\n" +
+			"  public W(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<W>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID INT -> ^(INT<V> ID<W>)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(34<V> abc<W>)\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginary() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ROOT<V> ID\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("ROOT<V> abc\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginaryWithArgs() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public int x;\n" +
+			"  public V(int tokenType, int x) { super(new CommonToken(tokenType)); this.x=x;}\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>;\"+x;}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ROOT<V>[42] ID\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("ROOT<V>;42 abc\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginaryRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ^(ROOT<V> ID)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("(ROOT<V> abc)\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginaryFromReal() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
+			"  public V(int tokenType, Object tree) { super((CommonTree)tree); token.setType(tokenType); }\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>@\"+token.getLine();}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ROOT<V>[$ID]\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("ROOT<V>@1\n", found); // at line 1; shows copy of ID's stuff
+	}
+
+	@Test public void testTreeParserAutoHeteroAST() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(CommonTree t) { super(t); }\n" + // NEEDS SPECIAL CTOR
+			"  public String toString() { return super.toString()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID<V> ';'<V>\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
+		assertEquals("abc<V> ;<V>\n", found);
+	}
+
+}
diff --git a/src/org/antlr/test/TestInterpretedLexing.java b/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
similarity index 92%
rename from src/org/antlr/test/TestInterpretedLexing.java
rename to tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
index 376d7b2..1a578cd 100644
--- a/src/org/antlr/test/TestInterpretedLexing.java
+++ b/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
@@ -31,6 +31,11 @@ import org.antlr.tool.Grammar;
 import org.antlr.tool.Interpreter;
 import org.antlr.runtime.*;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestInterpretedLexing extends BaseTest {
 
 	/*
@@ -72,7 +77,7 @@ public class TestInterpretedLexing extends BaseTest {
     public TestInterpretedLexing() {
     }
 
-	public void testSimpleAltCharTest() throws Exception {
+	@Test public void testSimpleAltCharTest() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : 'a' | 'b' | 'c';");
@@ -86,7 +91,7 @@ public class TestInterpretedLexing extends BaseTest {
 		assertEquals(result.getType(), Atype);
     }
 
-    public void testSingleRuleRef() throws Exception {
+    @Test public void testSingleRuleRef() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : 'a' B 'c' ;\n" +
@@ -97,7 +102,7 @@ public class TestInterpretedLexing extends BaseTest {
 		assertEquals(result.getType(), Atype);
     }
 
-    public void testSimpleLoop() throws Exception {
+    @Test public void testSimpleLoop() throws Exception {
         Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "INT : (DIGIT)+ ;\n"+
@@ -111,7 +116,7 @@ public class TestInterpretedLexing extends BaseTest {
 		assertEquals(result.getType(), INTtype);
     }
 
-    public void testMultAltLoop() throws Exception {
+    @Test public void testMultAltLoop() throws Exception {
 		Grammar g = new Grammar(
                 "lexer grammar t;\n"+
                 "A : ('0'..'9'|'a'|'b')+ ;\n");
@@ -138,7 +143,7 @@ public class TestInterpretedLexing extends BaseTest {
 		assertEquals(result.getType(), Atype);
     }
 
-	public void testSimpleLoops() throws Exception {
+	@Test public void testSimpleLoops() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar t;\n"+
 				"A : ('0'..'9')+ '.' ('0'..'9')* | ('0'..'9')+ ;\n");
@@ -149,7 +154,7 @@ public class TestInterpretedLexing extends BaseTest {
 		assertEquals(result.getType(), Atype);
 	}
 
-	public void testTokensRules() throws Exception {
+	@Test public void testTokensRules() throws Exception {
 		Grammar pg = new Grammar(
 			"grammar p;\n"+
 			"a : (INT|FLOAT|WS)+;\n");
diff --git a/src/org/antlr/test/TestInterpretedParsing.java b/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
similarity index 94%
rename from src/org/antlr/test/TestInterpretedParsing.java
rename to tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
index adb56f8..63e742f 100644
--- a/src/org/antlr/test/TestInterpretedParsing.java
+++ b/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
@@ -32,13 +32,18 @@ import org.antlr.tool.Interpreter;
 import org.antlr.runtime.*;
 import org.antlr.runtime.tree.ParseTree;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestInterpretedParsing extends BaseTest {
 
     /** Public default constructor used by TestRig */
     public TestInterpretedParsing() {
     }
 
-	public void testSimpleParse() throws Exception {
+	@Test public void testSimpleParse() throws Exception {
 		Grammar pg = new Grammar(
 			"parser grammar p;\n"+
 			"prog : WHILE ID LCURLY (assign)* RCURLY EOF;\n" +
@@ -73,7 +78,7 @@ public class TestInterpretedParsing extends BaseTest {
 		assertEquals(expecting, result);
 	}
 
-	public void testMismatchedTokenError() throws Exception {
+	@Test public void testMismatchedTokenError() throws Exception {
 		Grammar pg = new Grammar(
 			"parser grammar p;\n"+
 			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
@@ -108,7 +113,7 @@ public class TestInterpretedParsing extends BaseTest {
 		assertEquals(expecting, result);
 	}
 
-	public void testMismatchedSetError() throws Exception {
+	@Test public void testMismatchedSetError() throws Exception {
 		Grammar pg = new Grammar(
 			"parser grammar p;\n"+
 			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
@@ -143,7 +148,7 @@ public class TestInterpretedParsing extends BaseTest {
 		assertEquals(expecting, result);
 	}
 
-	public void testNoViableAltError() throws Exception {
+	@Test public void testNoViableAltError() throws Exception {
 		Grammar pg = new Grammar(
 			"parser grammar p;\n"+
 			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
@@ -174,7 +179,7 @@ public class TestInterpretedParsing extends BaseTest {
 		ParseTree t = parseEngine.parse("prog");
 		String result = t.toStringTree();
 		String expecting =
-			"(<grammar p> (prog while x { (assign i = (expr NoViableAltException(9!=[4:1: expr : ( INT | FLOAT | ID );])))))";
+			"(<grammar p> (prog while x { (assign i = (expr NoViableAltException(9@[4:1: expr : ( INT | FLOAT | ID );])))))";
 		assertEquals(expecting, result);
 	}
 
diff --git a/src/org/antlr/test/TestIntervalSet.java b/tool/src/test/java/org/antlr/test/TestIntervalSet.java
similarity index 82%
rename from src/org/antlr/test/TestIntervalSet.java
rename to tool/src/test/java/org/antlr/test/TestIntervalSet.java
index 4f5326b..e4eba97 100644
--- a/src/org/antlr/test/TestIntervalSet.java
+++ b/tool/src/test/java/org/antlr/test/TestIntervalSet.java
@@ -33,19 +33,25 @@ import org.antlr.misc.IntervalSet;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+
 public class TestIntervalSet extends BaseTest {
 
     /** Public default constructor used by TestRig */
     public TestIntervalSet() {
     }
 
-    public void testSingleElement() throws Exception {
+    @Test public void testSingleElement() throws Exception {
         IntervalSet s = IntervalSet.of(99);
         String expecting = "99";
         assertEquals(s.toString(), expecting);
     }
 
-    public void testIsolatedElements() throws Exception {
+    @Test public void testIsolatedElements() throws Exception {
         IntervalSet s = new IntervalSet();
         s.add(1);
         s.add('z');
@@ -54,7 +60,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(s.toString(), expecting);
     }
 
-    public void testMixedRangesAndElements() throws Exception {
+    @Test public void testMixedRangesAndElements() throws Exception {
         IntervalSet s = new IntervalSet();
         s.add(1);
         s.add('a','z');
@@ -63,7 +69,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(s.toString(), expecting);
     }
 
-    public void testSimpleAnd() throws Exception {
+    @Test public void testSimpleAnd() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(13,15);
         String expecting = "13..15";
@@ -71,7 +77,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testRangeAndIsolatedElement() throws Exception {
+    @Test public void testRangeAndIsolatedElement() throws Exception {
         IntervalSet s = IntervalSet.of('a','z');
         IntervalSet s2 = IntervalSet.of('d');
         String expecting = "100";
@@ -79,7 +85,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-	public void testEmptyIntersection() throws Exception {
+	@Test public void testEmptyIntersection() throws Exception {
 		IntervalSet s = IntervalSet.of('a','z');
 		IntervalSet s2 = IntervalSet.of('0','9');
 		String expecting = "{}";
@@ -87,7 +93,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testEmptyIntersectionSingleElements() throws Exception {
+	@Test public void testEmptyIntersectionSingleElements() throws Exception {
 		IntervalSet s = IntervalSet.of('a');
 		IntervalSet s2 = IntervalSet.of('d');
 		String expecting = "{}";
@@ -95,7 +101,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-    public void testNotSingleElement() throws Exception {
+    @Test public void testNotSingleElement() throws Exception {
         IntervalSet vocabulary = IntervalSet.of(1,1000);
         vocabulary.add(2000,3000);
         IntervalSet s = IntervalSet.of(50,50);
@@ -104,7 +110,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-	public void testNotSet() throws Exception {
+	@Test public void testNotSet() throws Exception {
 		IntervalSet vocabulary = IntervalSet.of(1,1000);
 		IntervalSet s = IntervalSet.of(50,60);
 		s.add(5);
@@ -114,7 +120,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testNotEqualSet() throws Exception {
+	@Test public void testNotEqualSet() throws Exception {
 		IntervalSet vocabulary = IntervalSet.of(1,1000);
 		IntervalSet s = IntervalSet.of(1,1000);
 		String expecting = "{}";
@@ -122,7 +128,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testNotSetEdgeElement() throws Exception {
+	@Test public void testNotSetEdgeElement() throws Exception {
 		IntervalSet vocabulary = IntervalSet.of(1,2);
 		IntervalSet s = IntervalSet.of(1);
 		String expecting = "2";
@@ -130,7 +136,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-    public void testNotSetFragmentedVocabulary() throws Exception {
+    @Test public void testNotSetFragmentedVocabulary() throws Exception {
         IntervalSet vocabulary = IntervalSet.of(1,255);
         vocabulary.add(1000,2000);
         vocabulary.add(9999);
@@ -143,7 +149,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testSubtractOfCompletelyContainedRange() throws Exception {
+    @Test public void testSubtractOfCompletelyContainedRange() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(12,15);
         String expecting = "{10..11, 16..20}";
@@ -151,7 +157,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testSubtractOfOverlappingRangeFromLeft() throws Exception {
+    @Test public void testSubtractOfOverlappingRangeFromLeft() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(5,11);
         String expecting = "12..20";
@@ -164,7 +170,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testSubtractOfOverlappingRangeFromRight() throws Exception {
+    @Test public void testSubtractOfOverlappingRangeFromRight() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(15,25);
         String expecting = "10..14";
@@ -177,7 +183,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testSubtractOfCompletelyCoveredRange() throws Exception {
+    @Test public void testSubtractOfCompletelyCoveredRange() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(1,25);
         String expecting = "{}";
@@ -185,7 +191,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testSubtractOfRangeSpanningMultipleRanges() throws Exception {
+    @Test public void testSubtractOfRangeSpanningMultipleRanges() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         s.add(30,40);
         s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60
@@ -203,7 +209,7 @@ public class TestIntervalSet extends BaseTest {
 	/** The following was broken:
 	 	{0..113, 115..65534}-{0..115, 117..65534}=116..65534
 	 */
-	public void testSubtractOfWackyRange() throws Exception {
+	@Test public void testSubtractOfWackyRange() throws Exception {
 		IntervalSet s = IntervalSet.of(0,113);
 		s.add(115,200);
 		IntervalSet s2 = IntervalSet.of(0,115);
@@ -213,7 +219,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-    public void testSimpleEquals() throws Exception {
+    @Test public void testSimpleEquals() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(10,20);
         Boolean expecting = new Boolean(true);
@@ -226,7 +232,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testEquals() throws Exception {
+    @Test public void testEquals() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         s.add(2);
         s.add(499,501);
@@ -244,7 +250,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testSingleElementMinusDisjointSet() throws Exception {
+    @Test public void testSingleElementMinusDisjointSet() throws Exception {
         IntervalSet s = IntervalSet.of(15,15);
         IntervalSet s2 = IntervalSet.of(1,5);
         s2.add(10,20);
@@ -253,7 +259,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testMembership() throws Exception {
+    @Test public void testMembership() throws Exception {
         IntervalSet s = IntervalSet.of(15,15);
         s.add(50,60);
         assertTrue(!s.member(0));
@@ -266,7 +272,7 @@ public class TestIntervalSet extends BaseTest {
     }
 
     // {2,15,18} & 10..20
-    public void testIntersectionWithTwoContainedElements() throws Exception {
+    @Test public void testIntersectionWithTwoContainedElements() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(2,2);
         s2.add(15);
@@ -276,7 +282,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testIntersectionWithTwoContainedElementsReversed() throws Exception {
+    @Test public void testIntersectionWithTwoContainedElementsReversed() throws Exception {
         IntervalSet s = IntervalSet.of(10,20);
         IntervalSet s2 = IntervalSet.of(2,2);
         s2.add(15);
@@ -286,7 +292,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testComplement() throws Exception {
+    @Test public void testComplement() throws Exception {
         IntervalSet s = IntervalSet.of(100,100);
         s.add(101,101);
         IntervalSet s2 = IntervalSet.of(100,102);
@@ -295,7 +301,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-	public void testComplement2() throws Exception {
+	@Test public void testComplement2() throws Exception {
 		IntervalSet s = IntervalSet.of(100,101);
 		IntervalSet s2 = IntervalSet.of(100,102);
 		String expecting = "102";
@@ -303,15 +309,15 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testComplement3() throws Exception {
+	@Test public void testComplement3() throws Exception {
 		IntervalSet s = IntervalSet.of(1,96);
-		s.add(99,65534);
+		s.add(99,Label.MAX_CHAR_VALUE);
 		String expecting = "97..98";
 		String result = (s.complement(1,Label.MAX_CHAR_VALUE)).toString();
 		assertEquals(result, expecting);
 	}
 
-    public void testMergeOfRangesAndSingleValues() throws Exception {
+    @Test public void testMergeOfRangesAndSingleValues() throws Exception {
         // {0..41, 42, 43..65534}
         IntervalSet s = IntervalSet.of(0,41);
         s.add(42);
@@ -321,7 +327,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testMergeOfRangesAndSingleValuesReverse() throws Exception {
+    @Test public void testMergeOfRangesAndSingleValuesReverse() throws Exception {
         IntervalSet s = IntervalSet.of(43,65534);
         s.add(42);
         s.add(0,41);
@@ -330,7 +336,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-    public void testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception {
+    @Test public void testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception {
         // 42, 10, {0..9, 11..41, 43..65534}
         IntervalSet s = IntervalSet.of(42);
         s.add(10);
@@ -342,7 +348,7 @@ public class TestIntervalSet extends BaseTest {
         assertEquals(result, expecting);
     }
 
-	public void testMergeWithDoubleOverlap() throws Exception {
+	@Test public void testMergeWithDoubleOverlap() throws Exception {
 		IntervalSet s = IntervalSet.of(1,10);
 		s.add(20,30);
 		s.add(5,25); // overlaps two!
@@ -351,7 +357,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testSize() throws Exception {
+	@Test public void testSize() throws Exception {
 		IntervalSet s = IntervalSet.of(20,30);
 		s.add(50,55);
 		s.add(5,19);
@@ -360,7 +366,7 @@ public class TestIntervalSet extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testToList() throws Exception {
+	@Test public void testToList() throws Exception {
 		IntervalSet s = IntervalSet.of(20,25);
 		s.add(50,55);
 		s.add(5,5);
@@ -376,7 +382,7 @@ public class TestIntervalSet extends BaseTest {
 	 	'q' is 113 ascii
 	 	'u' is 117
 	*/
-	public void testNotRIntersectionNotT() throws Exception {
+	@Test public void testNotRIntersectionNotT() throws Exception {
 		IntervalSet s = IntervalSet.of(0,'s');
 		s.add('u',200);
 		IntervalSet s2 = IntervalSet.of(0,'q');
diff --git a/src/org/antlr/test/TestJavaCodeGeneration.java b/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
similarity index 77%
rename from src/org/antlr/test/TestJavaCodeGeneration.java
rename to tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
index 64a23a3..b6218b5 100644
--- a/src/org/antlr/test/TestJavaCodeGeneration.java
+++ b/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
@@ -27,43 +27,46 @@
 */
 package org.antlr.test;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /** General code generation testing; compilation and/or execution.
  *  These tests are more about avoiding duplicate var definitions
  *  etc... than testing a particular ANTLR feature.
  */
 public class TestJavaCodeGeneration extends BaseTest {
-	public void testDupVarDefForPinchedState() {
+	@Test public void testDupVarDefForPinchedState() {
 		// so->s2 and s0->s3->s1 pinches back to s1
 		// LA3_1, s1 state for DFA 3, was defined twice in similar scope
 		// just wrapped in curlies and it's cool.
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"a : (| A | B) X Y\n" +
 			"  | (| A | B) X Z\n" +
 			"  ;\n" ;
 		boolean found =
 			rawGenerateAndBuildRecognizer(
-				"t.g", grammar, "tParser", null, false);
+				"T.g", grammar, "TParser", null, false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, found);
 	}
 
-	public void testLabeledNotSetsInLexer() {
+	@Test public void testLabeledNotSetsInLexer() {
 		// d must be an int
 		String grammar =
-			"lexer grammar t;\n" +
+			"lexer grammar T;\n" +
 			"A : d=~('x'|'y') e='0'..'9'\n" +
 			"  ; \n" ;
 		boolean found =
 			rawGenerateAndBuildRecognizer(
-				"t.g", grammar, null, "tLexer", false);
+				"T.g", grammar, null, "T", false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, found);
 	}
 
-	public void testLabeledSetsInLexer() {
+	@Test public void testLabeledSetsInLexer() {
 		// d must be an int
 		String grammar =
 			"grammar T;\n" +
@@ -75,7 +78,7 @@ public class TestJavaCodeGeneration extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testLabeledRangeInLexer() {
+	@Test public void testLabeledRangeInLexer() {
 		// d must be an int
 		String grammar =
 			"grammar T;\n" +
@@ -87,7 +90,7 @@ public class TestJavaCodeGeneration extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testLabeledWildcardInLexer() {
+	@Test public void testLabeledWildcardInLexer() {
 		// d must be an int
 		String grammar =
 			"grammar T;\n" +
@@ -99,7 +102,7 @@ public class TestJavaCodeGeneration extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testSynpredWithPlusLoop() {
+	@Test public void testSynpredWithPlusLoop() {
 		String grammar =
 			"grammar T; \n" +
 			"a : (('x'+)=> 'x'+)?;\n";
@@ -110,13 +113,17 @@ public class TestJavaCodeGeneration extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testDoubleQuoteEscape() {
+	@Test public void testDoubleQuoteEscape() {
 		String grammar =
 			"lexer grammar T; \n" +
-			"A : '\\\\\"';\n"; // this is A : '\\"';
+			"A : '\\\\\"';\n" +          // this is A : '\\"', which should give "\\\"" at Java level;
+            "B : '\\\"';\n" +            // this is B: '\"', which should give "\"" at Java level;
+            "C : '\\'\\'';\n" +          // this is C: '\'\'', which should give "''" at Java level
+            "D : '\\k';\n";              // this is D: '\k', which should give just "k" at Java level;
+        
 		boolean found =
 			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "TLexer", false);
+				"T.g", grammar, null, "T", false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, found);
 	}
diff --git a/src/org/antlr/test/TestLexer.java b/tool/src/test/java/org/antlr/test/TestLexer.java
similarity index 67%
rename from src/org/antlr/test/TestLexer.java
rename to tool/src/test/java/org/antlr/test/TestLexer.java
index 3889140..b17d639 100644
--- a/src/org/antlr/test/TestLexer.java
+++ b/tool/src/test/java/org/antlr/test/TestLexer.java
@@ -27,6 +27,13 @@
 */
 package org.antlr.test;
 
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import org.antlr.Tool;
+import org.antlr.tool.Grammar;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.codegen.CodeGenerator;
+
 public class TestLexer extends BaseTest {
 	protected boolean debug = false;
 
@@ -34,7 +41,7 @@ public class TestLexer extends BaseTest {
 	public TestLexer() {
 	}
 
-	public void testSetText() throws Exception {
+	@Test public void testSetText() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -47,7 +54,7 @@ public class TestLexer extends BaseTest {
 		assertEquals("\t\n", found);
 	}
 
-	public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception {
+	@Test public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -61,7 +68,21 @@ public class TestLexer extends BaseTest {
 		assertEquals("-34\n", found);
 	}
 
-	public void testWeCanSetType() throws Exception {
+	@Test public void testRefToRuleDoesNotSetChannel() throws Exception {
+		// this must set channel of A to HIDDEN.  $channel is local to rule
+		// like $type.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF {System.out.println($A.text+\", channel=\"+$A.channel);} ;\n"+
+			"A : '-' WS I ;\n" +
+			"I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "- 34", debug);
+		assertEquals("- 34, channel=0\n", found);
+	}
+
+	@Test public void testWeCanSetType() throws Exception {
 		String grammar =
 			"grammar P;\n"+
 			"tokens {X;}\n" +
@@ -74,7 +95,7 @@ public class TestLexer extends BaseTest {
 		assertEquals("-34\n", found);
 	}
 
-	public void testRefToFragment() throws Exception {
+	@Test public void testRefToFragment() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -88,7 +109,7 @@ public class TestLexer extends BaseTest {
 		assertEquals("-34\n", found);
 	}
 
-	public void testMultipleRefToFragment() throws Exception {
+	@Test public void testMultipleRefToFragment() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -102,7 +123,7 @@ public class TestLexer extends BaseTest {
 		assertEquals("3.14159\n", found);
 	}
 
-	public void testLabelInSubrule() throws Exception {
+	@Test public void testLabelInSubrule() throws Exception {
 		// can we see v outside?
 		String grammar =
 			"grammar P;\n"+
@@ -115,7 +136,7 @@ public class TestLexer extends BaseTest {
 		assertEquals("342\n", found);
 	}
 
-	public void testRefToTokenInLexer() throws Exception {
+	@Test public void testRefToTokenInLexer() throws Exception {
 		String grammar =
 			"grammar P;\n"+
 			"a : A EOF ;\n"+
@@ -127,7 +148,7 @@ public class TestLexer extends BaseTest {
 		assertEquals("342\n", found);
 	}
 
-	public void testListLabelInLexer() throws Exception {
+	@Test public void testListLabelInLexer() throws Exception {
 		String grammar =
 			"grammar P;\n"+
 			"a : A ;\n"+
@@ -139,7 +160,7 @@ public class TestLexer extends BaseTest {
 		assertEquals(" 33 297\n", found);
 	}
 
-	public void testDupListRefInLexer() throws Exception {
+	@Test public void testDupListRefInLexer() throws Exception {
 		String grammar =
 			"grammar P;\n"+
 			"a : A ;\n"+
@@ -151,7 +172,7 @@ public class TestLexer extends BaseTest {
 		assertEquals(" 33 297\n", found);
 	}
 
-	public void testCharLabelInLexer() {
+	@Test public void testCharLabelInLexer() {
 		String grammar =
 			"grammar T;\n" +
 			"a : B ;\n" +
@@ -161,39 +182,74 @@ public class TestLexer extends BaseTest {
 		assertEquals("a\n", found);
 	}
 
-	public void testRepeatedLabelInLexer() {
+	@Test public void testRepeatedLabelInLexer() {
 		String grammar =
-			"lexer grammar t;\n" +
+			"lexer grammar T;\n" +
 			"B : x='a' x='b' ;\n" ;
 		boolean found =
 			rawGenerateAndBuildRecognizer(
-				"t.g", grammar, null, "tLexer", false);
+				"T.g", grammar, null, "T", false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, found);
 	}
 
-	public void testRepeatedRuleLabelInLexer() {
+	@Test public void testRepeatedRuleLabelInLexer() {
 		String grammar =
-			"lexer grammar t;\n" +
+			"lexer grammar T;\n" +
 			"B : x=A x=A ;\n" +
 			"fragment A : 'a' ;\n" ;
 		boolean found =
 			rawGenerateAndBuildRecognizer(
-				"t.g", grammar, null, "tLexer", false);
+				"T.g", grammar, null, "T", false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, found);
 	}
 
-	public void testIsolatedEOTEdge() {
+	@Test public void testIsolatedEOTEdge() {
 		String grammar =
 			"lexer grammar T;\n" +
 			"QUOTED_CONTENT \n" +
 			"        : 'q' (~'q')* (('x' 'q') )* 'q' ; \n";
 		boolean found =
 			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "TLexer", false);
+				"T.g", grammar, null, "T", false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, found);
 	}	
 
+	@Test public void testEscapedLiterals() {
+		/* Grammar:
+			A : '\"' ;  should match a single double-quote: "
+			B : '\\\"' ; should match input \"
+		*/
+		String grammar =
+			"lexer grammar T;\n" +
+			"A : '\\\"' ;\n" +
+			"B : '\\\\\\\"' ;\n" ; // '\\\"'
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+    @Test public void testNewlineLiterals() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar T;\n" +
+            "A : '\\n\\n' ;\n"  // ANTLR sees '\n\n'
+        );
+        String expecting = "match(\"\\n\\n\")";
+
+        Tool antlr = newTool();
+        antlr.setOutputDirectory(null); // write to /dev/null
+        CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+        g.setCodeGenerator(generator);
+        generator.genRecognizer(); // codegen phase sets some vars we need
+        StringTemplate codeST = generator.getRecognizerST();
+        String code = codeST.toString();
+        int m = code.indexOf("match(\"");
+        String found = code.substring(m,m+expecting.length());
+
+        assertEquals(expecting, found);
+    }
 }
diff --git a/tool/src/test/java/org/antlr/test/TestMessages.java b/tool/src/test/java/org/antlr/test/TestMessages.java
new file mode 100644
index 0000000..c6a2ad1
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestMessages.java
@@ -0,0 +1,78 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v2.ANTLRParser;
+import org.antlr.grammar.v3.ActionTranslator;
+import org.antlr.tool.*;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestMessages extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestMessages() {
+	}
+
+
+	@Test public void testMessageStringificationIsConsistent() throws Exception {
+		String action = "$other.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"options { output = AST;}" +
+			"otherrule\n" +
+			"    : 'y' ;" +
+			"rule\n" +
+			"    : other=otherrule {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+																	"rule",
+																	new antlr.CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "other";
+		Object expectedArg2 = "tree";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		String expectedMessageString = expectedMessage.toString();
+		assertEquals(expectedMessageString, expectedMessage.toString());
+	}
+}
diff --git a/src/org/antlr/test/TestNFAConstruction.java b/tool/src/test/java/org/antlr/test/TestNFAConstruction.java
similarity index 60%
rename from src/org/antlr/test/TestNFAConstruction.java
rename to tool/src/test/java/org/antlr/test/TestNFAConstruction.java
index 5d8f103..3ac2875 100644
--- a/src/org/antlr/test/TestNFAConstruction.java
+++ b/tool/src/test/java/org/antlr/test/TestNFAConstruction.java
@@ -31,68 +31,74 @@ import org.antlr.analysis.State;
 import org.antlr.tool.FASerializer;
 import org.antlr.tool.Grammar;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestNFAConstruction extends BaseTest {
 
-    /** Public default constructor used by TestRig */
-    public TestNFAConstruction() {
-    }
+	/** Public default constructor used by TestRig */
+	public TestNFAConstruction() {
+	}
 
-    public void testA() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-				"a : A;");
+	@Test public void testA() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s2-A->.s3\n" +
 			".s3->:s4\n" +
 			":s4-EOF->.s5\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testAB() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : A B ;");
-        String expecting =
-                ".s0->.s1\n" +
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAB() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A B ;");
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s2-A->.s3\n" +
 			".s3-B->.s4\n" +
 			".s4->:s5\n" +
 			":s5-EOF->.s6\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testAorB() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : A | B {;} ;");
-        /* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
-                                |                            ^
-                               (6)--Ep-->(7)--B-->(8)--------|
-         */
-        String expecting =
-                ".s0->.s1\n" +
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorB() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A | B {;} ;");
+		/* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
+										|                            ^
+									   (6)--Ep-->(7)--B-->(8)--------|
+				 */
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s1->.s7\n" +
+			".s10->.s4\n" +
 			".s2-A->.s3\n" +
 			".s3->.s4\n" +
 			".s4->:s5\n" +
 			".s7->.s8\n" +
 			".s8-B->.s9\n" +
-			".s9->.s4\n" +
+			".s9-{}->.s10\n" +
 			":s5-EOF->.s6\n";
-        checkRule(g, "a", expecting);
-    }
+		checkRule(g, "a", expecting);
+	}
 
-	public void testRangeOrRange() throws Exception {
+	@Test public void testRangeOrRange() throws Exception {
 		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"A : ('a'..'c' 'h' | 'q' 'j'..'l') ;"
+			"lexer grammar P;\n"+
+			"A : ('a'..'c' 'h' | 'q' 'j'..'l') ;"
 		);
-        String expecting =
-                ".s0->.s1\n" +
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s10-'q'->.s11\n" +
 			".s11-'j'..'l'->.s12\n" +
@@ -105,27 +111,27 @@ public class TestNFAConstruction extends BaseTest {
 			".s6->:s7\n" +
 			".s9->.s10\n" +
 			":s7-<EOT>->.s8\n";
-        checkRule(g, "A", expecting);
+		checkRule(g, "A", expecting);
 	}
 
-	public void testRange() throws Exception {
+	@Test public void testRange() throws Exception {
 		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"A : 'a'..'c' ;"
+			"lexer grammar P;\n"+
+			"A : 'a'..'c' ;"
 		);
-        String expecting =
-                ".s0->.s1\n" +
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s2-'a'..'c'->.s3\n" +
 			".s3->:s4\n" +
 			":s4-<EOT>->.s5\n";
-        checkRule(g, "A", expecting);
+		checkRule(g, "A", expecting);
 	}
 
-	public void testCharSetInParser() throws Exception {
+	@Test public void testCharSetInParser() throws Exception {
 		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : A|'b' ;"
+			"grammar P;\n"+
+			"a : A|'b' ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
@@ -136,12 +142,12 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testABorCD() throws Exception {
-			Grammar g = new Grammar(
-					"parser grammar P;\n"+
-					"a : A B | C D;");
-        String expecting =
-                ".s0->.s1\n" +
+	@Test public void testABorCD() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A B | C D;");
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s1->.s8\n" +
 			".s10-D->.s11\n" +
@@ -153,14 +159,14 @@ public class TestNFAConstruction extends BaseTest {
 			".s8->.s9\n" +
 			".s9-C->.s10\n" +
 			":s6-EOF->.s7\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testbA() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : b A ;\n"+
-                "b : B ;");
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testbA() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b A ;\n"+
+			"b : B ;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -173,17 +179,17 @@ public class TestNFAConstruction extends BaseTest {
 			".s9->:s10\n" +
 			":s10-EOF->.s11\n" +
 			":s7->.s8\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testbA_bC() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : b A ;\n"+
-                "b : B ;\n"+
-                "c : b C;");
-        String expecting =
-                ".s0->.s1\n" +
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testbA_bC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b A ;\n"+
+			"b : B ;\n"+
+			"c : b C;");
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s12->.s13\n" +
 			".s13-C->.s14\n" +
@@ -199,17 +205,17 @@ public class TestNFAConstruction extends BaseTest {
 			":s15-EOF->.s16\n" +
 			":s7->.s12\n" +
 			":s7->.s8\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testAorEpsilon() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : A | ;");
-        /* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
-                                |                            ^
-                               (6)--Ep-->(7)--Ep-->(8)-------|
-         */
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorEpsilon() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A | ;");
+		/* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
+										|                            ^
+									   (6)--Ep-->(7)--Ep-->(8)-------|
+				 */
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -221,13 +227,13 @@ public class TestNFAConstruction extends BaseTest {
 			".s8->.s9\n" +
 			".s9->.s4\n" +
 			":s5-EOF->.s6\n";
-        checkRule(g, "a", expecting);
-    }
+		checkRule(g, "a", expecting);
+	}
 
-	public void testAOptional() throws Exception {
+	@Test public void testAOptional() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (A)?;");
+			"parser grammar P;\n"+
+			"a : (A)?;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -241,10 +247,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testNakedAoptional() throws Exception {
+	@Test public void testNakedAoptional() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : A?;");
+			"parser grammar P;\n"+
+			"a : A?;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -258,22 +264,22 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-    public void testAorBthenC() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : (A | B) C;");
-        /* expecting
+	@Test public void testAorBthenC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B) C;");
+		/* expecting
 
-        (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5)--C-->(6)--Ep-->(7,end)
-                   |                            ^
-                  (8)--Ep-->(9)--B-->(10)-------|
-         */
-    }
+				(0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5)--C-->(6)--Ep-->(7,end)
+						   |                            ^
+						  (8)--Ep-->(9)--B-->(10)-------|
+				 */
+	}
 
-	public void testAplus() throws Exception {
+	@Test public void testAplus() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (A)+;");
+			"parser grammar P;\n"+
+			"a : (A)+;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -287,10 +293,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testNakedAplus() throws Exception {
+	@Test public void testNakedAplus() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : A+;");
+			"parser grammar P;\n"+
+			"a : A+;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -304,10 +310,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAplusNonGreedy() throws Exception {
+	@Test public void testAplusNonGreedy() throws Exception {
 		Grammar g = new Grammar(
-				"lexer grammar t;\n"+
-				"A : (options {greedy=false;}:'0'..'9')+ ;\n");
+			"lexer grammar t;\n"+
+			"A : (options {greedy=false;}:'0'..'9')+ ;\n");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -321,16 +327,17 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "A", expecting);
 	}
 
-    public void testAorBplus() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : (A | B{action})+ ;");
+	@Test public void testAorBplus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B{action})+ ;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s10->.s11\n" +
 			".s11-B->.s12\n" +
-			".s12->.s6\n" +
+			".s12-{}->.s13\n" +
+			".s13->.s6\n" +
 			".s2->.s3\n" +
 			".s3->.s10\n" +
 			".s3->.s4\n" +
@@ -340,15 +347,15 @@ public class TestNFAConstruction extends BaseTest {
 			".s6->.s7\n" +
 			".s7->:s8\n" +
 			":s8-EOF->.s9\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testAorBorEmptyPlus() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : (A | B | )+ ;");
-        String expecting =
-            ".s0->.s1\n" +
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorBorEmptyPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B | )+ ;");
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s10->.s11\n" +
 			".s10->.s13\n" +
@@ -366,13 +373,13 @@ public class TestNFAConstruction extends BaseTest {
 			".s6->.s7\n" +
 			".s7->:s8\n" +
 			":s8-EOF->.s9\n";
-        checkRule(g, "a", expecting);
-    }
+		checkRule(g, "a", expecting);
+	}
 
-	public void testAStar() throws Exception {
+	@Test public void testAStar() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (A)*;");
+			"parser grammar P;\n"+
+			"a : (A)*;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -388,10 +395,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testNestedAstar() throws Exception {
+	@Test public void testNestedAstar() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (A*)*;");
+			"parser grammar P;\n"+
+			"a : (A*)*;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -414,10 +421,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testPlusNestedInStar() throws Exception {
+	@Test public void testPlusNestedInStar() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (A+)*;");
+			"parser grammar P;\n"+
+			"a : (A+)*;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -438,10 +445,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testStarNestedInPlus() throws Exception {
+	@Test public void testStarNestedInPlus() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (A*)+;");
+			"parser grammar P;\n"+
+			"a : (A*)+;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -462,10 +469,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testNakedAstar() throws Exception {
+	@Test public void testNakedAstar() throws Exception {
 		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : A*;");
+			"parser grammar P;\n"+
+			"a : A*;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -481,18 +488,19 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-    public void testAorBstar() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : (A | B{action})* ;");
+	@Test public void testAorBstar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B{action})* ;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s10->.s11\n" +
 			".s11-B->.s12\n" +
-			".s12->.s6\n" +
-			".s13->.s7\n" +
-			".s2->.s13\n" +
+			".s12-{}->.s13\n" +
+			".s13->.s6\n" +
+			".s14->.s7\n" +
+			".s2->.s14\n" +
 			".s2->.s3\n" +
 			".s3->.s10\n" +
 			".s3->.s4\n" +
@@ -502,15 +510,15 @@ public class TestNFAConstruction extends BaseTest {
 			".s6->.s7\n" +
 			".s7->:s8\n" +
 			":s8-EOF->.s9\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testAorBOptionalSubrule() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : ( A | B )? ;");
-        String expecting =
-            ".s0->.s1\n" +
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorBOptionalSubrule() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( A | B )? ;");
+		String expecting =
+			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s2->.s3\n" +
 			".s2->.s8\n" +
@@ -519,13 +527,13 @@ public class TestNFAConstruction extends BaseTest {
 			".s5->:s6\n" +
 			".s8->.s5\n" +
 			":s6-EOF->.s7\n";
-        checkRule(g, "a", expecting);
-    }
+		checkRule(g, "a", expecting);
+	}
 
-    public void testPredicatedAorB() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : {p1}? A | {p2}? B ;");
+	@Test public void testPredicatedAorB() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {p2}? B ;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -539,14 +547,14 @@ public class TestNFAConstruction extends BaseTest {
 			".s8->.s9\n" +
 			".s9-{p2}?->.s10\n" +
 			":s6-EOF->.s7\n";
-        checkRule(g, "a", expecting);
-    }
-
-    public void testMultiplePredicates() throws Exception {
-        Grammar g = new Grammar(
-                "parser grammar P;\n"+
-                "a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" +
-                "b : {p4}? B ;");
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testMultiplePredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" +
+			"b : {p4}? B ;");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -555,26 +563,27 @@ public class TestNFAConstruction extends BaseTest {
 			".s11-B->.s12\n" +
 			".s12->.s6\n" +
 			".s13->.s14\n" +
-			".s14->.s15\n" +
+			".s14-{}->.s15\n" +
 			".s15->.s16\n" +
 			".s16->.s17\n" +
-			".s17-{p4}?->.s18\n" +
-			".s18-B->.s19\n" +
-			".s19->:s20\n" +
+			".s17->.s18\n" +
+			".s18-{p4}?->.s19\n" +
+			".s19-B->.s20\n" +
 			".s2-{p1}?->.s3\n" +
-			".s21->.s6\n" +
+			".s20->:s21\n" +
+			".s22->.s6\n" +
 			".s3-{p1a}?->.s4\n" +
 			".s4-A->.s5\n" +
 			".s5->.s6\n" +
 			".s6->:s7\n" +
 			".s9->.s10\n" +
 			".s9->.s13\n" +
-			":s20->.s21\n" +
+			":s21->.s22\n" +
 			":s7-EOF->.s8\n";
-        checkRule(g, "a", expecting);
+		checkRule(g, "a", expecting);
 	}
 
-	public void testSets() throws Exception {
+	@Test public void testSets() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : ( A | B )+ ;\n" +
@@ -598,7 +607,8 @@ public class TestNFAConstruction extends BaseTest {
 			".s1->.s2\n" +
 			".s10->.s11\n" +
 			".s11-B->.s12\n" +
-			".s12->.s6\n" +
+			".s12-{}->.s13\n" +
+			".s13->.s6\n" +
 			".s2->.s3\n" +
 			".s3->.s10\n" +
 			".s3->.s4\n" +
@@ -643,7 +653,7 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "e", expecting);
 	}
 
-	public void testNotSet() throws Exception {
+	@Test public void testNotSet() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"tokens { A; B; C; }\n"+
@@ -662,7 +672,7 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testNotSingletonBlockSet() throws Exception {
+	@Test public void testNotSingletonBlockSet() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"tokens { A; B; C; }\n"+
@@ -681,33 +691,33 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testNotCharSet() throws Exception {
+	@Test public void testNotCharSet() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : ~'3' ;\n");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'\\uFFFE'}->.s3\n" +
+			".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" +
 			".s3->:s4\n" +
 			":s4-<EOT>->.s5\n";
 		checkRule(g, "A", expecting);
 
 		String expectingGrammarStr =
 			"1:7: lexer grammar P;\n" +
-				"A : ~ '3' ;\n"+
-				"Tokens : A ;";
+			"A : ~ '3' ;\n"+
+			"Tokens : A ;";
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testNotBlockSet() throws Exception {
+	@Test public void testNotBlockSet() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : ~('3'|'b') ;\n");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFE'}->.s3\n" +
+			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" +
 			".s3->:s4\n" +
 			":s4-<EOT>->.s5\n";
 		checkRule(g, "A", expecting);
@@ -719,7 +729,7 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testNotSetLoop() throws Exception {
+	@Test public void testNotSetLoop() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : ~('3')* ;\n");
@@ -729,7 +739,7 @@ public class TestNFAConstruction extends BaseTest {
 			".s2->.s3\n" +
 			".s2->.s9\n" +
 			".s3->.s4\n" +
-			".s4-{'\\u0000'..'2', '4'..'\\uFFFE'}->.s5\n" +
+			".s4-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s5\n" +
 			".s5->.s3\n" +
 			".s5->.s6\n" +
 			".s6->:s7\n" +
@@ -744,7 +754,7 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testNotBlockSetLoop() throws Exception {
+	@Test public void testNotBlockSetLoop() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : ~('3'|'b')* ;\n");
@@ -754,7 +764,7 @@ public class TestNFAConstruction extends BaseTest {
 			".s2->.s3\n" +
 			".s2->.s9\n" +
 			".s3->.s4\n" +
-			".s4-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFE'}->.s5\n" +
+			".s4-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s5\n" +
 			".s5->.s3\n" +
 			".s5->.s6\n" +
 			".s6->:s7\n" +
@@ -769,7 +779,7 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testSetsInCombinedGrammarSentToLexer() throws Exception {
+	@Test public void testSetsInCombinedGrammarSentToLexer() throws Exception {
 		// not sure this belongs in this test suite, but whatever.
 		Grammar g = new Grammar(
 			"grammar t;\n"+
@@ -783,7 +793,7 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(result, expecting);
 	}
 
-	public void testLabeledNotSet() throws Exception {
+	@Test public void testLabeledNotSet() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"tokens { A; B; C; }\n"+
@@ -802,33 +812,33 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testLabeledNotCharSet() throws Exception {
+	@Test public void testLabeledNotCharSet() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : t=~'3' ;\n");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'\\uFFFE'}->.s3\n" +
+			".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" +
 			".s3->:s4\n" +
 			":s4-<EOT>->.s5\n";
 		checkRule(g, "A", expecting);
 
 		String expectingGrammarStr =
 			"1:7: lexer grammar P;\n" +
-				"A : t=~ '3' ;\n"+
-				"Tokens : A ;";
+			"A : t=~ '3' ;\n"+
+			"Tokens : A ;";
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testLabeledNotBlockSet() throws Exception {
+	@Test public void testLabeledNotBlockSet() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : t=~('3'|'b') ;\n");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFE'}->.s3\n" +
+			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" +
 			".s3->:s4\n" +
 			":s4-<EOT>->.s5\n";
 		checkRule(g, "A", expecting);
@@ -840,10 +850,10 @@ public class TestNFAConstruction extends BaseTest {
 		assertEquals(expectingGrammarStr, g.toString());
 	}
 
-	public void testEscapedCharLiteral() throws Exception {
+	@Test public void testEscapedCharLiteral() throws Exception {
 		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : '\\n';");
+			"grammar P;\n"+
+			"a : '\\n';");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -853,10 +863,10 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testEscapedStringLiteral() throws Exception {
+	@Test public void testEscapedStringLiteral() throws Exception {
 		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : 'a\\nb\\u0030c\\'';");
+			"grammar P;\n"+
+			"a : 'a\\nb\\u0030c\\'';");
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
@@ -868,69 +878,71 @@ public class TestNFAConstruction extends BaseTest {
 
 	// AUTO BACKTRACKING STUFF
 
-	public void testAutoBacktracking_RuleBlock() throws Exception {
+	@Test public void testAutoBacktracking_RuleBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : 'a'{;}|'b';"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : 'a'{;}|'b';"
 		);
 		String expecting =
 			".s0->.s1\n" +
-				".s1->.s2\n" +
-				".s1->.s8\n" +
-				".s10->.s5\n" +
-				".s2-{synpred1}?->.s3\n" +
-				".s3-'a'->.s4\n" +
-				".s4->.s5\n" +
-				".s5->:s6\n" +
-				".s8->.s9\n" +
-				".s9-'b'->.s10\n" +
-				":s6-EOF->.s7\n";
+			".s1->.s2\n" +
+			".s1->.s9\n" +
+			".s10-'b'->.s11\n" +
+			".s11->.s6\n" +
+			".s2-{synpred1_t}?->.s3\n" +
+			".s3-'a'->.s4\n" +
+			".s4-{}->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			":s7-EOF->.s8\n";
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_RuleSetBlock() throws Exception {
+	@Test public void testAutoBacktracking_RuleSetBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : 'a'|'b';"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : 'a'|'b';"
 		);
 		String expecting =
 			".s0->.s1\n" +
-				".s1->.s2\n" +
-				".s2-'a'..'b'->.s3\n" +
-				".s3->:s4\n" +
-				":s4-EOF->.s5\n";
+			".s1->.s2\n" +
+			".s2-'a'..'b'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_SimpleBlock() throws Exception {
+	@Test public void testAutoBacktracking_SimpleBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'{;}|'b') ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b') ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
-			".s10-'b'->.s11\n" +
-			".s11->.s6\n" +
+			".s10->.s11\n" +
+			".s11-'b'->.s12\n" +
+			".s12->.s7\n" +
+			".s2->.s10\n" +
 			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3-{synpred1}?->.s4\n" +
+			".s3-{synpred1_t}?->.s4\n" +
 			".s4-'a'->.s5\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s10\n" +
-			":s7-EOF->.s8\n";
+			".s5-{}->.s6\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_SetBlock() throws Exception {
+	@Test public void testAutoBacktracking_SetBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'|'b') ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b') ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
@@ -941,39 +953,40 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_StarBlock() throws Exception {
+	@Test public void testAutoBacktracking_StarBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'{;}|'b')* ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b')* ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
-				".s1->.s2\n" +
-				".s11->.s12\n" +
-				".s12-{synpred2}?->.s13\n" +
-				".s13-'b'->.s14\n" +
-				".s14->.s7\n" +
-				".s15->.s8\n" +
-				".s2->.s15\n" +
-				".s2->.s3\n" +
-				".s3->.s11\n" +
-				".s3->.s4\n" +
-				".s4-{synpred1}?->.s5\n" +
-				".s5-'a'->.s6\n" +
-				".s6->.s7\n" +
-				".s7->.s3\n" +
-				".s7->.s8\n" +
-				".s8->:s9\n" +
-				":s9-EOF->.s10\n";
+			".s1->.s2\n" +
+			".s12->.s13\n" +
+			".s13-{synpred2_t}?->.s14\n" +
+			".s14-'b'->.s15\n" +
+			".s15->.s8\n" +
+			".s16->.s9\n" +
+			".s2->.s16\n" +
+			".s2->.s3\n" +
+			".s3->.s12\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6-{}->.s7\n" +
+			".s7->.s8\n" +
+			".s8->.s3\n" +
+			".s8->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n";
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception {
+	@Test public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'|'b')* ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b')* ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
@@ -990,25 +1003,26 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_StarSetBlock() throws Exception {
+	@Test public void testAutoBacktracking_StarSetBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'|'b'{;})* ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b'{;})* ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s11->.s12\n" +
-			".s12-{synpred2}?->.s13\n" +
+			".s12-{synpred2_t}?->.s13\n" +
 			".s13-'b'->.s14\n" +
-			".s14->.s7\n" +
-			".s15->.s8\n" +
-			".s2->.s15\n" +
+			".s14-{}->.s15\n" +
+			".s15->.s7\n" +
+			".s16->.s8\n" +
+			".s2->.s16\n" +
 			".s2->.s3\n" +
 			".s3->.s11\n" +
 			".s3->.s4\n" +
-			".s4-{synpred1}?->.s5\n" +
+			".s4-{synpred1_t}?->.s5\n" +
 			".s5-'a'->.s6\n" +
 			".s6->.s7\n" +
 			".s7->.s3\n" +
@@ -1018,11 +1032,11 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_StarBlock1Alt() throws Exception {
+	@Test public void testAutoBacktracking_StarBlock1Alt() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a')* ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')* ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
@@ -1031,7 +1045,7 @@ public class TestNFAConstruction extends BaseTest {
 			".s2->.s10\n" +
 			".s2->.s3\n" +
 			".s3->.s4\n" +
-			".s4-{synpred1}?->.s5\n" +
+			".s4-{synpred1_t}?->.s5\n" +
 			".s5-'a'->.s6\n" +
 			".s6->.s3\n" +
 			".s6->.s7\n" +
@@ -1040,49 +1054,51 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_PlusBlock() throws Exception {
+	@Test public void testAutoBacktracking_PlusBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'{;}|'b')+ ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b')+ ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
-				".s1->.s2\n" +
-				".s11->.s12\n" +
-				".s12-{synpred2}?->.s13\n" +
-				".s13-'b'->.s14\n" +
-				".s14->.s7\n" +
-				".s2->.s3\n" +
-				".s3->.s11\n" +
-				".s3->.s4\n" +
-				".s4-{synpred1}?->.s5\n" +
-				".s5-'a'->.s6\n" +
-				".s6->.s7\n" +
-				".s7->.s3\n" +
-				".s7->.s8\n" +
-				".s8->:s9\n" +
-				":s9-EOF->.s10\n";
+			".s1->.s2\n" +
+			".s12->.s13\n" +
+			".s13-{synpred2_t}?->.s14\n" +
+			".s14-'b'->.s15\n" +
+			".s15->.s8\n" +
+			".s2->.s3\n" +
+			".s3->.s12\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6-{}->.s7\n" +
+			".s7->.s8\n" +
+			".s8->.s3\n" +
+			".s8->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n";
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_PlusSetBlock() throws Exception {
+	@Test public void testAutoBacktracking_PlusSetBlock() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'|'b'{;})+ ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b'{;})+ ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s11->.s12\n" +
-			".s12-{synpred2}?->.s13\n" +
+			".s12-{synpred2_t}?->.s13\n" +
 			".s13-'b'->.s14\n" +
-			".s14->.s7\n" +
+			".s14-{}->.s15\n" +
+			".s15->.s7\n" +
 			".s2->.s3\n" +
 			".s3->.s11\n" +
 			".s3->.s4\n" +
-			".s4-{synpred1}?->.s5\n" +
+			".s4-{synpred1_t}?->.s5\n" +
 			".s5-'a'->.s6\n" +
 			".s6->.s7\n" +
 			".s7->.s3\n" +
@@ -1092,18 +1108,18 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_PlusBlock1Alt() throws Exception {
+	@Test public void testAutoBacktracking_PlusBlock1Alt() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a')+ ;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')+ ;"
 		);
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s2->.s3\n" +
 			".s3->.s4\n" +
-			".s4-{synpred1}?->.s5\n" +
+			".s4-{synpred1_t}?->.s5\n" +
 			".s5-'a'->.s6\n" +
 			".s6->.s3\n" +
 			".s6->.s7\n" +
@@ -1112,43 +1128,44 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_OptionalBlock2Alts() throws Exception {
+	@Test public void testAutoBacktracking_OptionalBlock2Alts() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a'{;}|'b')?;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b')?;"
 		);
 		String expecting =
 			".s0->.s1\n" +
-				".s1->.s2\n" +
-				".s10-{synpred2}?->.s11\n" +
-				".s11-'b'->.s12\n" +
-				".s12->.s6\n" +
-				".s13->.s6\n" +
-				".s2->.s3\n" +
-				".s2->.s9\n" +
-				".s3-{synpred1}?->.s4\n" +
-				".s4-'a'->.s5\n" +
-				".s5->.s6\n" +
-				".s6->:s7\n" +
-				".s9->.s10\n" +
-				".s9->.s13\n" +
-				":s7-EOF->.s8\n";
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s10->.s14\n" +
+			".s11-{synpred2_t}?->.s12\n" +
+			".s12-'b'->.s13\n" +
+			".s13->.s7\n" +
+			".s14->.s7\n" +
+			".s2->.s10\n" +
+			".s2->.s3\n" +
+			".s3-{synpred1_t}?->.s4\n" +
+			".s4-'a'->.s5\n" +
+			".s5-{}->.s6\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_OptionalBlock1Alt() throws Exception {
+	@Test public void testAutoBacktracking_OptionalBlock1Alt() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a')?;"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')?;"
 		);
 		String expecting =
 			".s0->.s1\n" +
 			".s1->.s2\n" +
 			".s2->.s3\n" +
 			".s2->.s9\n" +
-			".s3-{synpred1}?->.s4\n" +
+			".s3-{synpred1_t}?->.s4\n" +
 			".s4-'a'->.s5\n" +
 			".s5->.s6\n" +
 			".s6->:s7\n" +
@@ -1157,36 +1174,36 @@ public class TestNFAConstruction extends BaseTest {
 		checkRule(g, "a", expecting);
 	}
 
-	public void testAutoBacktracking_ExistingPred() throws Exception {
+	@Test public void testAutoBacktracking_ExistingPred() throws Exception {
 		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"options {backtrack=true;}\n"+
-				"a : ('a')=> 'a' | 'b';"
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')=> 'a' | 'b';"
 		);
 		String expecting =
 			".s0->.s1\n" +
-				".s1->.s2\n" +
-				".s1->.s8\n" +
-				".s10->.s5\n" +
-				".s2-{synpred1}?->.s3\n" +
-				".s3-'a'->.s4\n" +
-				".s4->.s5\n" +
-				".s5->:s6\n" +
-				".s8->.s9\n" +
-				".s9-'b'->.s10\n" +
-				":s6-EOF->.s7\n";
+			".s1->.s2\n" +
+			".s1->.s8\n" +
+			".s10->.s5\n" +
+			".s2-{synpred1_t}?->.s3\n" +
+			".s3-'a'->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s9\n" +
+			".s9-'b'->.s10\n" +
+			":s6-EOF->.s7\n";
 		checkRule(g, "a", expecting);
 	}
 
 	private void checkRule(Grammar g, String rule, String expecting)
-    {
-        g.createNFAs();
-        State startState = g.getRuleStartState(rule);
-        FASerializer serializer = new FASerializer(g);
-        String result = serializer.serialize(startState);
-
-        //System.out.print(result);
-        assertEquals(expecting, result);
-    }
+	{
+		g.buildNFA();
+		State startState = g.getRuleStartState(rule);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(startState);
+
+		//System.out.print(result);
+		assertEquals(expecting, result);
+	}
 
 }
diff --git a/src/org/antlr/test/TestRewriteAST.java b/tool/src/test/java/org/antlr/test/TestRewriteAST.java
similarity index 73%
rename from src/org/antlr/test/TestRewriteAST.java
rename to tool/src/test/java/org/antlr/test/TestRewriteAST.java
index b68da12..d76deb8 100644
--- a/src/org/antlr/test/TestRewriteAST.java
+++ b/tool/src/test/java/org/antlr/test/TestRewriteAST.java
@@ -29,15 +29,18 @@ package org.antlr.test;
 
 import org.antlr.tool.ErrorManager;
 import org.antlr.tool.GrammarSemanticsMessage;
-import org.antlr.tool.Message;
 import org.antlr.tool.Grammar;
 import org.antlr.Tool;
 import org.antlr.codegen.CodeGenerator;
 
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestRewriteAST extends BaseTest {
 	protected boolean debug = false;
 
-	public void testDelete() throws Exception {
+	@Test public void testDelete() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -50,7 +53,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("", found);
 	}
 
-	public void testSingleToken() throws Exception {
+	@Test public void testSingleToken() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -63,7 +66,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testSingleTokenToNewNode() throws Exception {
+	@Test public void testSingleTokenToNewNode() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -76,7 +79,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testSingleTokenToNewNodeRoot() throws Exception {
+	@Test public void testSingleTokenToNewNodeRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -89,21 +92,21 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(x INT)\n", found);
 	}
 
-	public void testSingleTokenToNewNode2() throws Exception {
-		// currently this Fails.  Allow creation of new nodes w/o args.
+	@Test public void testSingleTokenToNewNode2() throws Exception {
+		// Allow creation of new nodes w/o args.
 		String grammar =
-			"grammar T;\n" +
+			"grammar TT;\n" +
 			"options {output=AST;}\n" +
 			"a : ID -> ID[ ];\n" +
 			"ID : 'a'..'z'+ ;\n" +
 			"INT : '0'..'9'+;\n" +
 			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
+		String found = execParser("TT.g", grammar, "TTParser", "TTLexer",
 				    "a", "abc", debug);
-		assertEquals("abc\n", found);
+		assertEquals("ID\n", found);
 	}
 
-	public void testSingleCharLiteral() throws Exception {
+	@Test public void testSingleCharLiteral() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -116,7 +119,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("c\n", found);
 	}
 
-	public void testSingleStringLiteral() throws Exception {
+	@Test public void testSingleStringLiteral() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -129,7 +132,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("ick\n", found);
 	}
 
-	public void testSingleRule() throws Exception {
+	@Test public void testSingleRule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -143,7 +146,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testReorderTokens() throws Exception {
+	@Test public void testReorderTokens() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -156,7 +159,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("34 abc\n", found);
 	}
 
-	public void testReorderTokenAndRule() throws Exception {
+	@Test public void testReorderTokenAndRule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -170,7 +173,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("34 abc\n", found);
 	}
 
-	public void testTokenTree() throws Exception {
+	@Test public void testTokenTree() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -183,7 +186,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(34 abc)\n", found);
 	}
 
-	public void testTokenTreeAfterOtherStuff() throws Exception {
+	@Test public void testTokenTreeAfterOtherStuff() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -196,7 +199,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("void (34 abc)\n", found);
 	}
 
-	public void testNestedTokenTreeWithOuterLoop() throws Exception {
+	@Test public void testNestedTokenTreeWithOuterLoop() throws Exception {
 		// verify that ID and INT both iterate over outer index variable
 		String grammar =
 			"grammar T;\n" +
@@ -211,7 +214,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))\n", found);
 	}
 
-	public void testOptionalSingleToken() throws Exception {
+	@Test public void testOptionalSingleToken() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -224,7 +227,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testClosureSingleToken() throws Exception {
+	@Test public void testClosureSingleToken() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -237,7 +240,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testPositiveClosureSingleToken() throws Exception {
+	@Test public void testPositiveClosureSingleToken() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -250,7 +253,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testOptionalSingleRule() throws Exception {
+	@Test public void testOptionalSingleRule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -264,7 +267,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testClosureSingleRule() throws Exception {
+	@Test public void testClosureSingleRule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -278,7 +281,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testClosureOfLabel() throws Exception {
+	@Test public void testClosureOfLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -292,7 +295,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testOptionalLabelNoListLabel() throws Exception {
+	@Test public void testOptionalLabelNoListLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -305,7 +308,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a\n", found);
 	}
 
-	public void testPositiveClosureSingleRule() throws Exception {
+	@Test public void testPositiveClosureSingleRule() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -319,7 +322,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testSinglePredicateT() throws Exception {
+	@Test public void testSinglePredicateT() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -332,7 +335,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testSinglePredicateF() throws Exception {
+	@Test public void testSinglePredicateF() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -345,7 +348,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("", found);
 	}
 
-	public void testMultiplePredicate() throws Exception {
+	@Test public void testMultiplePredicate() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -361,7 +364,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("2\n", found);
 	}
 
-	public void testMultiplePredicateTrees() throws Exception {
+	@Test public void testMultiplePredicateTrees() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -377,7 +380,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(2 a)\n", found);
 	}
 
-	public void testSimpleTree() throws Exception {
+	@Test public void testSimpleTree() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -391,7 +394,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(- 34)\n", found);
 	}
 
-	public void testSimpleTree2() throws Exception {
+	@Test public void testSimpleTree2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -406,7 +409,7 @@ public class TestRewriteAST extends BaseTest {
 	}
 
 
-	public void testNestedTrees() throws Exception {
+	@Test public void testNestedTrees() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -420,7 +423,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(var (: a int) (: b float))\n", found);
 	}
 
-	public void testImaginaryTokenCopy() throws Exception {
+	@Test public void testImaginaryTokenCopy() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -435,7 +438,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(VAR a) (VAR b) (VAR c)\n", found);
 	}
 
-	public void testTokenUnreferencedOnLeftButDefined() throws Exception {
+	@Test public void testTokenUnreferencedOnLeftButDefined() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -450,7 +453,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("ID\n", found);
 	}
 
-	public void testImaginaryTokenCopySetText() throws Exception {
+	@Test public void testImaginaryTokenCopySetText() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -465,7 +468,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(var a) (var b) (var c)\n", found);
 	}
 
-	public void testImaginaryTokenNoCopyFromToken() throws Exception {
+	@Test public void testImaginaryTokenNoCopyFromToken() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -480,7 +483,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("({ a b c)\n", found);
 	}
 
-	public void testImaginaryTokenNoCopyFromTokenSetText() throws Exception {
+	@Test public void testImaginaryTokenNoCopyFromTokenSetText() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -495,7 +498,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(block a b c)\n", found);
 	}
 
-	public void testMixedRewriteAndAutoAST() throws Exception {
+	@Test public void testMixedRewriteAndAutoAST() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -512,7 +515,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(2 1 a)\n", found);
 	}
 
-	public void testSubruleWithRewrite() throws Exception {
+	@Test public void testSubruleWithRewrite() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -528,7 +531,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("1 a 2 3\n", found);
 	}
 
-	public void testSubruleWithRewrite2() throws Exception {
+	@Test public void testSubruleWithRewrite2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -548,7 +551,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(TYPE int a) (TYPE int b 3)\n", found);
 	}
 
-	public void testNestedRewriteShutsOffAutoAST() throws Exception {
+	@Test public void testNestedRewriteShutsOffAutoAST() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -565,7 +568,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("d 42\n", found);
 	}
 
-	public void testRewriteActions() throws Exception {
+	@Test public void testRewriteActions() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -579,7 +582,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(9 3)\n", found);
 	}
 
-	public void testRewriteActions2() throws Exception {
+	@Test public void testRewriteActions2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -593,7 +596,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("9 3\n", found);
 	}
 
-	public void testRefToOldValue() throws Exception {
+	@Test public void testRefToOldValue() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -608,7 +611,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(+ (+ 3 4) 5)\n", found);
 	}
 
-	public void testCopySemanticsForRules() throws Exception {
+	@Test public void testCopySemanticsForRules() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -623,7 +626,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(3 3)\n", found);
 	}
 
-	public void testCopySemanticsForRules2() throws Exception {
+	@Test public void testCopySemanticsForRules2() throws Exception {
 		// copy type as a root for each invocation of (...)+ in rewrite
 		String grammar =
 			"grammar T;\n" +
@@ -637,7 +640,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int a) (int b) (int c)\n", found);
 	}
 
-	public void testCopySemanticsForRules3() throws Exception {
+	@Test public void testCopySemanticsForRules3() throws Exception {
 		// copy type *and* modifier even though it's optional
 		// for each invocation of (...)+ in rewrite
 		String grammar =
@@ -653,7 +656,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int public a) (int public b) (int public c)\n", found);
 	}
 
-	public void testCopySemanticsForRules3Double() throws Exception {
+	@Test public void testCopySemanticsForRules3Double() throws Exception {
 		// copy type *and* modifier even though it's optional
 		// for each invocation of (...)+ in rewrite
 		String grammar =
@@ -669,7 +672,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)\n", found);
 	}
 
-	public void testCopySemanticsForRules4() throws Exception {
+	@Test public void testCopySemanticsForRules4() throws Exception {
 		// copy type *and* modifier even though it's optional
 		// for each invocation of (...)+ in rewrite
 		String grammar =
@@ -686,7 +689,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)\n", found);
 	}
 
-	public void testCopySemanticsLists() throws Exception {
+	@Test public void testCopySemanticsLists() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -699,7 +702,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b c a b c\n", found);
 	}
 
-	public void testCopyRuleLabel() throws Exception {
+	@Test public void testCopyRuleLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -713,7 +716,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a a\n", found);
 	}
 
-	public void testCopyRuleLabel2() throws Exception {
+	@Test public void testCopyRuleLabel2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -727,7 +730,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(a a)\n", found);
 	}
 
-	public void testQueueingOfTokens() throws Exception {
+	@Test public void testQueueingOfTokens() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -741,7 +744,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int a b c)\n", found);
 	}
 
-	public void testCopyOfTokens() throws Exception {
+	@Test public void testCopyOfTokens() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -755,7 +758,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("int a int a\n", found);
 	}
 
-	public void testTokenCopyInLoop() throws Exception {
+	@Test public void testTokenCopyInLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -769,7 +772,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int a) (int b) (int c)\n", found);
 	}
 
-	public void testTokenCopyInLoopAgainstTwoOthers() throws Exception {
+	@Test public void testTokenCopyInLoopAgainstTwoOthers() throws Exception {
 		// must smear 'int' copies across as root of multiple trees
 		String grammar =
 			"grammar T;\n" +
@@ -784,7 +787,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("(int a 1) (int b 2) (int c 3)\n", found);
 	}
 
-	public void testListRefdOneAtATime() throws Exception {
+	@Test public void testListRefdOneAtATime() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -798,7 +801,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b c\n", found);
 	}
 
-	public void testSplitListWithLabels() throws Exception {
+	@Test public void testSplitListWithLabels() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -813,12 +816,12 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a VAR b c\n", found);
 	}
 
-	public void testComplicatedMelange() throws Exception {
+	@Test public void testComplicatedMelange() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
 			"tokens {BLOCK;}\n" +
-			"a : A A b=B B b=B c+=C C c+=C D {$D.text;} -> A+ B+ C+ D ;\n" +
+			"a : A A b=B B b=B c+=C C c+=C D {String s=$D.text;} -> A+ B+ C+ D ;\n" +
 			"type : 'int' | 'float' ;\n" +
 			"A : 'a' ;\n" +
 			"B : 'b' ;\n" +
@@ -830,7 +833,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a a b b b c c c d\n", found);
 	}
 
-	public void testRuleLabel() throws Exception {
+	@Test public void testRuleLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -844,7 +847,42 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a\n", found);
 	}
 
-	public void testRuleListLabel() throws Exception {
+	@Test public void testAmbiguousRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID a -> a | INT ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT: '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testWeirdRuleRef() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID a -> $a | INT ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT: '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		// $a is ambig; is it previous root or ref to a ref in alt?
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());		
+	}
+
+	@Test public void testRuleListLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -858,7 +896,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testRuleListLabel2() throws Exception {
+	@Test public void testRuleListLabel2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -872,7 +910,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testOptional() throws Exception {
+	@Test public void testOptional() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -886,7 +924,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a\n", found);
 	}
 
-	public void testOptional2() throws Exception {
+	@Test public void testOptional2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -900,7 +938,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testOptional3() throws Exception {
+	@Test public void testOptional3() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -914,7 +952,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testOptional4() throws Exception {
+	@Test public void testOptional4() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -928,7 +966,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a b\n", found);
 	}
 
-	public void testOptional5() throws Exception {
+	@Test public void testOptional5() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -942,7 +980,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("a\n", found);
 	}
 
-	public void testArbitraryExprType() throws Exception {
+	@Test public void testArbitraryExprType() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -953,12 +991,10 @@ public class TestRewriteAST extends BaseTest {
 			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
 		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "a b", debug);
-		//assertEquals("[not sure what this should be!]\n", found);
-//ATTENTION: I changed this one's behavior from the above.  Is it right?
-		assertEquals("nil\n", found);
+		assertEquals("", found);
 	}
 
-	public void testSet() throws Exception {
+	@Test public void testSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options { output = AST; } \n" +
@@ -971,7 +1007,38 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("2 34 a de\n", found);
 	}
 
-	public void testRewriteAction() throws Exception {
+	@Test public void testSet2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options { output = AST; } \n" +
+			"a: (INT|ID) -> INT? ID? ;\n" +
+			"INT: '0'..'9'+;\n" +
+			"ID : 'a'..'z'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "2", debug);
+		assertEquals("2\n", found);
+	}
+
+	@Ignore 
+    // TODO: FAILS. This should probably generate a warning from antlr
+    // See http://www.antlr.org:8888/browse/ANTLR-162
+    //
+    public void testSetWithLabel() throws Exception {
+		
+		String grammar =
+			"grammar T;\n" +
+			"options { output = AST; } \n" +
+			"a : x=(INT|ID) -> $x ;\n" +
+			"INT: '0'..'9'+;\n" +
+			"ID : 'a'..'z'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "2", debug);
+		assertEquals("2\n", found);
+	}
+
+	@Test public void testRewriteAction() throws Exception {
 		String grammar =
 			"grammar T; \n" +
 			"options { output = AST; }\n" +
@@ -986,7 +1053,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals("25.0\n", found);
 	}
 
-	public void testOptionalSubruleWithoutRealElements() throws Exception {
+	@Test public void testOptionalSubruleWithoutRealElements() throws Exception {
 		// copy type *and* modifier even though it's optional
 		// for each invocation of (...)+ in rewrite
 		String grammar =
@@ -1007,7 +1074,7 @@ public class TestRewriteAST extends BaseTest {
 
 	// C A R D I N A L I T Y  I S S U E S
 
-	public void testCardinality() throws Exception {
+	@Test public void testCardinality() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -1024,7 +1091,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCardinality2() throws Exception {
+	@Test public void testCardinality2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -1041,7 +1108,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCardinality3() throws Exception {
+	@Test public void testCardinality3() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -1058,7 +1125,7 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testLoopCardinality() throws Exception {
+	@Test public void testLoopCardinality() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -1075,9 +1142,22 @@ public class TestRewriteAST extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
+	@Test public void testWildcard() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID c=. -> $c;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34\n", found);
+	}
+
 	// E R R O R S
 
-	public void testUnknownRule() throws Exception {
+	@Test public void testUnknownRule() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -1105,7 +1185,7 @@ public class TestRewriteAST extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testKnownRuleButNotInLHS() throws Exception {
+	@Test public void testKnownRuleButNotInLHS() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -1134,7 +1214,7 @@ public class TestRewriteAST extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testUnknownToken() throws Exception {
+	@Test public void testUnknownToken() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -1162,7 +1242,7 @@ public class TestRewriteAST extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testUnknownLabel() throws Exception {
+	@Test public void testUnknownLabel() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -1190,7 +1270,7 @@ public class TestRewriteAST extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testUnknownCharLiteralToken() throws Exception {
+	@Test public void testUnknownCharLiteralToken() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -1218,7 +1298,7 @@ public class TestRewriteAST extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testUnknownStringLiteralToken() throws Exception {
+	@Test public void testUnknownStringLiteralToken() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -1246,28 +1326,136 @@ public class TestRewriteAST extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	// S U P P O R T
-
-	protected void checkError(ErrorQueue equeue,
-							  GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		//System.out.println("errors="+equeue);
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			Message m = (Message)equeue.errors.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
-		assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
-		assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
-		assertTrue("error is not a GrammarSemanticsMessage",
-				   foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
-		assertEquals(expectedMessage.arg2, foundMsg.arg2);
-		ErrorManager.resetErrorState(); // wack errors for next test
+	@Test public void testExtraTokenInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int 34 x=1;", debug);
+		assertEquals("line 1:4 extraneous input '34' expecting ID\n", this.stderrDuringParse);
+		assertEquals("(EXPR int x 1)\n", found); // tree gets correct x and 1 tokens
+	}
+
+	@Test public void testMissingIDInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int =1;", debug);
+		assertEquals("line 1:4 missing ID at '='\n", this.stderrDuringParse);
+		assertEquals("(EXPR int <missing ID> 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingSetInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "x=1;", debug);
+		assertEquals("line 1:0 mismatched input 'x' expecting set null\n", this.stderrDuringParse);
+		assertEquals("(EXPR <error: x> x 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc", debug);
+		assertEquals("line 0:-1 missing INT at '<EOF>'\n", this.stderrDuringParse);
+		// doesn't do in-line recovery for sets (yet?)
+		assertEquals("abc <missing INT>\n", found);
+	}
+
+	@Test public void testExtraTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c -> b c;\n" +
+			"b : ID -> ID ;\n" +
+			"c : INT -> INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc ick 34", debug);
+		assertEquals("line 1:4 extraneous input 'ick' expecting INT\n", this.stderrDuringParse);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode2() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c -> b c;\n" +
+			"b : ID -> ID ;\n" +
+			"c : INT -> INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		// finds an error at the first token, 34, and re-syncs.
+		// re-synchronizing does not consume a token because 34 follows
+		// ref to rule b (start of c). It then matches 34 in c.
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testNoViableAltGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b -> b | c -> c;\n" +
+			"b : ID -> ID ;\n" +
+			"c : INT -> INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"S : '*' ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "*", debug);
+		// finds an error at the first token, 34, and re-syncs.
+		// re-synchronizing does not consume a token because 34 follows
+		// ref to rule b (start of c). It then matches 34 in c.
+		assertEquals("line 1:0 no viable alternative at input '*'\n", this.stderrDuringParse);
+		assertEquals("<unexpected: [@0,0:0='*',<6>,1:0], resync=*>\n", found);
 	}
 
 }
diff --git a/src/org/antlr/test/TestRewriteTemplates.java b/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
similarity index 88%
rename from src/org/antlr/test/TestRewriteTemplates.java
rename to tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
index fbe7498..9459cf1 100644
--- a/src/org/antlr/test/TestRewriteTemplates.java
+++ b/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
@@ -32,10 +32,15 @@ import org.antlr.codegen.CodeGenerator;
 import org.antlr.tool.ErrorManager;
 import org.antlr.tool.Grammar;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestRewriteTemplates extends BaseTest {
 	protected boolean debug = false;
 
-	public void testDelete() throws Exception {
+	@Test public void testDelete() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -48,7 +53,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("", found);
 	}
 
-	public void testAction() throws Exception {
+	@Test public void testAction() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -61,7 +66,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testEmbeddedLiteralConstructor() throws Exception {
+	@Test public void testEmbeddedLiteralConstructor() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -74,7 +79,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testInlineTemplate() throws Exception {
+	@Test public void testInlineTemplate() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -87,7 +92,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("x:abc, y:34;\n", found);
 	}
 
-	public void testNamedTemplate() throws Exception {
+	@Test public void testNamedTemplate() throws Exception {
 		// the support code adds template group in it's output Test.java
 		// that defines template foo.
 		String grammar =
@@ -102,7 +107,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("abc 34\n", found);
 	}
 
-	public void testIndirectTemplate() throws Exception {
+	@Test public void testIndirectTemplate() throws Exception {
 		// the support code adds template group in it's output Test.java
 		// that defines template foo.
 		String grammar =
@@ -117,7 +122,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("abc 34\n", found);
 	}
 
-	public void testInlineTemplateInvokingLib() throws Exception {
+	@Test public void testInlineTemplateInvokingLib() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -130,7 +135,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("abc 34\n", found);
 	}
 
-	public void testPredicatedAlts() throws Exception {
+	@Test public void testPredicatedAlts() throws Exception {
 		// the support code adds template group in it's output Test.java
 		// that defines template foo.
 		String grammar =
@@ -147,7 +152,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("hi abc\n", found);
 	}
 
-	public void testTemplateReturn() throws Exception {
+	@Test public void testTemplateReturn() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -161,7 +166,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("abc 34\n", found);
 	}
 
-	public void testReturnValueWithTemplate() throws Exception {
+	@Test public void testReturnValueWithTemplate() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -175,7 +180,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("8\n", found);
 	}
 
-	public void testTemplateRefToDynamicAttributes() throws Exception {
+	@Test public void testTemplateRefToDynamicAttributes() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=template;}\n" +
@@ -193,7 +198,7 @@ public class TestRewriteTemplates extends BaseTest {
 
 	// tests for rewriting templates in tree parsers
 
-	public void testSingleNode() throws Exception {
+	@Test public void testSingleNode() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -213,8 +218,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("|abc|\n", found);
 	}
 
-	/** tree parsing with output=template and rewrite=true */
-	public void testSingleNodeRewriteMode() throws Exception {
+	@Test public void testSingleNodeRewriteMode() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -234,7 +238,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("|abc|\n", found);
 	}
 
-	public void testRewriteRuleAndRewriteModeOnSimpleElements() throws Exception {
+	@Test public void testRewriteRuleAndRewriteModeOnSimpleElements() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -255,7 +259,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
 	}
 
-	public void testRewriteRuleAndRewriteModeIgnoreActionsPredicates() throws Exception {
+	@Test public void testRewriteRuleAndRewriteModeIgnoreActionsPredicates() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -277,7 +281,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
 	}
 
-	public void testRewriteRuleAndRewriteModeNotSimple() throws Exception {
+	@Test public void testRewriteRuleAndRewriteModeNotSimple() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -296,7 +300,7 @@ public class TestRewriteTemplates extends BaseTest {
 		assertEquals("unexpected errors: "+equeue, 2, equeue.warnings.size());
 	}
 
-	public void testRewriteRuleAndRewriteModeRefRule() throws Exception {
+	@Test public void testRewriteRuleAndRewriteModeRefRule() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
diff --git a/src/org/antlr/test/TestSemanticPredicateEvaluation.java b/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
similarity index 87%
rename from src/org/antlr/test/TestSemanticPredicateEvaluation.java
rename to tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
index c2eacc9..8cdb63e 100644
--- a/src/org/antlr/test/TestSemanticPredicateEvaluation.java
+++ b/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
@@ -27,8 +27,13 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 package org.antlr.test;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestSemanticPredicateEvaluation extends BaseTest {
-	public void testSimpleCyclicDFAWithPredicate() throws Exception {
+	@Test public void testSimpleCyclicDFAWithPredicate() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
@@ -39,7 +44,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("alt2\n", found);
 	}
 
-	public void testSimpleCyclicDFAWithInstanceVarPredicate() throws Exception {
+	@Test public void testSimpleCyclicDFAWithInstanceVarPredicate() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"@members {boolean v=true;}\n" +
@@ -51,7 +56,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("alt2\n", found);
 	}
 
-	public void testPredicateValidation() throws Exception {
+	@Test public void testPredicateValidation() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"@members {\n" +
@@ -67,7 +72,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("error: FailedPredicateException(a,{false}?)\n", found);
 	}
 
-	public void testLexerPreds() throws Exception {
+	@Test public void testLexerPreds() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"@lexer::members {boolean p=false;}\n" +
@@ -80,7 +85,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("token 2\n", found);
 	}
 
-	public void testLexerPreds2() throws Exception {
+	@Test public void testLexerPreds2() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"@lexer::members {boolean p=true;}\n" +
@@ -93,7 +98,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("token 1\n", found);
 	}
 
-	public void testLexerPredInExitBranch() throws Exception {
+	@Test public void testLexerPredInExitBranch() throws Exception {
 		// p says it's ok to exit; it has precendence over the !p loopback branch
 		String grammar =
 			"grammar foo;" +
@@ -107,7 +112,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("222\n", found);
 	}
 
-	public void testLexerPredInExitBranch2() throws Exception {
+	@Test public void testLexerPredInExitBranch2() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"@lexer::members {boolean p=true;}\n" +
@@ -119,7 +124,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("111\n", found);
 	}
 
-	public void testLexerPredInExitBranch3() throws Exception {
+	@Test public void testLexerPredInExitBranch3() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"@lexer::members {boolean p=true;}\n" +
@@ -131,7 +136,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("122\n", found);
 	}
 
-	public void testLexerPredInExitBranch4() throws Exception {
+	@Test public void testLexerPredInExitBranch4() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"a : (A|B)+ ;\n" +
@@ -142,7 +147,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("01xxx\n", found);
 	}
 
-	public void testLexerPredsInCyclicDFA() throws Exception {
+	@Test public void testLexerPredsInCyclicDFA() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"@lexer::members {boolean p=false;}\n" +
@@ -154,7 +159,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("token 2\n", found);
 	}
 
-	public void testLexerPredsInCyclicDFA2() throws Exception {
+	@Test public void testLexerPredsInCyclicDFA2() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"@lexer::members {boolean p=false;}\n" +
@@ -166,7 +171,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("token 2\n", found);
 	}
 
-	public void testGatedPred() throws Exception {
+	@Test public void testGatedPred() throws Exception {
 		String grammar =
 			"grammar foo;" +
 			"a : (A|B)+ ;\n" +
@@ -178,7 +183,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("token 1\ntoken 1\n", found);
 	}
 
-	public void testGatedPred2() throws Exception {
+	@Test public void testGatedPred2() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"@lexer::members {boolean sig=false;}\n"+
@@ -191,7 +196,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("AC\n", found);
 	}
 
-	public void testPredWithActionTranslation() throws Exception {
+	@Test public void testPredWithActionTranslation() throws Exception {
 		String grammar =
 			"grammar foo;\n" +
 			"a : b[2] ;\n" +
@@ -204,7 +209,7 @@ public class TestSemanticPredicateEvaluation extends BaseTest {
 		assertEquals("alt 2\n", found);
 	}
 
-	public void testPredicatesOnEOTTarget() throws Exception {
+	@Test public void testPredicatesOnEOTTarget() throws Exception {
 		String grammar =
 			"grammar foo; \n" +
 			"@lexer::members {boolean p=true, q=false;}" +
diff --git a/src/org/antlr/test/TestSemanticPredicates.java b/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
similarity index 61%
rename from src/org/antlr/test/TestSemanticPredicates.java
rename to tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
index 4568808..7b61287 100644
--- a/src/org/antlr/test/TestSemanticPredicates.java
+++ b/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
@@ -33,7 +33,14 @@ import org.antlr.codegen.CodeGenerator;
 import org.antlr.misc.BitSet;
 import org.antlr.tool.*;
 
-import java.util.List;
+import java.util.*;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import antlr.Token;
 
 public class TestSemanticPredicates extends BaseTest {
 
@@ -41,17 +48,17 @@ public class TestSemanticPredicates extends BaseTest {
 	public TestSemanticPredicates() {
 	}
 
-	public void testPredsButSyntaxResolves() throws Exception {
+	@Test public void testPredsButSyntaxResolves() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : {p1}? A | {p2}? B ;");
 		String expecting =
 			".s0-A->:s1=>1\n" +
 			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testLL_1_Pred() throws Exception {
+	@Test public void testLL_1_Pred() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : {p1}? A | {p2}? A ;");
@@ -59,10 +66,22 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-A->.s1\n" +
 			".s1-{p1}?->:s2=>1\n" +
 			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testLL_2_Pred() throws Exception {
+	@Test public void testLL_1_Pred_forced_k_1() throws Exception {
+		// should stop just like before w/o k set.
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a options {k=1;} : {p1}? A | {p2}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLL_2_Pred() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : {p1}? A B | {p2}? A B ;");
@@ -71,10 +90,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s1-B->.s2\n" +
 			".s2-{p1}?->:s3=>1\n" +
 			".s2-{p2}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testPredicatedLoop() throws Exception {
+	@Test public void testPredicatedLoop() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : ( {p1}? A | {p2}? A )+;");
@@ -83,10 +102,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-EOF->:s1=>3\n" +
 			".s2-{p1}?->:s3=>1\n" +
 			".s2-{p2}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testPredicatedToStayInLoop() throws Exception {
+	@Test public void testPredicatedToStayInLoop() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : ( {p1}? A )+ (A)+;");
@@ -96,7 +115,7 @@ public class TestSemanticPredicates extends BaseTest {
 			".s1-{p1}?->:s3=>2\n";       // loop back
 	}
 
-	public void testAndPredicates() throws Exception {
+	@Test public void testAndPredicates() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : {p1}? {p1a}? A | {p2}? A ;");
@@ -104,32 +123,71 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-A->.s1\n" +
 			".s1-{(p1&&p1a)}?->:s2=>1\n" +
 			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testOrPredicates() throws Exception {
+	@Test
+    public void testOrPredicates() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : b | {p2}? A ;\n" +
 			"b : {p1}? A | {p1a}? A ;");
 		String expecting =
 			".s0-A->.s1\n" +
-			".s1-{(p1||p1a)}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+            ".s1-{(p1a||p1)}?->:s2=>1\n" +
+            ".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testIgnoresHoistingDepthGreaterThanZero() throws Exception {
+	@Test public void testIgnoresHoistingDepthGreaterThanZero() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : A {p1}? | A {p2}?;");
 		String expecting =
 			".s0-A->:s1=>1\n";
 		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "A", null, null, 2);
+					  new int[] {1,2}, "A", null, null, 2, false);
+	}
+
+	@Test public void testIgnoresPredsHiddenByActions() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {a1} {p1}? A | {a2} {p2}? A ;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A", null, null, 2, true);
 	}
 
-	public void testHoist2() throws Exception {
+	@Test public void testIgnoresPredsHiddenByActionsOneAlt() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {a2} {p2}? A ;"); // ok since 1 pred visible
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null,
+					  null, null, null, null, 0, true);
+	}
+
+	/*
+	@Test public void testIncompleteSemanticHoistedContextk2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | A B;\n" +
+			"b : {p1}? A B | A B ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A B", new int[] {1}, null, 3);
+	}	
+	 */
+
+	@Test public void testHoist2() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : b | c ;\n" +
@@ -139,10 +197,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-A->.s1\n" +
 			".s1-{p1}?->:s2=>1\n" +
 			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testHoistCorrectContext() throws Exception {
+	@Test public void testHoistCorrectContext() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : b | {p2}? ID ;\n" +
@@ -152,10 +210,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-INT->:s2=>1\n" +
 			".s1-{p1}?->:s2=>1\n" +
 			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testDefaultPredNakedAltIsLast() throws Exception {
+	@Test public void testDefaultPredNakedAltIsLast() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : b | ID ;\n" +
@@ -165,10 +223,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-INT->:s2=>1\n" +
 			".s1-{p1}?->:s2=>1\n" +
 			".s1-{true}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testDefaultPredNakedAltNotLast() throws Exception {
+	@Test public void testDefaultPredNakedAltNotLast() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : ID | b ;\n" +
@@ -178,10 +236,12 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-INT->:s3=>2\n" +
 			".s1-{!(p1)}?->:s2=>1\n" +
 			".s1-{p1}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testLeftRecursivePred() throws Exception {
+	@Test public void testLeftRecursivePred() throws Exception {
+		// No analysis possible. but probably good to fail.  Not sure we really want
+		// left-recursion even if guarded with pred.
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"s : a ;\n" +
@@ -197,22 +257,25 @@ public class TestSemanticPredicates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
 		g.setCodeGenerator(generator);
 		if ( g.getNumberOfDecisions()==0 ) {
-			g.createNFAs();
-			g.createLookaheadDFAs();
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
 		}
 
 		DFA dfa = g.getLookaheadDFA(1);
-		FASerializer serializer = new FASerializer(g);
+		assertEquals(null, dfa); // can't analyze.
+
+		/*
 		String result = serializer.serialize(dfa.startState);
 		assertEquals(expecting, result);
+		*/
 
 		assertEquals("unexpected number of expected problems", 1, equeue.size());
 		Message msg = (Message)equeue.warnings.get(0);
-		assertTrue("warning must be a recursion overflow msg",
-				    msg instanceof RecursionOverflowMessage);
+		assertTrue("warning must be a left recursion msg",
+				    msg instanceof LeftRecursionCyclesMessage);
 	}
 
-	public void testIgnorePredFromLL2AltLastAltIsDefaultTrue() throws Exception {
+	@Test public void testIgnorePredFromLL2AltLastAltIsDefaultTrue() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : {p1}? A B | A C | {p2}? A | {p3}? A | A ;\n");
@@ -230,10 +293,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s1-{p2}?->:s4=>3\n" +
 			".s1-{p3}?->:s5=>4\n" +
 			".s1-{true}?->:s6=>5\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testIgnorePredFromLL2AltPredUnionNeeded() throws Exception {
+	@Test public void testIgnorePredFromLL2AltPredUnionNeeded() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : {p1}? A B | A C | {p2}? A | A | {p3}? A ;\n");
@@ -251,10 +314,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s1-{!((p3||p2))}?->:s5=>4\n" +
 			".s1-{p2}?->:s4=>3\n" +
 			".s1-{p3}?->:s6=>5\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testPredGets2SymbolSyntacticContext() throws Exception {
+	@Test public void testPredGets2SymbolSyntacticContext() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : b | A B | C ;\n" +
@@ -265,10 +328,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s1-B->.s2\n" +
 			".s2-{p1}?->:s3=>1\n" +
 			".s2-{true}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testMatchesLongestThenTestPred() throws Exception {
+	@Test public void testMatchesLongestThenTestPred() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
 			"a : b | c ;\n" +
@@ -279,41 +342,73 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-B->:s3=>2\n" +
 			".s1-{p}?->:s2=>1\n" +
 			".s1-{q}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testPredsUsedAfterRecursionOverflow() throws Exception {
+	@Test public void testPredsUsedAfterRecursionOverflow() throws Exception {
+		// analysis must bail out due to non-LL(*) nature (ovf)
+		// retries with k=1 (but with LL(*) algorithm not optimized version
+		// as it has preds)
 		Grammar g = new Grammar(
-			"grammar P;\n"+
+			"parser grammar P;\n"+
 			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
 			"e : '(' e ')' | INT ;\n");
 		String expecting =
 			".s0-'('->.s1\n" +
-			".s0-INT->.s7\n" +
+			".s0-INT->.s4\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n" +
+			".s4-{p1}?->:s2=>1\n" +
+			".s4-{p2}?->:s3=>2\n";
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+
+		assertEquals("unexpected number of expected problems", 0, equeue.size());
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testPredsUsedAfterK2FailsNoRecursionOverflow() throws Exception {
+		// analysis must bail out due to non-LL(*) nature (ovf)
+		// retries with k=1 (but with LL(*) algorithm not optimized version
+		// as it has preds)
+		Grammar g = new Grammar(
+			"grammar P;\n" +
+			"options {k=2;}\n"+
+			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
+			"e : '(' e ')' | INT ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-INT->.s6\n" +
 			".s1-'('->.s2\n" +
 			".s1-INT->.s5\n" +
 			".s2-{p1}?->:s3=>1\n" +
 			".s2-{p2}?->:s4=>2\n" +
-			".s5-')'->.s6\n" +
+			".s5-{p1}?->:s3=>1\n" +
+			".s5-{p2}?->:s4=>2\n" +
 			".s6-'.'->:s3=>1\n" +
-			".s6-':'->:s4=>2\n" +
-			".s7-'.'->:s3=>1\n" +
-			".s7-':'->:s4=>2\n";
+			".s6-':'->:s4=>2\n";
 		DecisionProbe.verbose=true; // make sure we get all error info
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
 		g.setCodeGenerator(generator);
 		if ( g.getNumberOfDecisions()==0 ) {
-			g.createNFAs();
-			g.createLookaheadDFAs();
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
 		}
 
 		assertEquals("unexpected number of expected problems", 0, equeue.size());
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testLexerMatchesLongestThenTestPred() throws Exception {
+	@Test public void testLexerMatchesLongestThenTestPred() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"B : {p}? 'a' ;\n" +
@@ -325,26 +420,40 @@ public class TestSemanticPredicates extends BaseTest {
 			".s1-<EOT>->.s2\n" +
 			".s2-{p}?->:s3=>1\n" +
 			".s2-{q}?->:s4=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLexerMatchesLongestMinusPred() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"B : 'a' ;\n" +
+			"C : ('a'|'b')+ ;");
+		String expecting =
+			".s0-'a'->.s1\n" +
+			".s0-'b'->:s3=>2\n" +
+			".s1-'a'..'b'->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testGatedPred() throws Exception {
+    @Test
+    public void testGatedPred() throws Exception {
 		// gated preds are present on all arcs in predictor
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"B : {p}? => 'a' ;\n" +
 			"C : {q}? => ('a'|'b')+ ;");
 		String expecting =
-			".s0-'a'&&{(p||q)}?->.s1\n" +
-			".s0-'b'&&{q}?->:s4=>2\n" +
-			".s1-'a'..'b'&&{q}?->:s4=>2\n" +
-			".s1-<EOT>&&{(p||q)}?->.s2\n" +
-			".s2-{p}?->:s3=>1\n" +
-			".s2-{q}?->:s4=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, null, 0);
+			".s0-'a'&&{(q||p)}?->.s1\n" +
+            ".s0-'b'&&{q}?->:s4=>2\n" +
+            ".s1-'a'..'b'&&{q}?->:s4=>2\n" +
+            ".s1-<EOT>&&{(q||p)}?->.s2\n" +
+            ".s2-{p}?->:s3=>1\n" +
+            ".s2-{q}?->:s4=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testGatedPredHoistsAndCanBeInStopState() throws Exception {
+	@Test public void testGatedPredHoistsAndCanBeInStopState() throws Exception {
 		// I found a bug where merging stop states made us throw away
 		// a stop state with a gated pred!
 		Grammar g = new Grammar(
@@ -355,27 +464,28 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-'x'->:s2=>1\n" +
 			".s0-'y'&&{p}?->:s3=>1\n" +
 			".s0-EOF->:s1=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testGatedPredInCyclicDFA() throws Exception {
+	@Test
+    public void testGatedPredInCyclicDFA() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : {p}?=> ('a')+ 'x' ;\n" +
 			"B : {q}?=> ('a'|'b')+ 'x' ;");
 		String expecting =
-			".s0-'a'&&{(p||q)}?->.s1\n" +
-			".s0-'b'&&{q}?->:s5=>2\n" +
-			".s1-'a'&&{(p||q)}?->.s1\n" +
-			".s1-'b'&&{q}?->:s5=>2\n" +
-			".s1-'x'&&{(p||q)}?->.s2\n" +
-			".s2-<EOT>&&{(p||q)}?->.s3\n" +
-			".s3-{p}?->:s4=>1\n" +
-			".s3-{q}?->:s5=>2\n";
-		checkDecision(g, 3, expecting, null, null, null, null, null, 0);
+			".s0-'a'&&{(q||p)}?->.s1\n" +
+            ".s0-'b'&&{q}?->:s5=>2\n" +
+            ".s1-'a'&&{(q||p)}?->.s1\n" +
+            ".s1-'b'&&{q}?->:s5=>2\n" +
+            ".s1-'x'&&{(q||p)}?->.s2\n" +
+            ".s2-<EOT>&&{(q||p)}?->.s3\n" +
+            ".s3-{p}?->:s4=>1\n" +
+            ".s3-{q}?->:s5=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testGatedPredNotActuallyUsedOnEdges() throws Exception {
+	@Test public void testGatedPredNotActuallyUsedOnEdges() throws Exception {
 		Grammar g = new Grammar(
 			"lexer grammar P;\n"+
 			"A : ('a' | {p}?=> 'a')\n" +
@@ -394,11 +504,11 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-'a'->.s1\n" +
 			".s1-'b'->:s2=>2\n" +
 			".s1-<EOT>->:s3=>1\n";
-		checkDecision(g, 1, expecting1, null, null, null, null, null, 0);
-		checkDecision(g, 2, expecting2, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting1, null, null, null, null, null, 0, false);
+		checkDecision(g, 2, expecting2, null, null, null, null, null, 0, false);
 	}
 
-	public void testGatedPredDoesNotForceAllToBeGated() throws Exception {
+	@Test public void testGatedPredDoesNotForceAllToBeGated() throws Exception {
 		Grammar g = new Grammar(
 			"grammar w;\n" +
 			"a : b | c ;\n" +
@@ -408,10 +518,10 @@ public class TestSemanticPredicates extends BaseTest {
 		String expecting =
 			".s0-B->:s1=>1\n" +
 			".s0-C&&{q}?->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testGatedPredDoesNotForceAllToBeGated2() throws Exception {
+	@Test public void testGatedPredDoesNotForceAllToBeGated2() throws Exception {
 		Grammar g = new Grammar(
 			"grammar w;\n" +
 			"a : b | c ;\n" +
@@ -425,10 +535,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-C&&{(q&&r)}?->:s3=>2\n" +
 			".s1-{p}?->:s2=>1\n" +
 			".s1-{q}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
-	public void testORGatedPred() throws Exception {
+	@Test public void testORGatedPred() throws Exception {
 		Grammar g = new Grammar(
 			"grammar w;\n" +
 			"a : b | c ;\n" +
@@ -442,13 +552,13 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-C&&{(q&&r)}?->:s3=>2\n" +
 			".s1-{(q&&s)}?->:s3=>2\n" +
 			".s1-{p}?->:s2=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0);
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
 	/** The following grammar should yield an error that rule 'a' has
 	 *  insufficient semantic info pulled from 'b'.
 	 */
-	public void testIncompleteSemanticHoistedContext() throws Exception {
+	@Test public void testIncompleteSemanticHoistedContext() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -458,7 +568,65 @@ public class TestSemanticPredicates extends BaseTest {
 		String expecting =
 			".s0-B->:s1=>1\n";
 		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "B", new int[] {1}, null, 3);
+					  new int[] {1,2}, "B", new int[] {1}, null, 3, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextk2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | A B;\n" +
+			"b : {p1}? A B | A B ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A B", new int[] {1}, null, 3, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextInFOLLOW() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"options {k=1;}\n" + // limit to k=1 because it's LL(2); force pred hoist
+			"a : A? ;\n" + // need FOLLOW
+			"b : X a {p1}? A | Y a A ;"); // only one A is covered
+		String expecting =
+			".s0-A->:s1=>1\n"; // s0-EOF->s2 branch pruned during optimization
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A", new int[] {2}, null, 3, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextInFOLLOWk2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A B)? ;\n" + // need FOLLOW
+			"b : X a {p1}? A B | Y a A B | Z a ;"); // only first alt is covered
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-EOF->:s3=>2\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, null,
+					  new int[] {1,2}, "A B", new int[] {2}, null, 2, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextInFOLLOWDueToHiddenPred() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A B)? ;\n" + // need FOLLOW
+			"b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ;"); // only first alt is covered
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-EOF->:s3=>2\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, null,
+					  new int[] {1,2}, "A B", new int[] {2}, null, 2, true);
 	}
 
 	/** The following grammar should yield an error that rule 'a' has
@@ -471,7 +639,7 @@ public class TestSemanticPredicates extends BaseTest {
 	 *  conversion to include an edge for D.  Alt 1 is the only possible
 	 *  prediction because we resolve the ambiguity by choosing alt 1.
 	 */
-	public void testIncompleteSemanticHoistedContext2() throws Exception {
+	@Test public void testIncompleteSemanticHoistedContext2() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -482,10 +650,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-B->:s1=>1\n";
 		checkDecision(g, 1, expecting, new int[] {2},
 					  new int[] {1,2}, "B", new int[] {1},
-					  null, 3);
+					  null, 3, false);
 	}
 
-	public void testTooFewSemanticPredicates() throws Exception {
+	@Test public void testTooFewSemanticPredicates() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar t;\n"+
 			"a : {p1}? A | A | A ;");
@@ -493,10 +661,10 @@ public class TestSemanticPredicates extends BaseTest {
 			".s0-A->:s1=>1\n";
 		checkDecision(g, 1, expecting, new int[] {2,3},
 					  new int[] {1,2,3}, "A",
-					  null, null, 2);
+					  null, null, 2, false);
 	}
 
-	public void testPredWithK1() throws Exception {
+	@Test public void testPredWithK1() throws Exception {
 		Grammar g = new Grammar(
 			"\tlexer grammar TLexer;\n" +
 			"A\n" +
@@ -518,10 +686,10 @@ public class TestSemanticPredicates extends BaseTest {
 		int numWarnings = 0;
 		checkDecision(g, 3, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings);
+					  danglingAlts, numWarnings, false);
 	}
 
-	public void testPredWithArbitraryLookahead() throws Exception {
+	@Test public void testPredWithArbitraryLookahead() throws Exception {
 		Grammar g = new Grammar(
 			"\tlexer grammar TLexer;\n" +
 			"A : {p1}? ('x')+ '.'\n" +
@@ -541,16 +709,16 @@ public class TestSemanticPredicates extends BaseTest {
 		int numWarnings = 0;
 		checkDecision(g, 3, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings);
+					  danglingAlts, numWarnings, false);
 	}
 
-
-	/** For a DFA state with lots of configurations that have the same
+	@Test
+    /** For a DFA state with lots of configurations that have the same
 	 *  predicate, don't just OR them all together as it's a waste to
 	 *  test a||a||b||a||a etc...  ANTLR makes a unique set and THEN
 	 *  OR's them together.
 	 */
-	public void testUniquePredicateOR() throws Exception {
+    public void testUniquePredicateOR() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar v;\n" +
 			"\n" +
@@ -565,8 +733,8 @@ public class TestSemanticPredicates extends BaseTest {
 			"  ;\n");
 		String expecting =
 			".s0-X->.s1\n" +
-			".s1-{((b&&c)||(a&&c))}?->:s2=>1\n" +
-			".s1-{c}?->:s3=>2\n";
+            ".s1-{((a&&c)||(b&&c))}?->:s2=>1\n" +
+            ".s1-{c}?->:s3=>2\n";
 		int[] unreachableAlts = null;
 		int[] nonDetAlts = null;
 		String ambigInput = null;
@@ -575,7 +743,26 @@ public class TestSemanticPredicates extends BaseTest {
 		int numWarnings = 0;
 		checkDecision(g, 3, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings);
+					  danglingAlts, numWarnings, false);
+	}
+
+    @Test
+    public void testSemanticContextPreventsEarlyTerminationOfClosure() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar T;\n" +
+			"a : loop SEMI | ID SEMI\n" +
+			"  ;\n" +
+			"loop\n" +
+			"    : {while}? ID\n" +
+			"    | {do}? ID\n" +
+			"    | {for}? ID\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+            ".s1-SEMI->.s2\n" +
+            ".s2-{(for||do||while)}?->:s3=>1\n" +
+            ".s2-{true}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
 	}
 
 	// S U P P O R T
@@ -594,7 +781,7 @@ public class TestSemanticPredicates extends BaseTest {
 		int numWarnings = 1;
 		checkDecision(g, 1, expecting, unreachableAlts,
 					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings);
+					  danglingAlts, numWarnings, false);
 	}
 
 	protected void checkDecision(Grammar g,
@@ -605,7 +792,8 @@ public class TestSemanticPredicates extends BaseTest {
 								 String expectingAmbigInput,
 								 int[] expectingInsufficientPredAlts,
 								 int[] expectingDanglingAlts,
-								 int expectingNumWarnings)
+								 int expectingNumWarnings,
+								 boolean hasPredHiddenByAction)
 		throws Exception
 	{
 		DecisionProbe.verbose=true; // make sure we get all error info
@@ -615,8 +803,8 @@ public class TestSemanticPredicates extends BaseTest {
 		g.setCodeGenerator(generator);
 		// mimic actions of org.antlr.Tool first time for grammar g
 		if ( g.getNumberOfDecisions()==0 ) {
-			g.createNFAs();
-			g.createLookaheadDFAs();
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
 		}
 
 		if ( equeue.size()!=expectingNumWarnings ) {
@@ -641,13 +829,15 @@ public class TestSemanticPredicates extends BaseTest {
 			assertEquals("unreachable alts mismatch", s, s2);
 		}
 		else {
-			assertEquals("unreachable alts mismatch", 0, unreachableAlts.size());
+			assertEquals("unreachable alts mismatch", 0,
+						 unreachableAlts!=null?unreachableAlts.size():0);
 		}
 
 		// check conflicting input
 		if ( expectingAmbigInput!=null ) {
 			// first, find nondet message
-			Message msg = (Message)equeue.warnings.get(0);
+			Message msg = getNonDeterminismMessage(equeue.warnings);
+			assertNotNull("no nondeterminism warning?", msg);
 			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
 			msg instanceof GrammarNonDeterminismMessage);
 			GrammarNonDeterminismMessage nondetMsg =
@@ -672,6 +862,8 @@ public class TestSemanticPredicates extends BaseTest {
 			BitSet s2 = new BitSet();
 			s2.addAll(nonDetAlts);
 			assertEquals("nondet alts mismatch", s, s2);
+			assertEquals("mismatch between expected hasPredHiddenByAction", hasPredHiddenByAction,
+						 nondetMsg.problemState.dfa.hasPredicateBlockedByAction);
 		}
 		else {
 			// not expecting any nondet alts, make sure there are none
@@ -680,6 +872,31 @@ public class TestSemanticPredicates extends BaseTest {
 			assertNull("found nondet alts, but expecting none", nondetMsg);
 		}
 
+		if ( expectingInsufficientPredAlts!=null ) {
+			GrammarInsufficientPredicatesMessage insuffPredMsg =
+				getGrammarInsufficientPredicatesMessage(equeue.warnings);
+			assertNotNull("found no GrammarInsufficientPredicatesMessage alts; expecting: "+
+										str(expectingNonDetAlts), insuffPredMsg);
+			Map<Integer, Set<Token>> locations = insuffPredMsg.altToLocations;
+			Set actualAlts = locations.keySet();
+			BitSet s = new BitSet();
+			s.addAll(expectingInsufficientPredAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(actualAlts);
+			assertEquals("mismatch between insufficiently covered alts", s, s2);
+			assertEquals("mismatch between expected hasPredHiddenByAction", hasPredHiddenByAction,
+						 insuffPredMsg.problemState.dfa.hasPredicateBlockedByAction);
+		}
+		else {
+			// not expecting any nondet alts, make sure there are none
+			GrammarInsufficientPredicatesMessage nondetMsg =
+				getGrammarInsufficientPredicatesMessage(equeue.warnings);
+			if ( nondetMsg!=null ) {
+				System.out.println(equeue.warnings);
+			}
+			assertNull("found insufficiently covered alts, but expecting none", nondetMsg);
+		}
+
 		assertEquals(expecting, result);
 	}
 
@@ -693,6 +910,16 @@ public class TestSemanticPredicates extends BaseTest {
 		return null;
 	}
 
+	protected GrammarInsufficientPredicatesMessage getGrammarInsufficientPredicatesMessage(List warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = (Message) warnings.get(i);
+			if ( m instanceof GrammarInsufficientPredicatesMessage ) {
+				return (GrammarInsufficientPredicatesMessage)m;
+			}
+		}
+		return null;
+	}
+
 	protected String str(int[] elements) {
 		StringBuffer buf = new StringBuffer();
 		for (int i = 0; i < elements.length; i++) {
diff --git a/src/org/antlr/test/TestSets.java b/tool/src/test/java/org/antlr/test/TestSets.java
similarity index 68%
rename from src/org/antlr/test/TestSets.java
rename to tool/src/test/java/org/antlr/test/TestSets.java
index 614fcc0..2399470 100644
--- a/src/org/antlr/test/TestSets.java
+++ b/tool/src/test/java/org/antlr/test/TestSets.java
@@ -1,5 +1,37 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2008 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 package org.antlr.test;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 /** Test the set stuff in lexer and parser */
 public class TestSets extends BaseTest {
 	protected boolean debug = false;
@@ -8,7 +40,7 @@ public class TestSets extends BaseTest {
 	public TestSets() {
 	}
 
-	public void testSeqDoesNotBecomeSet() throws Exception {
+	@Test public void testSeqDoesNotBecomeSet() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -22,7 +54,7 @@ public class TestSets extends BaseTest {
 		assertEquals("34\n", found);
 	}
 
-	public void testParserSet() throws Exception {
+	@Test public void testParserSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : t=('x'|'y') {System.out.println($t.text);} ;\n";
@@ -31,7 +63,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testParserNotSet() throws Exception {
+	@Test public void testParserNotSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n";
@@ -40,7 +72,7 @@ public class TestSets extends BaseTest {
 		assertEquals("z\n", found);
 	}
 
-	public void testParserNotToken() throws Exception {
+	@Test public void testParserNotToken() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : ~'x' 'z' {System.out.println(input);} ;\n";
@@ -49,7 +81,7 @@ public class TestSets extends BaseTest {
 		assertEquals("zz\n", found);
 	}
 
-	public void testParserNotTokenWithLabel() throws Exception {
+	@Test public void testParserNotTokenWithLabel() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : t=~'x' 'z' {System.out.println($t.text);} ;\n";
@@ -58,7 +90,7 @@ public class TestSets extends BaseTest {
 		assertEquals("z\n", found);
 	}
 
-	public void testRuleAsSet() throws Exception {
+	@Test public void testRuleAsSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a @after {System.out.println(input);} : 'a' | 'b' |'c' ;\n";
@@ -67,7 +99,7 @@ public class TestSets extends BaseTest {
 		assertEquals("b\n", found);
 	}
 
-	public void testRuleAsSetAST() throws Exception {
+	@Test public void testRuleAsSetAST() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -77,7 +109,7 @@ public class TestSets extends BaseTest {
 		assertEquals("b\n", found);
 	}
 
-	public void testNotChar() throws Exception {
+	@Test public void testNotChar() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println($A.text);} ;\n" +
@@ -87,7 +119,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testOptionalSingleElement() throws Exception {
+	@Test public void testOptionalSingleElement() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A? 'c' {System.out.println(input);} ;\n" +
@@ -97,7 +129,7 @@ public class TestSets extends BaseTest {
 		assertEquals("bc\n", found);
 	}
 
-	public void testOptionalLexerSingleElement() throws Exception {
+	@Test public void testOptionalLexerSingleElement() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println(input);} ;\n" +
@@ -107,7 +139,7 @@ public class TestSets extends BaseTest {
 		assertEquals("bc\n", found);
 	}
 
-	public void testStarLexerSingleElement() throws Exception {
+	@Test public void testStarLexerSingleElement() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println(input);} ;\n" +
@@ -120,7 +152,7 @@ public class TestSets extends BaseTest {
 		assertEquals("c\n", found);
 	}
 
-	public void testPlusLexerSingleElement() throws Exception {
+	@Test public void testPlusLexerSingleElement() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println(input);} ;\n" +
@@ -130,7 +162,7 @@ public class TestSets extends BaseTest {
 		assertEquals("bbbbc\n", found);
 	}
 
-	public void testOptionalSet() throws Exception {
+	@Test public void testOptionalSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : ('a'|'b')? 'c' {System.out.println(input);} ;\n";
@@ -139,7 +171,7 @@ public class TestSets extends BaseTest {
 		assertEquals("ac\n", found);
 	}
 
-	public void testStarSet() throws Exception {
+	@Test public void testStarSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : ('a'|'b')* 'c' {System.out.println(input);} ;\n";
@@ -148,7 +180,7 @@ public class TestSets extends BaseTest {
 		assertEquals("abaac\n", found);
 	}
 
-	public void testPlusSet() throws Exception {
+	@Test public void testPlusSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : ('a'|'b')+ 'c' {System.out.println(input);} ;\n";
@@ -157,7 +189,7 @@ public class TestSets extends BaseTest {
 		assertEquals("abaac\n", found);
 	}
 
-	public void testLexerOptionalSet() throws Exception {
+	@Test public void testLexerOptionalSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println(input);} ;\n" +
@@ -167,7 +199,7 @@ public class TestSets extends BaseTest {
 		assertEquals("ac\n", found);
 	}
 
-	public void testLexerStarSet() throws Exception {
+	@Test public void testLexerStarSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println(input);} ;\n" +
@@ -177,7 +209,7 @@ public class TestSets extends BaseTest {
 		assertEquals("abaac\n", found);
 	}
 
-	public void testLexerPlusSet() throws Exception {
+	@Test public void testLexerPlusSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println(input);} ;\n" +
@@ -187,7 +219,7 @@ public class TestSets extends BaseTest {
 		assertEquals("abaac\n", found);
 	}
 
-	public void testNotCharSet() throws Exception {
+	@Test public void testNotCharSet() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println($A.text);} ;\n" +
@@ -197,7 +229,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testNotCharSetWithLabel() throws Exception {
+	@Test public void testNotCharSetWithLabel() throws Exception {
 		// This doesn't work in lexer yet.
 		// Generates: h=input.LA(1); but h is defined as a Token
 		String grammar =
@@ -209,7 +241,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testNotCharSetWithRuleRef() throws Exception {
+	@Test public void testNotCharSetWithRuleRef() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println($A.text);} ;\n" +
@@ -220,7 +252,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testNotCharSetWithRuleRef2() throws Exception {
+	@Test public void testNotCharSetWithRuleRef2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println($A.text);} ;\n" +
@@ -231,7 +263,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testNotCharSetWithRuleRef3() throws Exception {
+	@Test public void testNotCharSetWithRuleRef3() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println($A.text);} ;\n" +
@@ -243,7 +275,7 @@ public class TestSets extends BaseTest {
 		assertEquals("x\n", found);
 	}
 
-	public void testNotCharSetWithRuleRef4() throws Exception {
+	@Test public void testNotCharSetWithRuleRef4() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"a : A {System.out.println($A.text);} ;\n" +
diff --git a/src/org/antlr/test/TestSymbolDefinitions.java b/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
similarity index 74%
rename from src/org/antlr/test/TestSymbolDefinitions.java
rename to tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
index 6160972..57a1634 100644
--- a/src/org/antlr/test/TestSymbolDefinitions.java
+++ b/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
@@ -33,16 +33,20 @@ import org.antlr.codegen.CodeGenerator;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.tool.*;
 
-import java.io.StringReader;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 import java.util.*;
 
 public class TestSymbolDefinitions extends BaseTest {
 
-    /** Public default constructor used by TestRig */
-    public TestSymbolDefinitions() {
-    }
+	/** Public default constructor used by TestRig */
+	public TestSymbolDefinitions() {
+	}
 
-	public void testParserSimpleTokens() throws Exception {
+	@Test public void testParserSimpleTokens() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar t;\n"+
 				"a : A | B;\n" +
@@ -52,7 +56,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testParserTokensSection() throws Exception {
+	@Test public void testParserTokensSection() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar t;\n" +
 				"tokens {\n" +
@@ -66,7 +70,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testLexerTokensSection() throws Exception {
+	@Test public void testLexerTokensSection() throws Exception {
 		Grammar g = new Grammar(
 				"lexer grammar t;\n" +
 				"tokens {\n" +
@@ -80,7 +84,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testTokensSectionWithAssignmentSection() throws Exception {
+	@Test public void testTokensSectionWithAssignmentSection() throws Exception {
 		Grammar g = new Grammar(
 				"grammar t;\n" +
 				"tokens {\n" +
@@ -94,7 +98,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testCombinedGrammarLiterals() throws Exception {
+	@Test public void testCombinedGrammarLiterals() throws Exception {
 		Grammar g = new Grammar(
 				"grammar t;\n"+
 				"a : 'begin' b 'end';\n" +
@@ -107,7 +111,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testLiteralInParserAndLexer() throws Exception {
+	@Test public void testLiteralInParserAndLexer() throws Exception {
 		// 'x' is token and char in lexer rule
 		Grammar g = new Grammar(
 				"grammar t;\n" +
@@ -120,14 +124,14 @@ public class TestSymbolDefinitions extends BaseTest {
 		String implicitLexer =
 			"lexer grammar t;\n" +
 			"\n" +
-			"T5 : 'x' ;\n" +
+			"T__5 : 'x' ;\n" +
 			"\n" +
 			"// $ANTLR src \"<string>\" 3\n" +
 			"E: 'x' '0' ;\n";
 		assertEquals(implicitLexer, g.getLexerGrammar());
 	}
 
-	public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception {
+	@Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception {
 		Grammar g = new Grammar(
 				"grammar t;\n"+
 				"a : 'a' ;\n" +
@@ -137,7 +141,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testSetDoesNotMissTokenAliases() throws Exception {
+	@Test public void testSetDoesNotMissTokenAliases() throws Exception {
 		Grammar g = new Grammar(
 				"grammar t;\n"+
 				"a : 'a'|'b' ;\n" +
@@ -148,7 +152,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkSymbols(g, rules, tokenNames);
 	}
 
-	public void testSimplePlusEqualLabel() throws Exception {
+	@Test public void testSimplePlusEqualLabel() throws Exception {
 		Grammar g = new Grammar(
 				"parser grammar t;\n"+
 				"a : ids+=ID ( COMMA ids+=ID )* ;\n");
@@ -158,7 +162,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
 	}
 
-	public void testMixedPlusEqualLabel() throws Exception {
+	@Test public void testMixedPlusEqualLabel() throws Exception {
 		Grammar g = new Grammar(
 				"grammar t;\n"+
 				"options {output=AST;}\n" +
@@ -173,7 +177,7 @@ public class TestSymbolDefinitions extends BaseTest {
 
 	// T E S T  L I T E R A L  E S C A P E S
 
-	public void testParserCharLiteralWithEscape() throws Exception {
+	@Test public void testParserCharLiteralWithEscape() throws Exception {
 		Grammar g = new Grammar(
 				"grammar t;\n"+
 				"a : '\\n';\n");
@@ -182,7 +186,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		assertEquals("'\\n'", literals.toArray()[0]);
 	}
 
-	public void testTokenInTokensSectionAndTokenRuleDef() throws Exception {
+	@Test public void testTokenInTokensSectionAndTokenRuleDef() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -197,7 +201,7 @@ public class TestSymbolDefinitions extends BaseTest {
 		assertEquals("a}\n", found);
 	}
 
-	public void testTokenInTokensSectionAndTokenRuleDef2() throws Exception {
+	@Test public void testTokenInTokensSectionAndTokenRuleDef2() throws Exception {
 		// this must return A not I to the parser; calling a nonfragment rule
 		// from a nonfragment rule does not set the overall token.
 		String grammar =
@@ -213,7 +217,7 @@ public class TestSymbolDefinitions extends BaseTest {
 	}
 
 
-	public void testRefToRuleWithNoReturnValue() throws Exception {
+	@Test public void testRefToRuleWithNoReturnValue() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 
@@ -236,7 +240,7 @@ public class TestSymbolDefinitions extends BaseTest {
 
 	// T E S T  E R R O R S
 
-	public void testParserStringLiterals() throws Exception {
+	@Test public void testParserStringLiterals() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -247,10 +251,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testParserCharLiterals() throws Exception {
+	@Test public void testParserCharLiterals() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
@@ -261,52 +265,52 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testEmptyNotChar() throws Exception {
+	@Test public void testEmptyNotChar() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 				"grammar foo;\n" +
 				"a : (~'x')+ ;\n");
-		g.createNFAs();
+		g.buildNFA();
 		Object expectedArg = "'x'";
 		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testEmptyNotToken() throws Exception {
+	@Test public void testEmptyNotToken() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 				"grammar foo;\n" +
 				"a : (~A)+ ;\n");
-		g.createNFAs();
+		g.buildNFA();
 		Object expectedArg = "A";
 		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testEmptyNotSet() throws Exception {
+	@Test public void testEmptyNotSet() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
 		Grammar g = new Grammar(
 				"grammar foo;\n" +
 				"a : (~(A|B))+ ;\n");
-		g.createNFAs();
+		g.buildNFA();
 		Object expectedArg = null;
 		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testStringLiteralInParserTokensSection() throws Exception {
+	@Test public void testStringLiteralInParserTokensSection() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -320,10 +324,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testCharLiteralInParserTokensSection() throws Exception {
+	@Test public void testCharLiteralInParserTokensSection() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -337,10 +341,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testCharLiteralInLexerTokensSection() throws Exception {
+	@Test public void testCharLiteralInLexerTokensSection() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -353,10 +357,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testRuleRedefinition() throws Exception {
+	@Test public void testRuleRedefinition() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -368,10 +372,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLexerRuleRedefinition() throws Exception {
+	@Test public void testLexerRuleRedefinition() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -383,10 +387,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testCombinedRuleRedefinition() throws Exception {
+	@Test public void testCombinedRuleRedefinition() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -399,10 +403,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testUndefinedToken() throws Exception {
+	@Test public void testUndefinedToken() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -413,19 +417,19 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_NO_TOKEN_DEFINITION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkWarning(equeue, expectedMessage);
+		checkGrammarSemanticsWarning(equeue, expectedMessage);
 	}
 
-	public void testUndefinedTokenOkInParser() throws Exception {
+	@Test public void testUndefinedTokenOkInParser() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
 				"parser grammar t;\n"+
 				"x : ID ;");
-        assertEquals("should not be an error", 0, equeue.errors.size());
+		assertEquals("should not be an error", 0, equeue.errors.size());
 	}
 
-	public void testUndefinedRule() throws Exception {
+	@Test public void testUndefinedRule() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -436,10 +440,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLexerRuleInParser() throws Exception {
+	@Test public void testLexerRuleInParser() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -450,10 +454,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testParserRuleInLexer() throws Exception {
+	@Test public void testParserRuleInLexer() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -464,10 +468,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testRuleScopeConflict() throws Exception {
+	@Test public void testRuleScopeConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -482,10 +486,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testTokenRuleScopeConflict() throws Exception {
+	@Test public void testTokenRuleScopeConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -500,10 +504,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testTokenScopeConflict() throws Exception {
+	@Test public void testTokenScopeConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -519,10 +523,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testTokenRuleScopeConflictInLexerGrammar() throws Exception {
+	@Test public void testTokenRuleScopeConflictInLexerGrammar() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -537,10 +541,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testTokenLabelScopeConflict() throws Exception {
+	@Test public void testTokenLabelScopeConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -555,10 +559,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testRuleLabelScopeConflict() throws Exception {
+	@Test public void testRuleLabelScopeConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -574,10 +578,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLabelAndRuleNameConflict() throws Exception {
+	@Test public void testLabelAndRuleNameConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -591,10 +595,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLabelAndTokenNameConflict() throws Exception {
+	@Test public void testLabelAndTokenNameConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -608,10 +612,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLabelAndArgConflict() throws Exception {
+	@Test public void testLabelAndArgConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -623,10 +627,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLabelAndParameterConflict() throws Exception {
+	@Test public void testLabelAndParameterConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -638,10 +642,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testLabelRuleScopeConflict() throws Exception {
+	@Test public void testLabelRuleScopeConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -658,10 +662,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testRuleScopeArgConflict() throws Exception {
+	@Test public void testRuleScopeArgConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -678,10 +682,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testRuleScopeReturnValueConflict() throws Exception {
+	@Test public void testRuleScopeReturnValueConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -698,10 +702,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testRuleScopeRuleNameConflict() throws Exception {
+	@Test public void testRuleScopeRuleNameConflict() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -718,28 +722,26 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testBadGrammarOption() throws Exception {
+	@Test public void testBadGrammarOption() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Tool antlr = newTool();
 		Grammar g = new Grammar(antlr,
-								"t",
-								new StringReader(
-									"grammar t;\n"+
-									"options {foo=3; language=Java;}\n" +
-									"a : 'a';\n"));
+								"grammar t;\n"+
+								"options {foo=3; language=Java;}\n" +
+								"a : 'a';\n");
 
 		Object expectedArg = "foo";
 		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testBadRuleOption() throws Exception {
+	@Test public void testBadRuleOption() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -752,10 +754,10 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	public void testBadSubRuleOption() throws Exception {
+	@Test public void testBadSubRuleOption() throws Exception {
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue); // unique listener per thread
 		Grammar g = new Grammar(
@@ -769,47 +771,71 @@ public class TestSymbolDefinitions extends BaseTest {
 		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
 		GrammarSemanticsMessage expectedMessage =
 			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
+		checkGrammarSemanticsError(equeue, expectedMessage);
 	}
 
-	protected void checkError(ErrorQueue equeue,
-							  GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		/*
-		System.out.println(equeue.infos);
-		System.out.println(equeue.warnings);
-		System.out.println(equeue.errors);
-		assertTrue("number of errors mismatch", n, equeue.errors.size());
-				   */
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			Message m = (Message)equeue.errors.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
-		assertTrue("error is not a GrammarSemanticsMessage",
-				   foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
+	@Test public void testTokenVocabStringUsedInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String tokens =
+			"';'=4\n";
+        mkdir(tmpdir);
+        writeFile(tmpdir, "T.tokens", tokens);
+
+		String importer =
+			"lexer grammar B; \n" +
+			"options\t{tokenVocab=T;} \n" +
+			"SEMI:';' ; \n" ;
+		writeFile(tmpdir, "B.g", importer);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/B.g",composite);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[SEMI=4]";
+		String expectedStringLiteralToTypeMap = "{';'=4}";
+		String expectedTypeToTokenList = "[SEMI]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
-	protected void checkWarning(ErrorQueue equeue,
-								GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.warnings.size(); i++) {
-			Message m = (Message)equeue.warnings.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
-		assertTrue("error is not a GrammarSemanticsMessage",
-				   foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
+	@Test public void testTokenVocabStringUsedInCombined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String tokens =
+			"';'=4\n";
+        mkdir(tmpdir);
+		writeFile(tmpdir, "T.tokens", tokens);
+
+		String importer =
+			"grammar B; \n" +
+			"options\t{tokenVocab=T;} \n" +
+			"SEMI:';' ; \n" ;
+		writeFile(tmpdir, "B.g", importer);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/B.g",composite);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[SEMI=4]";
+		String expectedStringLiteralToTypeMap = "{';'=4}";
+		String expectedTypeToTokenList = "[SEMI]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
 	}
 
 	protected void checkPlusEqualsLabels(Grammar g,
@@ -868,10 +894,10 @@ public class TestSymbolDefinitions extends BaseTest {
 			tokens.remove(tokenName);
 		}
 		// make sure there are not any others (other than <EOF> etc...)
-        for (Iterator iter = tokens.iterator(); iter.hasNext();) {
+		for (Iterator iter = tokens.iterator(); iter.hasNext();) {
 			String tokenName = (String) iter.next();
 			assertTrue("unexpected token name "+tokenName,
-					    g.getTokenType(tokenName)<Label.MIN_TOKEN_TYPE);
+					   g.getTokenType(tokenName)<Label.MIN_TOKEN_TYPE);
 		}
 
 		// make sure all expected rules are there
diff --git a/src/org/antlr/test/TestSyntacticPredicateEvaluation.java b/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
similarity index 78%
rename from src/org/antlr/test/TestSyntacticPredicateEvaluation.java
rename to tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
index 2944974..af51592 100644
--- a/src/org/antlr/test/TestSyntacticPredicateEvaluation.java
+++ b/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
@@ -27,10 +27,15 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 package org.antlr.test;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestSyntacticPredicateEvaluation extends BaseTest {
-	public void testTwoPredsWithNakedAlt() throws Exception {
+	@Test public void testTwoPredsWithNakedAlt() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : (a ';')+ ;\n" +
 			"a\n" +
 			"options {\n" +
@@ -48,7 +53,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"   : '(' c ')' | 'x' ;\n" +
 			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
 			"   ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "(x) ;", false);
 		String expecting =
 			"enter b\n" +
@@ -57,7 +62,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"alt 2\n";
 		assertEquals(expecting, found);
 
-		found = execParser("t.g", grammar, "tParser", "tLexer",
+		found = execParser("T.g", grammar, "TParser", "TLexer",
 			    "a", "(x). ;", false);
 		expecting =
 			"enter b\n" +
@@ -65,7 +70,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"alt 1\n";
 		assertEquals(expecting, found);
 
-		found = execParser("t.g", grammar, "tParser", "tLexer",
+		found = execParser("T.g", grammar, "TParser", "TLexer",
 			    "a", "((x)) ;", false);
 		expecting =
 			"enter b\n" +
@@ -77,9 +82,9 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTwoPredsWithNakedAltNotLast() throws Exception {
+	@Test public void testTwoPredsWithNakedAltNotLast() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : (a ';')+ ;\n" +
 			"a\n" +
 			"options {\n" +
@@ -97,7 +102,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"   : '(' c ')' | 'x' ;\n" +
 			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
 			"   ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "(x) ;", false);
 		String expecting =
 			"enter b\n" +
@@ -106,7 +111,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"alt 2\n";
 		assertEquals(expecting, found);
 
-		found = execParser("t.g", grammar, "tParser", "tLexer",
+		found = execParser("T.g", grammar, "TParser", "TLexer",
 			    "a", "(x). ;", false);
 		expecting =
 			"enter b\n" +
@@ -114,7 +119,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"alt 1\n";
 		assertEquals(expecting, found);
 
-		found = execParser("t.g", grammar, "tParser", "tLexer",
+		found = execParser("T.g", grammar, "TParser", "TLexer",
 			    "a", "((x)) ;", false);
 		expecting =
 			"enter b\n" +
@@ -125,9 +130,9 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testLexerPred() throws Exception {
+	@Test public void testLexerPred() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : A ;\n" +
 			"A options {k=1;}\n" + // force backtracking
 			"  : (B '.')=>B '.' {System.out.println(\"alt1\");}\n" +
@@ -135,20 +140,20 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"  ;\n" +
 			"fragment\n" +
 			"B : 'x'+ ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "s", "xxx", false);
 
 		assertEquals("alt2\n", found);
 
-		found = execParser("t.g", grammar, "tParser", "tLexer",
+		found = execParser("T.g", grammar, "TParser", "TLexer",
 			    "s", "xxx.", false);
 
 		assertEquals("alt1\n", found);
 	}
 
-	public void testLexerWithPredLongerThanAlt() throws Exception {
+	@Test public void testLexerWithPredLongerThanAlt() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : A ;\n" +
 			"A options {k=1;}\n" + // force backtracking
 			"  : (B '.')=>B {System.out.println(\"alt1\");}\n" +
@@ -157,20 +162,20 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"D : '.' {System.out.println(\"D\");} ;\n" +
 			"fragment\n" +
 			"B : 'x'+ ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "s", "xxx", false);
 
 		assertEquals("alt2\n", found);
 
-		found = execParser("t.g", grammar, "tParser", "tLexer",
+		found = execParser("T.g", grammar, "TParser", "TLexer",
 			    "s", "xxx.", false);
 
 		assertEquals("alt1\nD\n", found);
 	}
 
-	public void testLexerPredCyclicPrediction() throws Exception {
+	@Test public void testLexerPredCyclicPrediction() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : A ;\n" +
 			"A : (B)=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
 			"  | B {System.out.println(\"alt2\");}\n" +
@@ -178,15 +183,15 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"  ;\n" +
 			"fragment\n" +
 			"B : 'x'+ ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "s", "xxx", false);
 
 		assertEquals("alt1\n", found);
 	}
 
-	public void testLexerPredCyclicPrediction2() throws Exception {
+	@Test public void testLexerPredCyclicPrediction2() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : A ;\n" +
 			"A : (B '.')=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
 			"  | B {System.out.println(\"alt2\");}\n" +
@@ -194,14 +199,14 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"  ;\n" +
 			"fragment\n" +
 			"B : 'x'+ ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "s", "xxx", false);
 		assertEquals("alt2\n", found);
 	}
 
-	public void testSimpleNestedPred() throws Exception {
+	@Test public void testSimpleNestedPred() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : (expr ';')+ ;\n" +
 			"expr\n" +
 			"options {\n" +
@@ -219,7 +224,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"INT: '0'..'9'+ ;\n" +
 			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
 			"   ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "s", "(34)x;", false);
 		String expecting =
 			"enter expr (\n" +
@@ -234,9 +239,9 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTripleNestedPredInLexer() throws Exception {
+	@Test public void testTripleNestedPredInLexer() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"s : (.)+ {System.out.println(\"done\");} ;\n" +
 			"EXPR\n" +
 			"options {\n" +
@@ -254,7 +259,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"fragment INT: '0'..'9'+ ;\n" +
 			"fragment WS : (' '|'\\n')+ \n" +
 			"   ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "s", "((34)x)x", false);
 		String expecting = // has no memoization
 			"enter expr (\n" +
@@ -285,7 +290,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTreeParserWithSynPred() throws Exception {
+	@Test public void testTreeParserWithSynPred() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -308,7 +313,7 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 		assertEquals("alt 2\n", found);
 	}
 
-	public void testTreeParserWithNestedSynPred() throws Exception {
+	@Test public void testTreeParserWithNestedSynPred() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -335,10 +340,10 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 		assertEquals("b:alt 2 a:alt 1\n", found);
 	}
 
-	public void testSynPredWithOutputTemplate() throws Exception {
+	@Test public void testSynPredWithOutputTemplate() throws Exception {
 		// really just seeing if it will compile
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"options {output=template;}\n" +
 			"a\n" +
 			"options {\n" +
@@ -349,16 +354,16 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"  ;\n" +
 			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
 			"   ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "xxxy", false);
 
 		assertEquals("1:xxxy;\n", found);
 	}
 
-	public void testSynPredWithOutputAST() throws Exception {
+	@Test public void testSynPredWithOutputAST() throws Exception {
 		// really just seeing if it will compile
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 			"options {output=AST;}\n" +
 			"a\n" +
 			"options {\n" +
@@ -369,43 +374,47 @@ public class TestSyntacticPredicateEvaluation extends BaseTest {
 			"  ;\n" +
 			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
 			"   ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "xxxy", false);
 
 		assertEquals("x x x y\n", found);
 	}
 
-	public void testOptionalBlockWithSynPred() throws Exception {
+	@Test public void testOptionalBlockWithSynPred() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 				"\n" +
 				"a : ( (b)=> b {System.out.println(\"b\");})? b ;\n" +
 				"b : 'x' ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "xx", false);
-
 		assertEquals("b\n", found);
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "x", false);
+		assertEquals("", found);
 	}
 
-	public void testSynPredK2() throws Exception {
+	@Test public void testSynPredK2() throws Exception {
+		// all manually specified syn predicates are gated (i.e., forced
+		// to execute).
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 				"\n" +
 				"a : (b)=> b {System.out.println(\"alt1\");} | 'a' 'c' ;\n" +
 				"b : 'a' 'b' ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "ab", false);
 
 		assertEquals("alt1\n", found);
 	}
 
-	public void testSynPredKStar() throws Exception {
+	@Test public void testSynPredKStar() throws Exception {
 		String grammar =
-			"grammar t;\n" +
+			"grammar T;\n" +
 				"\n" +
 				"a : (b)=> b {System.out.println(\"alt1\");} | 'a'+ 'c' ;\n" +
 				"b : 'a'+ 'b' ;\n" ;
-		String found = execParser("t.g", grammar, "tParser", "tLexer",
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
 				    "a", "aaab", false);
 
 		assertEquals("alt1\n", found);
diff --git a/src/org/antlr/test/TestTemplates.java b/tool/src/test/java/org/antlr/test/TestTemplates.java
similarity index 83%
rename from src/org/antlr/test/TestTemplates.java
rename to tool/src/test/java/org/antlr/test/TestTemplates.java
index be4500e..ca22f8b 100644
--- a/src/org/antlr/test/TestTemplates.java
+++ b/tool/src/test/java/org/antlr/test/TestTemplates.java
@@ -33,16 +33,23 @@ import org.antlr.stringtemplate.StringTemplateGroup;
 import org.antlr.stringtemplate.StringTemplate;
 import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
 import org.antlr.codegen.CodeGenerator;
-import org.antlr.codegen.ActionTranslatorLexer;
+import org.antlr.grammar.v2.ANTLRParser;
+import org.antlr.grammar.v3.ActionTranslator;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /** Test templates in actions; %... shorthands */
 public class TestTemplates extends BaseTest {
 	private static final String LINE_SEP = System.getProperty("line.separator");
 
-	public void testTemplateConstructor() throws Exception {
+	@Test
+    public void testTemplateConstructor() throws Exception {
 		String action = "x = %foo(name={$ID.text});";
 		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
-			LINE_SEP + "  new STAttrMap().put(\"name\", ID1.getText()));";
+			LINE_SEP + "  new STAttrMap().put(\"name\", (ID1!=null?ID1.getText():null)));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -60,8 +67,8 @@ public class TestTemplates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 										"a",
 										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -76,7 +83,8 @@ public class TestTemplates extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testTemplateConstructorNoArgs() throws Exception {
+	@Test
+    public void testTemplateConstructorNoArgs() throws Exception {
 		String action = "x = %foo();";
 		String expecting = "x = templateLib.getInstanceOf(\"foo\");";
 
@@ -96,8 +104,8 @@ public class TestTemplates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 										"a",
 										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -112,10 +120,11 @@ public class TestTemplates extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testIndirectTemplateConstructor() throws Exception {
+	@Test
+    public void testIndirectTemplateConstructor() throws Exception {
 		String action = "x = %({\"foo\"})(name={$ID.text});";
 		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
-			LINE_SEP + "  new STAttrMap().put(\"name\", ID1.getText()));";
+			LINE_SEP + "  new STAttrMap().put(\"name\", (ID1!=null?ID1.getText():null)));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -133,8 +142,8 @@ public class TestTemplates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 										"a",
 										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -149,9 +158,9 @@ public class TestTemplates extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testStringConstructor() throws Exception {
+	@Test public void testStringConstructor() throws Exception {
 		String action = "x = %{$ID.text};";
-		String expecting = "x = new StringTemplate(templateLib,ID1.getText());";
+		String expecting = "x = new StringTemplate(templateLib,(ID1!=null?ID1.getText():null));";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -169,7 +178,7 @@ public class TestTemplates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -184,7 +193,7 @@ public class TestTemplates extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSetAttr() throws Exception {
+	@Test public void testSetAttr() throws Exception {
 		String action = "%x.y = z;";
 		String expecting = "(x).setAttribute(\"y\", z);";
 
@@ -204,8 +213,8 @@ public class TestTemplates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator =
-			new ActionTranslatorLexer(generator,
+		ActionTranslator translator =
+			new ActionTranslator(generator,
 										"a",
 										new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -220,9 +229,9 @@ public class TestTemplates extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSetAttrOfExpr() throws Exception {
+	@Test public void testSetAttrOfExpr() throws Exception {
 		String action = "%{foo($ID.text).getST()}.y = z;";
-		String expecting = "(foo(ID1.getText()).getST()).setAttribute(\"y\", z);";
+		String expecting = "(foo((ID1!=null?ID1.getText():null)).getST()).setAttribute(\"y\", z);";
 
 		ErrorQueue equeue = new ErrorQueue();
 		ErrorManager.setErrorListener(equeue);
@@ -240,7 +249,7 @@ public class TestTemplates extends BaseTest {
 		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
 		g.setCodeGenerator(generator);
 		generator.genRecognizer(); // forces load of templates
-		ActionTranslatorLexer translator = new ActionTranslatorLexer(generator,
+		ActionTranslator translator = new ActionTranslator(generator,
 																	 "a",
 																	 new antlr.CommonToken(ANTLRParser.ACTION,action),1);
 		String rawTranslation =
@@ -255,7 +264,30 @@ public class TestTemplates extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testCannotHaveSpaceBeforeDot() throws Exception {
+	@Test public void testSetAttrOfExprInMembers() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"@members {\n" +
+			"%code.instr = o;" + // must not get null ptr!
+			"}\n" +
+			"a : ID\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		assertNoErrors(equeue);
+	}
+
+	@Test public void testCannotHaveSpaceBeforeDot() throws Exception {
 		String action = "%x .y = z;";
 		String expecting = null;
 
@@ -283,7 +315,7 @@ public class TestTemplates extends BaseTest {
 		checkError(equeue, expectedMessage);
 	}
 
-	public void testCannotHaveSpaceAfterDot() throws Exception {
+	@Test public void testCannotHaveSpaceAfterDot() throws Exception {
 		String action = "%x. y = z;";
 		String expecting = null;
 
diff --git a/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java b/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java
new file mode 100644
index 0000000..df0c11a
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java
@@ -0,0 +1,797 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.TokenRewriteStream;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestTokenRewriteStream extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestTokenRewriteStream() {
+    }
+
+	@Test public void testInsertBeforeIndex0() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(0, "0");
+		String result = tokens.toString();
+		String expecting = "0abc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertAfterLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertAfter(2, "x");
+		String result = tokens.toString();
+		String expecting = "abcx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(1, "x");
+		tokens.insertAfter(1, "x");
+		String result = tokens.toString();
+		String expecting = "axbxc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceIndex0() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(0, "x");
+		String result = tokens.toString();
+		String expecting = "xbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, "x");
+		String result = tokens.toString();
+		String expecting = "abx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, "x");
+		String result = tokens.toString();
+		String expecting = "axc";
+		assertEquals(expecting, result);
+	}
+
+    @Test public void testToStringStartStop() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "MUL : '*';\n" +
+            "ASSIGN : '=';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 0123456789
+        // Input:  x = 3 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+        tokens.LT(1); // fill buffer
+        tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
+
+        String result = tokens.toOriginalString();
+        String expecting = "x = 3 * 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString();
+        expecting = "x = 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,9);
+        expecting = "x = 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(4,8);
+        expecting = "0";
+        assertEquals(expecting, result);
+    }
+
+    @Test public void testToStringStartStop2() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+        tokens.LT(1); // fill buffer
+
+        String result = tokens.toOriginalString();
+        String expecting = "x = 3 * 0 + 2 * 0;";
+        assertEquals(expecting, result);
+
+        tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
+        result = tokens.toString();
+        expecting = "x = 0 + 2 * 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,17);
+        expecting = "x = 0 + 2 * 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(4,8);
+        expecting = "0";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,8);
+        expecting = "x = 0";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(12,16);
+        expecting = "2 * 0";
+        assertEquals(expecting, result);
+
+        tokens.insertAfter(17, "// comment");
+        result = tokens.toString(12,17);
+        expecting = "2 * 0;// comment";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,8); // try again after insert at end
+        expecting = "x = 0";
+        assertEquals(expecting, result);
+    }
+
+
+    @Test public void test2ReplaceMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, "x");
+		tokens.replace(1, "y");
+		String result = tokens.toString();
+		String expecting = "ayc";
+		assertEquals(expecting, result);
+	}
+
+    @Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+        tokens.insertBefore(0, "_");
+        tokens.replace(1, "x");
+		tokens.replace(1, "y");
+		String result = tokens.toString();
+		String expecting = "_ayc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, "x");
+		tokens.delete(1);
+		String result = tokens.toString();
+		String expecting = "ac";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertInPriorReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(0, 2, "x");
+		tokens.insertBefore(1, "0");
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "insert op <InsertBeforeOp at 1:\"0\"> within boundaries of previous <ReplaceOp at 0..2:\"x\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testInsertThenReplaceSameIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(0, "0");
+		tokens.replace(0, "x"); // supercedes insert at 0
+		String result = tokens.toString();
+		String expecting = "xbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void test2InsertMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(1, "y");
+		String result = tokens.toString();
+		String expecting = "ayxbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void test2InsertThenReplaceIndex0() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(0, "x");
+		tokens.insertBefore(0, "y");
+		tokens.replace(0, "z");
+		String result = tokens.toString();
+		String expecting = "zbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, "x");
+		tokens.insertBefore(2, "y");
+		String result = tokens.toString();
+		String expecting = "abyx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertThenReplaceLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(2, "y");
+		tokens.replace(2, "x");
+		String result = tokens.toString();
+		String expecting = "abx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, "x");
+		tokens.insertAfter(2, "y");
+		String result = tokens.toString();
+		String expecting = "abxy";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(2, "y");
+		String result = tokens.toString();
+		String expecting = "abyxba";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(4, "y"); // no effect; within range of a replace
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "insert op <InsertBeforeOp at 4:\"y\"> within boundaries of previous <ReplaceOp at 2..4:\"x\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 4, "x");
+		tokens.insertAfter(4, "y");
+		String result = tokens.toString();
+		String expecting = "abxyba";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceAll() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(0, 6, "x");
+		String result = tokens.toString();
+		String expecting = "x";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceSubsetThenFetch() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 4, "xyz");
+		String result = tokens.toString(0,6);
+		String expecting = "abxyzba";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenReplaceSuperset() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 4, "xyz");
+		tokens.replace(3, 5, "foo"); // overlaps, error
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "replace op boundaries of <ReplaceOp at 3..5:\"foo\"> overlap with previous <ReplaceOp at 2..4:\"xyz\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 4, "xyz");
+		tokens.replace(1, 3, "foo"); // overlap, error
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "replace op boundaries of <ReplaceOp at 1..3:\"foo\"> overlap with previous <ReplaceOp at 2..4:\"xyz\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 2, "xyz");
+		tokens.replace(0, 3, "foo");
+		String result = tokens.toString();
+		String expecting = "fooa";
+		assertEquals(expecting, result);
+	}
+
+	// June 2, 2008 I rewrote core of rewrite engine; just adding lots more tests here
+
+	@Test public void testCombineInserts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(0, "x");
+		tokens.insertBefore(0, "y");
+		String result = tokens.toString();
+		String expecting = "yxabc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testCombine3Inserts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(0, "y");
+		tokens.insertBefore(1, "z");
+		String result = tokens.toString();
+		String expecting = "yazxbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(0, 2, "foo");
+		tokens.insertBefore(0, "z"); // combine with left edge of rewrite
+		String result = tokens.toString();
+		String expecting = "zfoo";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.delete(0, 2);
+		tokens.insertBefore(0, "z"); // combine with left edge of rewrite
+		String result = tokens.toString();
+		String expecting = "z"; // make sure combo is not znull
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testDisjointInserts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(2, "y");
+		tokens.insertBefore(0, "z");
+		String result = tokens.toString();
+		String expecting = "zaxbyc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testOverlappingReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, 2, "foo");
+		tokens.replace(0, 3, "bar"); // wipes prior nested replace
+		String result = tokens.toString();
+		String expecting = "bar";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testOverlappingReplace2() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(0, 3, "bar");
+		tokens.replace(1, 2, "foo"); // cannot split earlier replace
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "replace op boundaries of <ReplaceOp at 1..2:\"foo\"> overlap with previous <ReplaceOp at 0..3:\"bar\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testOverlappingReplace3() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, 2, "foo");
+		tokens.replace(0, 2, "bar"); // wipes prior nested replace
+		String result = tokens.toString();
+		String expecting = "barc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testOverlappingReplace4() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, 2, "foo");
+		tokens.replace(1, 3, "bar"); // wipes prior nested replace
+		String result = tokens.toString();
+		String expecting = "abar";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testDropIdenticalReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(1, 2, "foo");
+		tokens.replace(1, 2, "foo"); // drop previous, identical
+		String result = tokens.toString();
+		String expecting = "afooc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testDropPrevCoveredInsert() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(1, "foo");
+		tokens.replace(1, 2, "foo"); // kill prev insert
+		String result = tokens.toString();
+		String expecting = "afooc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLeaveAloneDisjointInsert() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.insertBefore(1, "x");
+		tokens.replace(2, 3, "foo");
+		String result = tokens.toString();
+		String expecting = "axbfoo";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLeaveAloneDisjointInsert2() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.LT(1); // fill buffer
+		tokens.replace(2, 3, "foo");
+		tokens.insertBefore(1, "x");
+		String result = tokens.toString();
+		String expecting = "axbfoo";
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTopologicalSort.java b/tool/src/test/java/org/antlr/test/TestTopologicalSort.java
new file mode 100644
index 0000000..40650a9
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTopologicalSort.java
@@ -0,0 +1,113 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+import org.antlr.misc.Graph;
+
+import java.util.List;
+
+/** Test topo sort in GraphNode. */
+public class TestTopologicalSort extends BaseTest {
+    @Test
+    public void testFairlyLargeGraph() throws Exception {
+        Graph g = new Graph();
+        g.addEdge("C", "F");
+        g.addEdge("C", "G");
+        g.addEdge("C", "A");
+        g.addEdge("C", "B");
+        g.addEdge("A", "D");
+        g.addEdge("A", "E");
+        g.addEdge("B", "E");
+        g.addEdge("D", "E");
+        g.addEdge("D", "F");
+        g.addEdge("F", "H");
+        g.addEdge("E", "F");
+
+        String expecting = "[H, F, E, D, A, G, B, C]";
+        List nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testCyclicGraph() throws Exception {
+        Graph g = new Graph();
+        g.addEdge("A", "B");
+        g.addEdge("B", "C");
+        g.addEdge("C", "A");
+        g.addEdge("C", "D");
+
+        String expecting = "[D, C, B, A]";
+        List nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testRepeatedEdges() throws Exception {
+        Graph g = new Graph();
+        g.addEdge("A", "B");
+        g.addEdge("B", "C");
+        g.addEdge("A", "B"); // dup
+        g.addEdge("C", "D");
+
+        String expecting = "[D, C, B, A]";
+        List nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testSimpleTokenDependence() throws Exception {
+        Graph g = new Graph();
+        g.addEdge("Java.g", "MyJava.tokens"); // Java feeds off manual token file
+        g.addEdge("Java.tokens", "Java.g");        
+        g.addEdge("Def.g", "Java.tokens");    // walkers feed off generated tokens
+        g.addEdge("Ref.g", "Java.tokens");
+
+        String expecting = "[MyJava.tokens, Java.g, Java.tokens, Def.g, Ref.g]";
+        List nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testParserLexerCombo() throws Exception {
+        Graph g = new Graph();
+        g.addEdge("JavaLexer.tokens", "JavaLexer.g");
+        g.addEdge("JavaParser.g", "JavaLexer.tokens");
+        g.addEdge("Def.g", "JavaLexer.tokens");
+        g.addEdge("Ref.g", "JavaLexer.tokens");
+
+        String expecting = "[JavaLexer.g, JavaLexer.tokens, JavaParser.g, Def.g, Ref.g]";
+        List nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java b/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java
new file mode 100644
index 0000000..edbd7fa
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java
@@ -0,0 +1,1102 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.antlr.tool.GrammarSyntaxMessage;
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/** Tree rewrites in tree parsers are basically identical to rewrites
+ *  in a normal grammar except that the atomic element is a node not
+ *  a Token.  Tests here ensure duplication of nodes occurs properly
+ *  and basic functionality.
+ */
+public class TestTreeGrammarRewriteAST extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testFlatList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID INT -> INT ID\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("34 abc\n", found);
+	}
+
+	@Test public void testSimpleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID INT) -> ^(INT ID)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(34 abc)\n", found);
+	}
+
+	@Test public void testNonImaginaryWithCtor() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : INT -> INT[\"99\"]\n" + // make new INT node
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "34");
+		assertEquals("99\n", found);
+	}
+
+	@Test public void testCombinedRewriteAndAuto() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID INT) -> ^(INT ID) | INT\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(34 abc)\n", found);
+
+		found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+							   treeGrammar, "TP", "TLexer", "a", "a", "34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testAvoidDup() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID -> ^(ID ID)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("(abc abc)\n", found);
+	}
+
+	@Test public void testLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID+ INT+ -> (^(ID INT))+ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : (^(ID INT))+ -> INT+ ID+\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a b c 3 4 5");
+		assertEquals("3 4 5 a b c\n", found);
+	}
+
+	@Test public void testAutoDup() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID \n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testAutoDupRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : b c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 1");
+		assertEquals("a 1\n", found);
+	}
+
+    @Test public void testAutoWildcard() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ID . \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testNoWildcardAsRootError() throws Exception {
+        ErrorQueue equeue = new ErrorQueue();
+        ErrorManager.setErrorListener(equeue);
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST;}\n" +
+            "a : ^(. INT) \n" +
+            "  ;\n";
+
+        Grammar g = new Grammar(treeGrammar);
+        Tool antlr = newTool();
+        antlr.setOutputDirectory(null); // write to /dev/null
+        CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+        g.setCodeGenerator(generator);
+        generator.genRecognizer();
+
+        assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+        int expectedMsgID = ErrorManager.MSG_WILDCARD_AS_ROOT;
+        Object expectedArg = null;
+        antlr.RecognitionException expectedExc = null;
+        GrammarSyntaxMessage expectedMessage =
+            new GrammarSyntaxMessage(expectedMsgID, g, null, expectedArg, expectedExc);
+
+        checkError(equeue, expectedMessage);        
+    }
+
+    @Test public void testAutoWildcard2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID INT);\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ^(ID .) \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("(abc 34)\n", found);
+    }
+
+    @Test public void testAutoWildcardWithLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ID c=. \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testAutoWildcardWithListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ID c+=. \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testAutoDupMultiple() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID ID INT\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a b 3");
+		assertEquals("a b 3\n", found);
+	}
+
+	@Test public void testAutoDupTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID INT)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTree2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT INT -> ^(ID INT INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID b b)\n" +
+			"  ;\n" +
+			"b : INT ;";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3 4");
+		assertEquals("(a 3 4)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x=ID y=INT)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithListLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x+=ID y+=INT)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithRuleRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(b INT) ;\n" +
+			"b : ID ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithRuleRootAndLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x=b INT) ;\n" +
+			"b : ID ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithRuleRootAndListLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x+=b y+=c) ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupNestedTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID y=ID INT -> ^($x ^($y INT));\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID ^(ID INT))\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a b 3");
+		assertEquals("(a (b 3))\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithSubruleInside() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {OP;}\n" +
+			"a : (x=ID|x=INT) -> ^(OP $x) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(OP (b|c)) ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a");
+		assertEquals("(OP a)\n", found);
+	}
+
+	@Test public void testDelete() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID -> \n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("", found);
+	}
+
+	@Test public void testSetMatchNoRewrite() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : b INT\n" +
+			"  ;\n" +
+			"b : ID | INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testSetOptionalMatchNoRewrite() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : (ID|INT)? INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc 34\n", found);
+	}
+
+
+	@Test public void testSetMatchNoRewriteLevel2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT -> ^($x INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID (ID | INT) ) ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(abc 34)\n", found);
+	}
+
+	@Test public void testSetMatchNoRewriteLevel2Root() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT -> ^($x INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^((ID | INT) INT) ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(abc 34)\n", found);
+	}
+
+
+	// REWRITE MODE
+
+	@Test public void testRewriteModeCombinedRewriteAndAuto() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"a : ^(ID INT) -> ^(ID[\"ick\"] INT)\n" +
+			"  | INT\n" + // leaves it alone, returning $a.start
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(ick 34)\n", found);
+
+		found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+							   treeGrammar, "TP", "TLexer", "a", "a", "34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testRewriteModeFlatTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : ID a ;\n" +
+			"a : INT -> INT[\"1\"]\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("abc 1\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleFlatTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : a ;\n" +
+			"a : b ;\n" +
+			"b : ID INT -> INT ID\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34 abc\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : a ;\n" +
+			"a : b ;\n" + // a.tree must become b.tree
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : a* b ;\n" + // only b contributes to tree, but it's after a*; s.tree = b.tree
+			"a : X ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'boo' ID INT -> 'boo' ^(ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : 'boo' a* b ;\n" + // don't reset s.tree to b.tree due to 'boo'
+			"a : X ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
+		assertEquals("boo 34\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree4() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : ^('boo' a* b) ;\n" + // don't reset s.tree to b.tree due to 'boo'
+			"a : X ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
+		assertEquals("(boo 34)\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree5() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : ^(a b) ;\n" + // s.tree is a.tree
+			"a : 'boo' ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
+		assertEquals("(boo 34)\n", found);
+	}
+
+    @Test public void testRewriteOfRuleRef() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ID INT | INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : a -> a ;\n" +
+            "a : ID INT -> ID INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefRoot() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(a ^(ID INT)) -> a ;\n" +
+            "a : INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
+        // emits whole tree when you ref the root since I can't know whether
+        // you want the children or not.  You might be returning a whole new
+        // tree.  Hmm...still seems weird.  oh well.
+        assertEquals("(12 (abc 34))\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefRootLabeled() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(label=a ^(ID INT)) -> a ;\n" +
+            "a : INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
+        // emits whole tree when you ref the root since I can't know whether
+        // you want the children or not.  You might be returning a whole new
+        // tree.  Hmm...still seems weird.  oh well.
+        assertEquals("(12 (abc 34))\n", found);
+    }
+
+    @Ignore
+    @Test public void testRewriteOfRuleRefRootListLabeled() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(label+=a ^(ID INT)) -> a ;\n" +
+            "a : INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
+        // emits whole tree when you ref the root since I can't know whether
+        // you want the children or not.  You might be returning a whole new
+        // tree.  Hmm...still seems weird.  oh well.
+        assertEquals("(12 (abc 34))\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefChild() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID ^(INT INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(ID a) -> a ;\n" +
+            "a : ^(INT INT) ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("(34 34)\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID ^(INT INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(ID label=a) -> a ;\n" +
+            "a : ^(INT INT) ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("(34 34)\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID ^(INT INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(ID label+=a) -> a ;\n" +
+            "a : ^(INT INT) ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("(34 34)\n", found);
+    }
+
+    @Test public void testRewriteModeWithPredicatedRewrites() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID[\"root\"] ^(ID INT)) | INT -> ^(ID[\"root\"] INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : ^(ID a) {System.out.println(\"altered tree=\"+$s.start.toStringTree());};\n" +
+			"a : ^(ID INT) -> {true}? ^(ID[\"ick\"] INT)\n" +
+			"              -> INT\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("altered tree=(root (ick 34))\n" +
+					 "(root (ick 34))\n", found);
+	}
+
+    @Test public void testWildcardSingleNode() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID[\"root\"] INT);\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ^(ID c=.) -> $c\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("34\n", found);
+    }
+
+    @Test public void testWildcardUnlabeledSingleNode() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID INT);\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ^(ID .) -> ID\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("abc\n", found);
+    }
+
+    @Test public void testWildcardGrabsSubtree() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID x=INT y=INT z=INT -> ^(ID[\"root\"] ^($x $y $z));\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ^(ID c=.) -> $c\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 1 2 3");
+        assertEquals("(1 2 3)\n", found);
+    }
+
+    @Test public void testWildcardGrabsSubtree2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID x=INT y=INT z=INT -> ID ^($x $y $z);\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ID c=. -> $c\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 1 2 3");
+        assertEquals("(1 2 3)\n", found);
+    }
+
+    @Test public void testWildcardListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : INT INT INT ;\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : (c+=.)+ -> $c+\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "1 2 3");
+        assertEquals("1 2 3\n", found);
+    }
+
+    @Test public void testWildcardListLabel2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST; ASTLabelType=CommonTree;}\n" +
+            "a  : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z));\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(INT (c+=.)+) -> $c+\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "1 2 3");
+        assertEquals("(2 3) (2 3)\n", found);
+    }
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTreeIterator.java b/tool/src/test/java/org/antlr/test/TestTreeIterator.java
new file mode 100644
index 0000000..95480c6
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeIterator.java
@@ -0,0 +1,131 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2005-2008 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+import org.antlr.runtime.tree.*;
+
+public class TestTreeIterator {
+    static final String[] tokens = new String[] {
+        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "A", "B", "C", "D", "E", "F", "G"
+    };
+
+    @Test public void testNode() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("A");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testFlatAB() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(nil A B)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "nil DOWN A B UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testAB() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A B)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testABC() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A B C)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B C UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testVerticalList() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A (B C))");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B DOWN C UP UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testComplex() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A (B (C D E) F) G)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testReset() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A (B (C D E) F) G)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+
+        it.reset();
+        buf = toString(it);
+        expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
+        found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    protected static StringBuffer toString(TreeIterator it) {
+        StringBuffer buf = new StringBuffer();
+        while ( it.hasNext() ) {
+            CommonTree n = (CommonTree)it.next();
+            buf.append(n);
+            if ( it.hasNext() ) buf.append(" ");
+        }
+        return buf;
+    }
+}
diff --git a/src/org/antlr/test/TestTreeNodeStream.java b/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
similarity index 79%
rename from src/org/antlr/test/TestTreeNodeStream.java
rename to tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
index 4e48135..d3ab509 100644
--- a/src/org/antlr/test/TestTreeNodeStream.java
+++ b/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
@@ -31,6 +31,8 @@ import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.Token;
 import org.antlr.runtime.tree.*;
 
+import org.junit.Test;
+
 /** Test the tree node stream. */
 public class TestTreeNodeStream extends BaseTest {
 
@@ -39,7 +41,11 @@ public class TestTreeNodeStream extends BaseTest {
 		return new CommonTreeNodeStream(t);
 	}
 
-	public void testSingleNode() throws Exception {
+    public String toTokenTypeString(TreeNodeStream stream) {
+        return ((CommonTreeNodeStream)stream).toTokenTypeString();
+    }
+
+	@Test public void testSingleNode() throws Exception {
 		Tree t = new CommonTree(new CommonToken(101));
 
 		TreeNodeStream stream = newStream(t);
@@ -48,11 +54,11 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(expecting, found);
 
 		expecting = " 101";
-		found = stream.toString();
+		found = toTokenTypeString(stream);
 		assertEquals(expecting, found);
 	}
 
-	public void test4Nodes() throws Exception {
+	@Test public void test4Nodes() throws Exception {
 		// ^(101 ^(102 103) 104)
 		Tree t = new CommonTree(new CommonToken(101));
 		t.addChild(new CommonTree(new CommonToken(102)));
@@ -65,11 +71,11 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(expecting, found);
 
 		expecting = " 101 2 102 2 103 3 104 3";
-		found = stream.toString();
+		found = toTokenTypeString(stream);
 		assertEquals(expecting, found);
 	}
 
-	public void testList() throws Exception {
+	@Test public void testList() throws Exception {
 		Tree root = new CommonTree((Token)null);
 
 		Tree t = new CommonTree(new CommonToken(101));
@@ -82,49 +88,49 @@ public class TestTreeNodeStream extends BaseTest {
 		root.addChild(t);
 		root.addChild(u);
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
+		TreeNodeStream stream = newStream(root);
 		String expecting = " 101 102 103 104 105";
 		String found = toNodesOnlyString(stream);
 		assertEquals(expecting, found);
 
 		expecting = " 101 2 102 2 103 3 104 3 105";
-		found = stream.toString();
+		found = toTokenTypeString(stream);
 		assertEquals(expecting, found);
 	}
 
-	public void testFlatList() throws Exception {
+	@Test public void testFlatList() throws Exception {
 		Tree root = new CommonTree((Token)null);
 
 		root.addChild(new CommonTree(new CommonToken(101)));
 		root.addChild(new CommonTree(new CommonToken(102)));
 		root.addChild(new CommonTree(new CommonToken(103)));
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
+		TreeNodeStream stream = newStream(root);
 		String expecting = " 101 102 103";
 		String found = toNodesOnlyString(stream);
 		assertEquals(expecting, found);
 
 		expecting = " 101 102 103";
-		found = stream.toString();
+		found = toTokenTypeString(stream);
 		assertEquals(expecting, found);
 	}
 
-	public void testListWithOneNode() throws Exception {
+	@Test public void testListWithOneNode() throws Exception {
 		Tree root = new CommonTree((Token)null);
 
 		root.addChild(new CommonTree(new CommonToken(101)));
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
+		TreeNodeStream stream = newStream(root);
 		String expecting = " 101";
 		String found = toNodesOnlyString(stream);
 		assertEquals(expecting, found);
 
 		expecting = " 101";
-		found = stream.toString();
+		found = toTokenTypeString(stream);
 		assertEquals(expecting, found);
 	}
 
-	public void testAoverB() throws Exception {
+	@Test public void testAoverB() throws Exception {
 		Tree t = new CommonTree(new CommonToken(101));
 		t.addChild(new CommonTree(new CommonToken(102)));
 
@@ -134,11 +140,11 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(expecting, found);
 
 		expecting = " 101 2 102 3";
-		found = stream.toString();
+		found = toTokenTypeString(stream);
 		assertEquals(expecting, found);
 	}
 
-	public void testLT() throws Exception {
+	@Test public void testLT() throws Exception {
 		// ^(101 ^(102 103) 104)
 		Tree t = new CommonTree(new CommonToken(101));
 		t.addChild(new CommonTree(new CommonToken(102)));
@@ -159,7 +165,7 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(Token.EOF, ((Tree)stream.LT(100)).getType());
 	}
 
-	public void testMarkRewindEntire() throws Exception {
+	@Test public void testMarkRewindEntire() throws Exception {
 		// ^(101 ^(102 103 ^(106 107) ) 104 105)
 		// stream has 7 real + 6 nav nodes
 		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
@@ -173,7 +179,7 @@ public class TestTreeNodeStream extends BaseTest {
 		r0.addChild(new CommonTree(new CommonToken(104)));
 		r0.addChild(new CommonTree(new CommonToken(105)));
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+		TreeNodeStream stream = newStream(r0);
 		int m = stream.mark(); // MARK
 		for (int k=1; k<=13; k++) { // consume til end
 			stream.LT(1);
@@ -192,7 +198,7 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
 	}
 
-	public void testMarkRewindInMiddle() throws Exception {
+	@Test public void testMarkRewindInMiddle() throws Exception {
 		// ^(101 ^(102 103 ^(106 107) ) 104 105)
 		// stream has 7 real + 6 nav nodes
 		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
@@ -206,18 +212,19 @@ public class TestTreeNodeStream extends BaseTest {
 		r0.addChild(new CommonTree(new CommonToken(104)));
 		r0.addChild(new CommonTree(new CommonToken(105)));
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+		TreeNodeStream stream = newStream(r0);
 		for (int k=1; k<=7; k++) { // consume til middle
 			//System.out.println(((Tree)stream.LT(1)).getType());
 			stream.consume();
 		}
 		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		int m = stream.mark(); // MARK
+		stream.mark(); // MARK
 		stream.consume(); // consume 107
 		stream.consume(); // consume UP
 		stream.consume(); // consume UP
 		stream.consume(); // consume 104
-		stream.rewind(m);      // REWIND
+		stream.rewind(); // REWIND
+        stream.mark();   // keep saving nodes though
 
 		assertEquals(107, ((Tree)stream.LT(1)).getType());
 		stream.consume();
@@ -236,7 +243,7 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
 	}
 
-	public void testMarkRewindNested() throws Exception {
+	@Test public void testMarkRewindNested() throws Exception {
 		// ^(101 ^(102 103 ^(106 107) ) 104 105)
 		// stream has 7 real + 6 nav nodes
 		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
@@ -250,7 +257,7 @@ public class TestTreeNodeStream extends BaseTest {
 		r0.addChild(new CommonTree(new CommonToken(104)));
 		r0.addChild(new CommonTree(new CommonToken(105)));
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
+		TreeNodeStream stream = newStream(r0);
 		int m = stream.mark(); // MARK at start
 		stream.consume(); // consume 101
 		stream.consume(); // consume DN
@@ -275,7 +282,7 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
 	}
 
-	public void testSeek() throws Exception {
+	@Test public void testSeekFromStart() throws Exception {
 		// ^(101 ^(102 103 ^(106 107) ) 104 105)
 		// stream has 7 real + 6 nav nodes
 		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
@@ -289,10 +296,7 @@ public class TestTreeNodeStream extends BaseTest {
 		r0.addChild(new CommonTree(new CommonToken(104)));
 		r0.addChild(new CommonTree(new CommonToken(105)));
 
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
-		stream.consume(); // consume 101
-		stream.consume(); // consume DN
-		stream.consume(); // consume 102
+		TreeNodeStream stream = newStream(r0);
 		stream.seek(7);   // seek to 107
 		assertEquals(107, ((Tree)stream.LT(1)).getType());
 		stream.consume(); // consume 107
@@ -301,38 +305,40 @@ public class TestTreeNodeStream extends BaseTest {
 		assertEquals(104, ((Tree)stream.LT(1)).getType());
 	}
 
-	public void testSeekFromStart() throws Exception {
-		// ^(101 ^(102 103 ^(106 107) ) 104 105)
-		// stream has 7 real + 6 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r0.addChild(r1);
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		Tree r2 = new CommonTree(new CommonToken(106));
-		r2.addChild(new CommonTree(new CommonToken(107)));
-		r1.addChild(r2);
-		r0.addChild(new CommonTree(new CommonToken(104)));
-		r0.addChild(new CommonTree(new CommonToken(105)));
-
-		CommonTreeNodeStream stream = new CommonTreeNodeStream(r0);
-		stream.seek(7);   // seek to 107
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 107
-		stream.consume(); // consume UP
-		stream.consume(); // consume UP
-		assertEquals(104, ((Tree)stream.LT(1)).getType());
-	}
+    @Test public void testReset() throws Exception {
+        // ^(101 ^(102 103 ^(106 107) ) 104 105)
+        // stream has 7 real + 6 nav nodes
+        // Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        Tree r0 = new CommonTree(new CommonToken(101));
+        Tree r1 = new CommonTree(new CommonToken(102));
+        r0.addChild(r1);
+        r1.addChild(new CommonTree(new CommonToken(103)));
+        Tree r2 = new CommonTree(new CommonToken(106));
+        r2.addChild(new CommonTree(new CommonToken(107)));
+        r1.addChild(r2);
+        r0.addChild(new CommonTree(new CommonToken(104)));
+        r0.addChild(new CommonTree(new CommonToken(105)));
+
+        TreeNodeStream stream = newStream(r0);
+        String v = toNodesOnlyString(stream); // scan all
+        stream.reset();
+        String v2 = toNodesOnlyString(stream); // scan all
+        assertEquals(v,v2);
+    }
 
 	public String toNodesOnlyString(TreeNodeStream nodes) {
+        TreeAdaptor adaptor = nodes.getTreeAdaptor();
 		StringBuffer buf = new StringBuffer();
-		for (int i=0; i<nodes.size(); i++) {
-			Object t = nodes.LT(i+1);
-			int type = nodes.getTreeAdaptor().getType(t);
+        Object o = nodes.LT(1);
+        int type = adaptor.getType(o);
+        while ( o!=null && type!=Token.EOF ) {
 			if ( !(type==Token.DOWN||type==Token.UP) ) {
 				buf.append(" ");
 				buf.append(type);
 			}
+            nodes.consume();
+            o = nodes.LT(1);
+            type = adaptor.getType(o);
 		}
 		return buf.toString();
 	}
diff --git a/src/org/antlr/test/TestTreeParsing.java b/tool/src/test/java/org/antlr/test/TestTreeParsing.java
similarity index 63%
rename from src/org/antlr/test/TestTreeParsing.java
rename to tool/src/test/java/org/antlr/test/TestTreeParsing.java
index e8d90a1..0e7569e 100644
--- a/src/org/antlr/test/TestTreeParsing.java
+++ b/tool/src/test/java/org/antlr/test/TestTreeParsing.java
@@ -27,8 +27,13 @@
 */
 package org.antlr.test;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
 public class TestTreeParsing extends BaseTest {
-	public void testFlatList() throws Exception {
+	@Test public void testFlatList() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -48,7 +53,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("abc, 34\n", found);
 	}
 
-	public void testSimpleTree() throws Exception {
+	@Test public void testSimpleTree() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -68,7 +73,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("abc, 34\n", found);
 	}
 
-	public void testFlatVsTreeDecision() throws Exception {
+	@Test public void testFlatVsTreeDecision() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -91,7 +96,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("^(a 1)b 2\n", found);
 	}
 
-	public void testFlatVsTreeDecision2() throws Exception {
+	@Test public void testFlatVsTreeDecision2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -115,7 +120,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("^(a 3)b 5\n", found);
 	}
 
-	public void testCyclicDFALookahead() throws Exception {
+	@Test public void testCyclicDFALookahead() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -137,7 +142,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("alt 1\n", found);
 	}
 
-	public void testTemplateOutput() throws Exception {
+	@Test public void testTemplateOutput() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -158,7 +163,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("34\n", found);
 	}
 
-	public void testNullableChildList() throws Exception {
+	@Test public void testNullableChildList() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -178,7 +183,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testNullableChildList2() throws Exception {
+	@Test public void testNullableChildList2() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -199,7 +204,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("abc\n", found);
 	}
 
-	public void testNullableChildList3() throws Exception {
+	@Test public void testNullableChildList3() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -221,7 +226,7 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("abc, def\n", found);
 	}
 
-	public void testActionsAfterRoot() throws Exception {
+	@Test public void testActionsAfterRoot() throws Exception {
 		String grammar =
 			"grammar T;\n" +
 			"options {output=AST;}\n" +
@@ -242,4 +247,97 @@ public class TestTreeParsing extends BaseTest {
 		assertEquals("abc, 2\n", found);
 	}
 
+    @Test public void testWildcardLookahead() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' . INT) {System.out.print(\"alt 1\");}"+
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 1\n", found);
+    }
+
+    @Test public void testWildcardLookahead2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' . INT) {System.out.print(\"alt 1\");}"+
+            "  | ^('+' . .)   {System.out.print(\"alt 2\");}\n" +
+            "  ;\n";
+
+        // AMBIG upon '+' DOWN INT UP etc.. but so what.
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 1\n", found);
+    }
+
+    @Test public void testWildcardLookahead3() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' ID INT) {System.out.print(\"alt 1\");}"+
+            "  | ^('+' . .)   {System.out.print(\"alt 2\");}\n" +
+            "  ;\n";
+
+        // AMBIG upon '+' DOWN INT UP etc.. but so what.
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 1\n", found);
+    }
+
+    @Test public void testWildcardPlusLookahead() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' INT INT ) {System.out.print(\"alt 1\");}"+
+            "  | ^('+' .+)   {System.out.print(\"alt 2\");}\n" +
+            "  ;\n";
+
+        // AMBIG upon '+' DOWN INT UP etc.. but so what.
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 2\n", found);
+    }
+
 }
diff --git a/src/org/antlr/test/TestTreeWizard.java b/tool/src/test/java/org/antlr/test/TestTreeWizard.java
similarity index 84%
rename from src/org/antlr/test/TestTreeWizard.java
rename to tool/src/test/java/org/antlr/test/TestTreeWizard.java
index 1113fd5..ad40bad 100644
--- a/src/org/antlr/test/TestTreeWizard.java
+++ b/tool/src/test/java/org/antlr/test/TestTreeWizard.java
@@ -34,12 +34,18 @@ import java.util.List;
 import java.util.ArrayList;
 import java.util.HashMap;
 
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+
 public class TestTreeWizard extends BaseTest {
 	protected static final String[] tokens =
 		new String[] {"", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"};
 	protected static final TreeAdaptor adaptor = new CommonTreeAdaptor();
 
-	public void testSingleNode() throws Exception {
+	@Test public void testSingleNode() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("ID");
 		String found = t.toStringTree();
@@ -47,7 +53,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSingleNodeWithArg() throws Exception {
+	@Test public void testSingleNodeWithArg() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("ID[foo]");
 		String found = t.toStringTree();
@@ -55,7 +61,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSingleNodeTree() throws Exception {
+	@Test public void testSingleNodeTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A)");
 		String found = t.toStringTree();
@@ -63,7 +69,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSingleLevelTree() throws Exception {
+	@Test public void testSingleLevelTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C D)");
 		String found = t.toStringTree();
@@ -71,7 +77,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testListTree() throws Exception {
+	@Test public void testListTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
 		String found = t.toStringTree();
@@ -79,13 +85,13 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testInvalidListTree() throws Exception {
+	@Test public void testInvalidListTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("A B C");
 		assertTrue(t==null);
 	}
 
-	public void testDoubleLevelTree() throws Exception {
+	@Test public void testDoubleLevelTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A (B C) (B D) E)");
 		String found = t.toStringTree();
@@ -93,7 +99,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testSingleNodeIndex() throws Exception {
+	@Test public void testSingleNodeIndex() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("ID");
 		Map m = wiz.index(t);
@@ -102,25 +108,25 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNoRepeatsIndex() throws Exception {
+	@Test public void testNoRepeatsIndex() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C D)");
 		Map m = wiz.index(t);
-		String found = m.toString();
-		String expecting = "{8=[D], 6=[B], 7=[C], 5=[A]}";
+		String found = sortMapToString(m);
+        String expecting = "{5=[A], 6=[B], 7=[C], 8=[D]}";
 		assertEquals(expecting, found);
 	}
 
-	public void testRepeatsIndex() throws Exception {
+	@Test public void testRepeatsIndex() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
 		Map m = wiz.index(t);
-		String found = m.toString();
-		String expecting = "{8=[D, D], 6=[B, B, B], 7=[C], 5=[A, A]}";
+		String found =  sortMapToString(m);
+        String expecting = "{5=[A, A], 6=[B, B, B], 7=[C], 8=[D, D]}";
 		assertEquals(expecting, found);
 	}
 
-	public void testNoRepeatsVisit() throws Exception {
+	@Test public void testNoRepeatsVisit() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C D)");
 		final List elements = new ArrayList();
@@ -134,7 +140,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testNoRepeatsVisit2() throws Exception {
+	@Test public void testNoRepeatsVisit2() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
 		final List elements = new ArrayList();
@@ -149,7 +155,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRepeatsVisit() throws Exception {
+	@Test public void testRepeatsVisit() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
 		final List elements = new ArrayList();
@@ -164,7 +170,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRepeatsVisit2() throws Exception {
+	@Test public void testRepeatsVisit2() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
 		final List elements = new ArrayList();
@@ -179,7 +185,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRepeatsVisitWithContext() throws Exception {
+	@Test public void testRepeatsVisitWithContext() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
 		final List elements = new ArrayList();
@@ -196,7 +202,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testRepeatsVisitWithNullParentAndContext() throws Exception {
+	@Test public void testRepeatsVisitWithNullParentAndContext() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
 		final List elements = new ArrayList();
@@ -213,7 +219,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testVisitPattern() throws Exception {
+	@Test public void testVisitPattern() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C (A B) D)");
 		final List elements = new ArrayList();
@@ -228,7 +234,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testVisitPatternMultiple() throws Exception {
+	@Test public void testVisitPatternMultiple() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C (A B) (D (A B)))");
 		final List elements = new ArrayList();
@@ -245,7 +251,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testVisitPatternMultipleWithLabels() throws Exception {
+	@Test public void testVisitPatternMultipleWithLabels() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
 		final List elements = new ArrayList();
@@ -262,35 +268,35 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals(expecting, found);
 	}
 
-	public void testParse() throws Exception {
+	@Test public void testParse() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C)");
 		boolean valid = wiz.parse(t, "(A B C)");
 		assertTrue(valid);
 	}
 
-	public void testParseSingleNode() throws Exception {
+	@Test public void testParseSingleNode() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("A");
 		boolean valid = wiz.parse(t, "A");
 		assertTrue(valid);
 	}
 
-	public void testParseFlatTree() throws Exception {
+	@Test public void testParseFlatTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
 		boolean valid = wiz.parse(t, "(nil A B C)");
 		assertTrue(valid);
 	}
 
-	public void testWildcard() throws Exception {
+	@Test public void testWildcard() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C)");
 		boolean valid = wiz.parse(t, "(A . .)");
 		assertTrue(valid);
 	}
 
-	public void testParseWithText() throws Exception {
+	@Test public void testParseWithText() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B[foo] C[bar])");
 		// C pattern has no text arg so despite [bar] in t, no need
@@ -299,14 +305,14 @@ public class TestTreeWizard extends BaseTest {
 		assertTrue(valid);
 	}
 
-	public void testParseWithTextFails() throws Exception {
+	@Test public void testParseWithTextFails() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C)");
 		boolean valid = wiz.parse(t, "(A[foo] B C)");
 		assertTrue(!valid); // fails
 	}
 
-	public void testParseLabels() throws Exception {
+	@Test public void testParseLabels() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C)");
 		Map labels = new HashMap();
@@ -317,7 +323,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals("C", labels.get("c").toString());
 	}
 
-	public void testParseWithWildcardLabels() throws Exception {
+	@Test public void testParseWithWildcardLabels() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C)");
 		Map labels = new HashMap();
@@ -327,7 +333,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals("C", labels.get("c").toString());
 	}
 
-	public void testParseLabelsAndTestText() throws Exception {
+	@Test public void testParseLabelsAndTestText() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B[foo] C)");
 		Map labels = new HashMap();
@@ -338,7 +344,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals("C", labels.get("c").toString());
 	}
 
-	public void testParseLabelsInNestedTree() throws Exception {
+	@Test public void testParseLabelsInNestedTree() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A (B C) (D E))");
 		Map labels = new HashMap();
@@ -351,7 +357,7 @@ public class TestTreeWizard extends BaseTest {
 		assertEquals("E", labels.get("e").toString());
 	}
 
-	public void testEquals() throws Exception {
+	@Test public void testEquals() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t1 = (CommonTree)wiz.create("(A B C)");
 		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
@@ -359,7 +365,7 @@ public class TestTreeWizard extends BaseTest {
 		assertTrue(same);
 	}
 
-	public void testEqualsWithText() throws Exception {
+	@Test public void testEqualsWithText() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
 		CommonTree t2 = (CommonTree)wiz.create("(A B[foo] C)");
@@ -367,7 +373,7 @@ public class TestTreeWizard extends BaseTest {
 		assertTrue(same);
 	}
 	
-	public void testEqualsWithMismatchedText() throws Exception {
+	@Test public void testEqualsWithMismatchedText() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
 		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
@@ -375,7 +381,7 @@ public class TestTreeWizard extends BaseTest {
 		assertTrue(!same);
 	}
 
-	public void testFindPattern() throws Exception {
+	@Test public void testFindPattern() throws Exception {
 		TreeWizard wiz = new TreeWizard(adaptor, tokens);
 		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
 		final List subtrees = wiz.find(t, "(A B)");
diff --git a/tool/src/test/java/org/antlr/test/TestTrees.java b/tool/src/test/java/org/antlr/test/TestTrees.java
new file mode 100644
index 0000000..d35c3ad
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTrees.java
@@ -0,0 +1,409 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.test;
+
+import org.antlr.runtime.tree.*;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestTrees extends BaseTest {
+	TreeAdaptor adaptor = new CommonTreeAdaptor();
+	protected boolean debug = false;
+
+	static class V extends CommonTree {
+		public int x;
+		public V(Token t) { this.token = t;}
+		public V(int ttype, int x) { this.x=x; token=new CommonToken(ttype); }
+		public V(int ttype, Token t, int x) { token=t; this.x=x;}
+		public String toString() { return (token!=null?token.getText():"")+"<V>";}
+	}
+
+	@Test public void testSingleNode() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(101));
+		assertNull(t.parent);
+		assertEquals(-1, t.childIndex);
+	}
+
+	@Test public void testTwoChildrenOfNilRoot() throws Exception {
+		CommonTree root_0 = (CommonTree)adaptor.nil();
+		CommonTree t = new V(101, 2);
+		CommonTree u = new V(new CommonToken(102,"102"));
+		adaptor.addChild(root_0, t);
+		adaptor.addChild(root_0, u);
+		assertNull(root_0.parent);
+		assertEquals(-1, root_0.childIndex);
+		assertEquals(0, t.childIndex);
+		assertEquals(1, u.childIndex);
+	}
+
+	@Test public void test4Nodes() throws Exception {
+		// ^(101 ^(102 103) 104)
+		CommonTree r0 = new CommonTree(new CommonToken(101));
+		r0.addChild(new CommonTree(new CommonToken(102)));
+		r0.getChild(0).addChild(new CommonTree(new CommonToken(103)));
+		r0.addChild(new CommonTree(new CommonToken(104)));
+
+		assertNull(r0.parent);
+		assertEquals(-1, r0.childIndex);
+	}
+
+	@Test public void testList() throws Exception {
+		// ^(nil 101 102 103)
+		CommonTree r0 = new CommonTree((Token)null);
+		CommonTree c0, c1, c2;
+		r0.addChild(c0=new CommonTree(new CommonToken(101)));
+		r0.addChild(c1=new CommonTree(new CommonToken(102)));
+		r0.addChild(c2=new CommonTree(new CommonToken(103)));
+
+		assertNull(r0.parent);
+		assertEquals(-1, r0.childIndex);
+		assertEquals(r0, c0.parent);
+		assertEquals(0, c0.childIndex);
+		assertEquals(r0, c1.parent);
+		assertEquals(1, c1.childIndex);		
+		assertEquals(r0, c2.parent);
+		assertEquals(2, c2.childIndex);
+	}
+
+	@Test public void testList2() throws Exception {
+		// Add child ^(nil 101 102 103) to root 5
+		// should pull 101 102 103 directly to become 5's child list
+		CommonTree root = new CommonTree(new CommonToken(5));
+
+		// child tree
+		CommonTree r0 = new CommonTree((Token)null);
+		CommonTree c0, c1, c2;
+		r0.addChild(c0=new CommonTree(new CommonToken(101)));
+		r0.addChild(c1=new CommonTree(new CommonToken(102)));
+		r0.addChild(c2=new CommonTree(new CommonToken(103)));
+
+		root.addChild(r0);
+
+		assertNull(root.parent);
+		assertEquals(-1, root.childIndex);
+		// check children of root all point at root
+		assertEquals(root, c0.parent);
+		assertEquals(0, c0.childIndex);
+		assertEquals(root, c0.parent);
+		assertEquals(1, c1.childIndex);
+		assertEquals(root, c0.parent);
+		assertEquals(2, c2.childIndex);
+	}
+
+	@Test public void testAddListToExistChildren() throws Exception {
+		// Add child ^(nil 101 102 103) to root ^(5 6)
+		// should add 101 102 103 to end of 5's child list
+		CommonTree root = new CommonTree(new CommonToken(5));
+		root.addChild(new CommonTree(new CommonToken(6)));
+
+		// child tree
+		CommonTree r0 = new CommonTree((Token)null);
+		CommonTree c0, c1, c2;
+		r0.addChild(c0=new CommonTree(new CommonToken(101)));
+		r0.addChild(c1=new CommonTree(new CommonToken(102)));
+		r0.addChild(c2=new CommonTree(new CommonToken(103)));
+
+		root.addChild(r0);
+
+		assertNull(root.parent);
+		assertEquals(-1, root.childIndex);
+		// check children of root all point at root
+		assertEquals(root, c0.parent);
+		assertEquals(1, c0.childIndex);
+		assertEquals(root, c0.parent);
+		assertEquals(2, c1.childIndex);
+		assertEquals(root, c0.parent);
+		assertEquals(3, c2.childIndex);
+	}
+
+	@Test public void testDupTree() throws Exception {
+		// ^(101 ^(102 103 ^(106 107) ) 104 105)
+		CommonTree r0 = new CommonTree(new CommonToken(101));
+		CommonTree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+
+		CommonTree dup = (CommonTree)(new CommonTreeAdaptor()).dupTree(r0);
+
+		assertNull(dup.parent);
+		assertEquals(-1, dup.childIndex);
+		dup.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot() throws Exception {
+		// 5 becomes new root of ^(nil 101 102 103)
+		CommonTree newRoot = new CommonTree(new CommonToken(5));
+
+		CommonTree oldRoot = new CommonTree((Token)null);
+		oldRoot.addChild(new CommonTree(new CommonToken(101)));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot2() throws Exception {
+		// 5 becomes new root of ^(101 102 103)
+		CommonTree newRoot = new CommonTree(new CommonToken(5));
+
+		CommonTree oldRoot = new CommonTree(new CommonToken(101));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot3() throws Exception {
+		// ^(nil 5) becomes new root of ^(nil 101 102 103)
+		CommonTree newRoot = new CommonTree((Token)null);
+		newRoot.addChild(new CommonTree(new CommonToken(5)));
+
+		CommonTree oldRoot = new CommonTree((Token)null);
+		oldRoot.addChild(new CommonTree(new CommonToken(101)));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	// Same nil-wrapped new root as testBecomeRoot3, but the old root is a
+	// real node this time. (Numbering skips 4 in this upstream import.)
+	@Test public void testBecomeRoot5() throws Exception {
+		// ^(nil 5) becomes new root of ^(101 102 103)
+		CommonTree newRoot = new CommonTree((Token)null);
+		newRoot.addChild(new CommonTree(new CommonToken(5)));
+
+		CommonTree oldRoot = new CommonTree(new CommonToken(101));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	// Mirrors the code a generated parser emits for the rewrite ^(5 6):
+	// nil placeholder roots, becomeRoot, then addChild. Uses the shared
+	// `adaptor` field (declared elsewhere in this class — not local here).
+	@Test public void testBecomeRoot6() throws Exception {
+		// emulates construction of ^(5 6)
+		CommonTree root_0 = (CommonTree)adaptor.nil();
+		CommonTree root_1 = (CommonTree)adaptor.nil();
+		root_1 = (CommonTree)adaptor.becomeRoot(new CommonTree(new CommonToken(5)), root_1);
+
+		adaptor.addChild(root_1, new CommonTree(new CommonToken(6)));
+
+		adaptor.addChild(root_0, root_1);
+
+		root_0.sanityCheckParentAndChildIndexes();
+	}
+
+	// Test replaceChildren
+
+	// replaceChildren on a childless node must throw IllegalArgumentException;
+	// the manual try/catch + flag records that the exception actually fired.
+	@Test public void testReplaceWithNoChildren() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(101));
+		CommonTree newChild = new CommonTree(new CommonToken(5));
+		boolean error = false;
+		try {
+			t.replaceChildren(0, 0, newChild);
+		}
+		catch (IllegalArgumentException iae) {
+			error = true;
+		}
+		assertTrue(error);
+	}
+
+	// Replace the only child: (a b) -> (a c). Structure is checked via the
+	// LISP-style toStringTree() rendering, indexes via the sanity check.
+	@Test public void testReplaceWithOneChildren() throws Exception {
+		// assume token type 99 and use text
+		CommonTree t = new CommonTree(new CommonToken(99,"a"));
+		CommonTree c0 = new CommonTree(new CommonToken(99, "b"));
+		t.addChild(c0);
+
+		CommonTree newChild = new CommonTree(new CommonToken(99, "c"));
+		t.replaceChildren(0, 0, newChild);
+		String expecting = "(a c)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// One-for-one replacement of the middle child: (a b c d) -> (a b x d).
+	@Test public void testReplaceInMiddle() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c"))); // index 1
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+		t.replaceChildren(1, 1, newChild);
+		String expecting = "(a b x d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// One-for-one replacement at the left boundary (index 0):
+	// (a b c d) -> (a x c d).
+	@Test public void testReplaceAtLeft() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b"))); // index 0
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+		t.replaceChildren(0, 0, newChild);
+		String expecting = "(a x c d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// One-for-one replacement at the right boundary (last index):
+	// (a b c d) -> (a b c x).
+	@Test public void testReplaceAtRight() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d"))); // index 2
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+		t.replaceChildren(2, 2, newChild);
+		String expecting = "(a b c x)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Growing replacement: one child swapped for a nil-rooted pair (x y),
+	// which splices in as two siblings: (a b c d) -> (a x y c d).
+	@Test public void testReplaceOneWithTwoAtLeft() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		// nil root from the shared adaptor field acts as a flat child list
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(0, 0, newChildren);
+		String expecting = "(a x y c d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Growing replacement at the last index: (a b c d) -> (a b c x y).
+	@Test public void testReplaceOneWithTwoAtRight() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		// nil-rooted (x y) pair splices in as two siblings
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(2, 2, newChildren);
+		String expecting = "(a b c x y)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Growing replacement in the middle: (a b c d) -> (a b x y d);
+	// the trailing child 'd' must be shifted right with a correct index.
+	@Test public void testReplaceOneWithTwoInMiddle() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		// nil-rooted (x y) pair splices in as two siblings
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(1, 1, newChildren);
+		String expecting = "(a b x y d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Shrinking replacement: range [0,1] collapsed to one node:
+	// (a b c d) -> (a x d); 'd' must shift left to index 1.
+	@Test public void testReplaceTwoWithOneAtLeft() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+
+		t.replaceChildren(0, 1, newChild);
+		String expecting = "(a x d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Shrinking replacement at the right edge: range [1,2] collapsed to
+	// one node: (a b c d) -> (a b x).
+	@Test public void testReplaceTwoWithOneAtRight() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+
+		t.replaceChildren(1, 2, newChild);
+		String expecting = "(a b x)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Replace the full child range [0,2] with a single node:
+	// (a b c d) -> (a x).
+	@Test public void testReplaceAllWithOne() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+
+		t.replaceChildren(0, 2, newChild);
+		String expecting = "(a x)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	// Replace the full child range [0,2] with a nil-rooted pair:
+	// (a b c d) -> (a x y).
+	@Test public void testReplaceAllWithTwo() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		// nil-rooted (x y) pair splices in as two siblings
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(0, 2, newChildren);
+		String expecting = "(a x y)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-java/antlr3.git



More information about the pkg-java-commits mailing list